git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'crypto/master'
author    Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 11 Feb 2016 01:09:36 +0000 (12:09 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 11 Feb 2016 01:09:36 +0000 (12:09 +1100)
2033 files changed:
.mailmap
Documentation/ABI/obsolete/sysfs-class-rfkill
Documentation/ABI/removed/sysfs-class-rfkill [new file with mode: 0644]
Documentation/ABI/stable/sysfs-fs-orangefs [new file with mode: 0644]
Documentation/ABI/testing/sysfs-fs-f2fs
Documentation/DocBook/device-drivers.tmpl
Documentation/DocBook/media/v4l/media-ioc-g-topology.xml
Documentation/DocBook/media/v4l/media-types.xml
Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml [deleted file]
Documentation/DocBook/media/v4l/pixfmt.xml
Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
Documentation/DocBook/media/v4l/vidioc-querystd.xml
Documentation/Intel-IOMMU.txt
Documentation/arm/sunxi/README
Documentation/arm64/booting.txt
Documentation/cgroup-v2.txt
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/arm/keystone/keystone.txt
Documentation/devicetree/bindings/arm/marvell,kirkwood.txt
Documentation/devicetree/bindings/arm/samsung/exynos-srom.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/sunxi.txt
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/dma/arm-pl330.txt
Documentation/devicetree/bindings/firmware/qcom,scm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/media/i2c/mt9v032.txt
Documentation/devicetree/bindings/media/i2c/tvp5150.txt [new file with mode: 0644]
Documentation/devicetree/bindings/media/rcar_vin.txt
Documentation/devicetree/bindings/media/renesas,jpu.txt
Documentation/devicetree/bindings/media/ti-cal.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
Documentation/devicetree/bindings/mtd/qcom_nandc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
Documentation/devicetree/bindings/net/mdio-mux.txt
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
Documentation/devicetree/bindings/pci/rcar-pci.txt
Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
Documentation/devicetree/bindings/soc/rockchip/power_domain.txt
Documentation/devicetree/bindings/thermal/rcar-thermal.txt
Documentation/dvb/README.dvb-usb
Documentation/features/vm/huge-vmap/arch-support.txt
Documentation/filesystems/orangefs.txt [new file with mode: 0644]
Documentation/filesystems/proc.txt
Documentation/hwmon/ltc2990 [new file with mode: 0644]
Documentation/hwmon/max34440
Documentation/kernel-parameters.txt
Documentation/networking/batman-adv.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/mac80211-injection.txt
Documentation/virtual/kvm/api.txt
Documentation/watchdog/watchdog-parameters.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/pci.h
arch/arc/Kconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/include/asm/mcip.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/mcip.c
arch/arc/kernel/setup.c
arch/arc/kernel/time.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/arm-soc-for-next-contents.txt [new file with mode: 0644]
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-cm-t335.dts
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/am57xx-cl-som-am57x.dts
arch/arm/boot/dts/am57xx-sbc-am57x.dts
arch/arm/boot/dts/armada-370-mirabox.dts
arch/arm/boot/dts/armada-370-netgear-rn104.dts
arch/arm/boot/dts/armada-370-synology-ds213j.dts
arch/arm/boot/dts/armada-385-db-ap.dts
arch/arm/boot/dts/armada-388-gp.dts
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
arch/arm/boot/dts/at91-sama5d2_xplained.dts
arch/arm/boot/dts/at91-sama5d4_xplained.dts
arch/arm/boot/dts/at91-sama5d4ek.dts
arch/arm/boot/dts/at91sam9n12ek.dts
arch/arm/boot/dts/bcm2835-rpi-a.dts [new file with mode: 0644]
arch/arm/boot/dts/bcm2835-rpi.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/emev2.dtsi
arch/arm/boot/dts/exynos3250-monk.dts
arch/arm/boot/dts/exynos3250-rinato.dts
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos4210-origen.dts
arch/arm/boot/dts/exynos4210-smdkv310.dts
arch/arm/boot/dts/exynos4210-trats.dts
arch/arm/boot/dts/exynos4210-universal_c210.dts
arch/arm/boot/dts/exynos4412-odroid-common.dtsi
arch/arm/boot/dts/exynos4412-odroidx.dts
arch/arm/boot/dts/exynos4412-origen.dts
arch/arm/boot/dts/exynos4412-smdk4412.dts
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/exynos5.dtsi
arch/arm/boot/dts/exynos5250-arndale.dts
arch/arm/boot/dts/exynos5250-snow-common.dtsi
arch/arm/boot/dts/exynos5250-spring.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/exynos5410-pinctrl.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos5410-smdk5410.dts
arch/arm/boot/dts/exynos5410.dtsi
arch/arm/boot/dts/exynos5420-arndale-octa.dts
arch/arm/boot/dts/exynos5420-cpus.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos5420-peach-pit.dts
arch/arm/boot/dts/exynos5420-smdk5420.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/exynos5422-cpus.dtsi
arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
arch/arm/boot/dts/exynos5800-peach-pi.dts
arch/arm/boot/dts/exynos5800.dtsi
arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
arch/arm/boot/dts/imx28-apf28dev.dts
arch/arm/boot/dts/imx28-eukrea-mbmx28lc.dtsi
arch/arm/boot/dts/imx28-tx28.dts
arch/arm/boot/dts/imx35-eukrea-mbimxsd35-baseboard.dts
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi
arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
arch/arm/boot/dts/imx53-ard.dts
arch/arm/boot/dts/imx53-qsb-common.dtsi
arch/arm/boot/dts/imx53-tx53-x03x.dts
arch/arm/boot/dts/imx53-tx53-x13x.dts
arch/arm/boot/dts/imx53-tx53.dtsi
arch/arm/boot/dts/imx6dl-tx6u-811x.dts
arch/arm/boot/dts/imx6q-gk802.dts
arch/arm/boot/dts/imx6q-icore-rqs.dts [new file with mode: 0644]
arch/arm/boot/dts/imx6q-tx6q-1110.dts
arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
arch/arm/boot/dts/imx6qdl-gw552x.dtsi
arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx6qdl-microsom.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
arch/arm/boot/dts/imx6qdl-tx6.dtsi
arch/arm/boot/dts/imx6qdl-udoo.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl-warp.dts
arch/arm/boot/dts/imx6sx-sabreauto.dts
arch/arm/boot/dts/imx6sx-sdb.dtsi
arch/arm/boot/dts/imx6ul-14x14-evk.dts
arch/arm/boot/dts/imx7d-sbc-imx7.dts
arch/arm/boot/dts/imx7d-sdb.dts
arch/arm/boot/dts/imx7d.dtsi
arch/arm/boot/dts/k2g-evm.dts [new file with mode: 0644]
arch/arm/boot/dts/k2g.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-ds112.dts
arch/arm/boot/dts/kirkwood-linkstation-6282.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-duo-6281.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lsqvl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lsvl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lswsxl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lswvl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lswxl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-lswvl.dts [deleted file]
arch/arm/boot/dts/kirkwood-lswxl.dts [deleted file]
arch/arm/boot/dts/kirkwood-openrd-client.dts
arch/arm/boot/dts/kirkwood-openrd.dtsi
arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts
arch/arm/boot/dts/kirkwood.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/mvebu-linkstation-fan.dtsi [new file with mode: 0644]
arch/arm/boot/dts/mvebu-linkstation-gpio-simple.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/omap3-n950.dts
arch/arm/boot/dts/omap34xx.dtsi
arch/arm/boot/dts/omap36xx.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/orion5x-linkstation-lsgl.dts [new file with mode: 0644]
arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
arch/arm/boot/dts/orion5x-linkstation.dtsi [new file with mode: 0644]
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/qcom-apq8084.dtsi
arch/arm/boot/dts/qcom-ipq8064.dtsi
arch/arm/boot/dts/qcom-msm8660.dtsi
arch/arm/boot/dts/qcom-msm8974.dtsi
arch/arm/boot/dts/r7s72100.dtsi
arch/arm/boot/dts/r8a73a4.dtsi
arch/arm/boot/dts/r8a7740.dtsi
arch/arm/boot/dts/r8a7778-bockw.dts
arch/arm/boot/dts/r8a7778.dtsi
arch/arm/boot/dts/r8a7779-marzen.dts
arch/arm/boot/dts/r8a7779.dtsi
arch/arm/boot/dts/r8a7790-lager.dts
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/r8a7791-porter.dts
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/r8a7793-gose.dts
arch/arm/boot/dts/r8a7793.dtsi
arch/arm/boot/dts/r8a7794-alt.dts
arch/arm/boot/dts/r8a7794-silk.dts
arch/arm/boot/dts/r8a7794.dtsi
arch/arm/boot/dts/rk3036-kylin.dts
arch/arm/boot/dts/rk3036.dtsi
arch/arm/boot/dts/rk3066a-bqcurie2.dts
arch/arm/boot/dts/rk3066a-marsboard.dts
arch/arm/boot/dts/rk3066a-rayeager.dts
arch/arm/boot/dts/rk3066a.dtsi
arch/arm/boot/dts/rk3188-radxarock.dts
arch/arm/boot/dts/rk3188.dtsi
arch/arm/boot/dts/rk3288-evb.dtsi
arch/arm/boot/dts/rk3288-firefly.dtsi
arch/arm/boot/dts/rk3288-popmetal.dts
arch/arm/boot/dts/rk3288-r89.dts
arch/arm/boot/dts/rk3288-rock2-som.dtsi
arch/arm/boot/dts/rk3288-rock2-square.dts
arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
arch/arm/boot/dts/rk3288-veyron.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/rk3xxx.dtsi
arch/arm/boot/dts/s5pv210-aquila.dts
arch/arm/boot/dts/s5pv210-goni.dts
arch/arm/boot/dts/s5pv210-smdkv210.dts
arch/arm/boot/dts/sama5d4.dtsi
arch/arm/boot/dts/sh73a0.dtsi
arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
arch/arm/boot/dts/sun4i-a10-inet97fv2.dts
arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts
arch/arm/boot/dts/sun5i-r8-chip.dts
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
arch/arm/boot/dts/sun7i-a20-itead-ibox.dts [new file with mode: 0644]
arch/arm/boot/dts/sun8i-a23-a33.dtsi
arch/arm/boot/dts/sun8i-a23.dtsi
arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
arch/arm/boot/dts/sun8i-a33.dtsi
arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts [new file with mode: 0644]
arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts [new file with mode: 0644]
arch/arm/boot/dts/sun8i-a83t.dtsi [new file with mode: 0644]
arch/arm/boot/dts/sun8i-h3.dtsi
arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
arch/arm/boot/dts/sun9i-a80-optimus.dts
arch/arm/boot/dts/sun9i-a80.dtsi
arch/arm/boot/dts/sunxi-itead-core-common.dtsi [new file with mode: 0644]
arch/arm/boot/dts/tegra114.dtsi
arch/arm/boot/dts/tegra124-jetson-tk1.dts
arch/arm/boot/dts/tegra124.dtsi
arch/arm/boot/dts/tegra20.dtsi
arch/arm/boot/dts/tegra30.dtsi
arch/arm/boot/dts/vf-colibri.dtsi
arch/arm/boot/dts/vfxxx.dtsi
arch/arm/common/icst.c
arch/arm/configs/exynos_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/mvebu_v5_defconfig
arch/arm/configs/mvebu_v7_defconfig
arch/arm/configs/mxs_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/shmobile_defconfig
arch/arm/include/asm/div64.h
arch/arm/include/asm/kvm_asm.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/pci.h
arch/arm/include/debug/imx.S
arch/arm/include/debug/palmchip.S [new file with mode: 0644]
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/entry-armv.S
arch/arm/kernel/hibernate.c
arch/arm/kernel/irq.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/reboot.c
arch/arm/kernel/topology.c
arch/arm/kernel/vmlinux-xip.lds.S [new file with mode: 0644]
arch/arm/kernel/vmlinux.lds.S
arch/arm/kvm/arm.c
arch/arm/mach-cns3xxx/Makefile.boot [deleted file]
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/Makefile
arch/arm/mach-exynos/Makefile.boot [deleted file]
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/include/mach/map.h
arch/arm/mach-exynos/mcpm-exynos.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-exynos/pm.c
arch/arm/mach-exynos/pmu.c [deleted file]
arch/arm/mach-exynos/regs-srom.h [deleted file]
arch/arm/mach-exynos/suspend.c
arch/arm/mach-imx/3ds_debugboard.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/anatop.c
arch/arm/mach-imx/avic.c
arch/arm/mach-imx/cpu-imx27.c
arch/arm/mach-imx/cpu-imx31.c
arch/arm/mach-imx/cpu-imx35.c
arch/arm/mach-imx/cpu.c
arch/arm/mach-imx/epit.c
arch/arm/mach-imx/headsmp.S
arch/arm/mach-imx/iomux-imx31.c
arch/arm/mach-imx/iomux-v1.c
arch/arm/mach-imx/iomux-v3.c
arch/arm/mach-imx/mach-armadillo5x0.c
arch/arm/mach-imx/mach-imx51.c
arch/arm/mach-imx/mach-mx27ads.c
arch/arm/mach-imx/mach-mx31ads.c
arch/arm/mach-imx/mach-mx31moboard.c
arch/arm/mach-imx/mach-qong.c
arch/arm/mach-imx/mxc.h
arch/arm/mach-imx/pm-imx27.c
arch/arm/mach-imx/pm-imx3.c
arch/arm/mach-imx/pm-imx5.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-imx/system.c
arch/arm/mach-imx/tzic.c
arch/arm/mach-integrator/Kconfig
arch/arm/mach-integrator/Makefile.boot [deleted file]
arch/arm/mach-keystone/Makefile.boot [deleted file]
arch/arm/mach-keystone/keystone.c
arch/arm/mach-mmp/Makefile.boot [deleted file]
arch/arm/mach-mv78xx0/Kconfig
arch/arm/mach-mv78xx0/Makefile.boot [deleted file]
arch/arm/mach-mvebu/platsmp.c
arch/arm/mach-netx/Kconfig
arch/arm/mach-nspire/Makefile.boot [deleted file]
arch/arm/mach-omap2/Makefile.boot [deleted file]
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-omap2/sleep34xx.S
arch/arm/mach-omap2/sleep44xx.S
arch/arm/mach-orion5x/Makefile.boot [deleted file]
arch/arm/mach-prima2/Makefile.boot [deleted file]
arch/arm/mach-qcom/Kconfig
arch/arm/mach-realview/Kconfig
arch/arm/mach-realview/Makefile.boot [deleted file]
arch/arm/mach-realview/platsmp-dt.c
arch/arm/mach-s3c64xx/Kconfig
arch/arm/mach-s3c64xx/Makefile.boot [deleted file]
arch/arm/mach-shmobile/common.h
arch/arm/mach-shmobile/cpufreq.c
arch/arm/mach-shmobile/emev2.h [new file with mode: 0644]
arch/arm/mach-shmobile/headsmp-scu.S
arch/arm/mach-shmobile/platsmp-scu.c
arch/arm/mach-shmobile/setup-emev2.c
arch/arm/mach-shmobile/setup-r8a7740.c
arch/arm/mach-shmobile/setup-rcar-gen2.c
arch/arm/mach-shmobile/smp-emev2.c
arch/arm/mach-shmobile/smp-r8a7779.c
arch/arm/mach-shmobile/smp-sh73a0.c
arch/arm/mach-shmobile/suspend.c
arch/arm/mach-shmobile/timer.c
arch/arm/mach-spear/Makefile.boot [deleted file]
arch/arm/mach-sunxi/sunxi.c
arch/arm/mach-tango/Kconfig
arch/arm/mach-tango/platsmp.c
arch/arm/mach-u300/Makefile.boot [deleted file]
arch/arm/mach-ux500/Makefile.boot [deleted file]
arch/arm/mach-zynq/Makefile.boot [deleted file]
arch/arm/mm/Kconfig
arch/arm/mm/cache-tauros2.c
arch/arm/mm/idmap.c
arch/arm/mm/init.c
arch/arm/plat-orion/time.c
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-samsung/include/plat/map-s5p.h
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/Makefile
arch/arm64/boot/dts/amd/Makefile
arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts [new file with mode: 0644]
arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts [new file with mode: 0644]
arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/amd/husky.dts [new file with mode: 0644]
arch/arm64/boot/dts/arm/juno-base.dtsi
arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
arch/arm64/boot/dts/nvidia/tegra132.dtsi
arch/arm64/boot/dts/nvidia/tegra210.dtsi
arch/arm64/boot/dts/qcom/Makefile
arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
arch/arm64/boot/dts/qcom/msm8916.dtsi
arch/arm64/boot/dts/qcom/msm8996-mtp.dts [new file with mode: 0644]
arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/qcom/msm8996.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/qcom/pm8004.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/qcom/pm8994.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/qcom/pmi8994.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
arch/arm64/boot/dts/renesas/r8a7795.dtsi
arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
arch/arm64/boot/dts/rockchip/rk3368-r88.dts
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/boot.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/fixmap.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/hardirq.h
arch/arm64/include/asm/kasan.h
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/pci.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/smp.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/acpi_parking_protocol.c [new file with mode: 0644]
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/head.S
arch/arm64/kernel/image.h
arch/arm64/kernel/pci.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/hyp.S
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/inject_fault.c
arch/arm64/kvm/sys_regs.c
arch/arm64/lib/copy_page.S
arch/arm64/mm/dump.c
arch/arm64/mm/init.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/pageattr.c
arch/arm64/mm/proc-macros.S
arch/arm64/mm/proc.S
arch/h8300/boot/dts/edosk2674.dts
arch/h8300/boot/dts/h8300h_sim.dts
arch/h8300/boot/dts/h8s_sim.dts
arch/m32r/Kconfig
arch/m68k/coldfire/device.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/metag/kernel/ftrace.c
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/syscall_table.S
arch/mips/Kconfig
arch/mips/ath79/Kconfig
arch/mips/ath79/setup.c
arch/mips/bcm63xx/nvram.c
arch/mips/boot/compressed/Makefile
arch/mips/boot/dts/brcm/bcm6328.dtsi
arch/mips/boot/dts/brcm/bcm7125.dtsi
arch/mips/boot/dts/brcm/bcm7346.dtsi
arch/mips/boot/dts/brcm/bcm7358.dtsi
arch/mips/boot/dts/brcm/bcm7360.dtsi
arch/mips/boot/dts/brcm/bcm7362.dtsi
arch/mips/boot/dts/brcm/bcm7420.dtsi
arch/mips/boot/dts/brcm/bcm7425.dtsi
arch/mips/boot/dts/brcm/bcm7435.dtsi
arch/mips/boot/dts/qca/Makefile
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/highmem.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
arch/mips/include/asm/mach-jz4740/jz4740_nand.h
arch/mips/include/asm/pci.h
arch/mips/loongson64/loongson-3/smp.c
arch/mips/pmcs-msp71xx/msp_setup.c
arch/mips/vr41xx/common/pmu.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/eeh.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/trace.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/eeh_pe.c
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kvm/book3s_64_mmu.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/perf/power8-pmu.c
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/s390/include/asm/irqflags.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/pci_io.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/ptrace.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/compat_wrapper.c
arch/s390/kernel/crash_dump.c
arch/s390/kernel/debug.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/early.c
arch/s390/kernel/ftrace.c
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/process.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/traps.c
arch/s390/kvm/Kconfig
arch/s390/kvm/Makefile
arch/s390/kvm/guestdbg.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/s390/mm/mmap.c
arch/s390/mm/pgtable.c
arch/s390/numa/numa.c
arch/s390/oprofile/backtrace.c
arch/s390/pci/pci.c
arch/s390/pci/pci_event.c
arch/sh/include/asm/barrier.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/entry.S
arch/sparc/kernel/hvcalls.S
arch/sparc/kernel/sparc_ksyms_64.c
arch/sparc/kernel/syscalls.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/tile/kernel/kgdb.c
arch/um/include/asm/page.h
arch/unicore32/include/asm/pci.h
arch/x86/Kconfig
arch/x86/include/asm/irq.h
arch/x86/include/asm/pgtable_types.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
arch/x86/kernel/head64.c
arch/x86/kernel/irq.c
arch/x86/mm/hugetlbpage.c
arch/x86/mm/pageattr.c
arch/x86/pci/common.c
arch/x86/platform/efi/quirks.c
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/platform/intel-quark/imr.c
arch/x86/um/os-Linux/task_size.c
arch/xtensa/Kconfig
arch/xtensa/include/asm/io.h
arch/xtensa/include/asm/processor.h
arch/xtensa/include/asm/timex.h
arch/xtensa/kernel/traps.c
arch/xtensa/mm/Makefile
arch/xtensa/mm/ioremap.c [new file with mode: 0644]
block/blk-merge.c
block/ioctl.c
block/partition-generic.c
crypto/Kconfig
crypto/algif_hash.c
crypto/algif_skcipher.c
crypto/crypto_user.c
crypto/shash.c
drivers/acpi/acpi_lpss.c
drivers/acpi/video_detect.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_brcmstb.c
drivers/ata/ahci_xgene.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/ata/pata_macio.c
drivers/base/component.c
drivers/base/platform-msi.c
drivers/base/platform.c
drivers/base/power/common.c
drivers/base/power/domain.c
drivers/base/power/opp/core.c
drivers/base/power/opp/opp.h
drivers/base/regmap/regmap-mmio.c
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_chipcommon_sflash.c
drivers/bcma/driver_gpio.c
drivers/bcma/driver_mips.c
drivers/bcma/host_pci.c
drivers/bcma/scan.c
drivers/bluetooth/ath3k.c
drivers/bus/Kconfig
drivers/bus/sunxi-rsb.c
drivers/bus/vexpress-config.c
drivers/char/agp/uninorth-agp.c
drivers/char/hw_random/Kconfig
drivers/char/ipmi/ipmi_si_intf.c
drivers/clk/rockchip/clk-rk3036.c
drivers/clk/rockchip/clk-rk3188.c
drivers/clk/rockchip/clk-rk3228.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/rockchip/clk-rk3368.c
drivers/clk/rockchip/clk.c
drivers/clk/rockchip/clk.h
drivers/clk/sunxi/clk-a10-hosc.c
drivers/clk/sunxi/clk-a20-gmac.c
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-factors.h
drivers/clk/sunxi/clk-mod0.c
drivers/clk/sunxi/clk-simple-gates.c
drivers/clk/sunxi/clk-sun6i-apb0-gates.c
drivers/clk/sunxi/clk-sun6i-ar100.c
drivers/clk/sunxi/clk-sun8i-bus-gates.c
drivers/clk/sunxi/clk-sun8i-mbus.c
drivers/clk/sunxi/clk-sun9i-core.c
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/sunxi/clk-usb.c
drivers/clk/tegra/clk-emc.c
drivers/clk/tegra/clk-id.h
drivers/clk/tegra/clk-pll.c
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra-super-gen4.c
drivers/clk/tegra/clk-tegra210.c
drivers/clocksource/Kconfig
drivers/clocksource/tcb_clksrc.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_performance.c
drivers/cpufreq/cpufreq_powersave.c
drivers/cpufreq/cpufreq_userspace.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpuidle/coupled.c
drivers/cpuidle/cpuidle.c
drivers/crypto/atmel-aes.c
drivers/crypto/atmel-sha.c
drivers/crypto/marvell/cesa.c
drivers/dma/dmaengine.c
drivers/dma/dw/pci.c
drivers/dma/dw/regs.h
drivers/dma/edma.c
drivers/dma/ep93xx_dma.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/prep.c
drivers/dma/pl330.c
drivers/dma/sh/Kconfig
drivers/firmware/psci.c
drivers/firmware/qcom_scm-32.c
drivers/firmware/qcom_scm-64.c
drivers/firmware/qcom_scm.c
drivers/firmware/qcom_scm.h
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/etnaviv/common.xml.h
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_dump.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem.h
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/state_hi.xml.h
drivers/gpu/drm/exynos/exynos_dp_core.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i2c/adv7511.c
drivers/gpu/drm/i2c/adv7511.h
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_audio.h
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/vce_v1_0.c
drivers/gpu/drm/rockchip/Makefile
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/vc4/vc4_v3d.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/hid/hid-core.c
drivers/hid/hid-dr.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-rmi.c
drivers/hid/hid-sony.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/emc2103.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/ltc2990.c [new file with mode: 0644]
drivers/hwmon/vexpress-hwmon.c [moved from drivers/hwmon/vexpress.c with 100% similarity]
drivers/hwspinlock/hwspinlock_core.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-piix4.c
drivers/ide/pdc202xx_new.c
drivers/ide/pmac.c
drivers/iio/accel/Kconfig
drivers/iio/adc/Kconfig
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/dac/mcp4725.c
drivers/iio/humidity/dht11.c
drivers/iio/imu/adis_buffer.c
drivers/iio/imu/inv_mpu6050/Kconfig
drivers/iio/inkern.c
drivers/iio/light/acpi-als.c
drivers/iio/light/ltr501.c
drivers/iio/pressure/mpl115.c
drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
drivers/infiniband/core/ud_header.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/adp5589-keys.c
drivers/input/keyboard/cap11xx.c
drivers/input/misc/Kconfig
drivers/input/misc/sirfsoc-onkey.c
drivers/input/mouse/vmmouse.c
drivers/input/serio/serio.c
drivers/input/touchscreen/colibri-vf50-ts.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mxs.c
drivers/irqchip/irq-s3c24xx.c
drivers/macintosh/macio_asic.c
drivers/mailbox/Kconfig
drivers/mailbox/pcc.c
drivers/md/bitmap.c
drivers/md/faulty.c
drivers/md/md-cluster.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/b2c2/flexcop-fe-tuner.c
drivers/media/common/b2c2/flexcop.c
drivers/media/common/cypress_firmware.c
drivers/media/common/cypress_firmware.h
drivers/media/dvb-core/dvb-usb-ids.h
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-core/dvbdev.c
drivers/media/dvb-frontends/af9013.c
drivers/media/dvb-frontends/af9033.c
drivers/media/dvb-frontends/bcm3510.c
drivers/media/dvb-frontends/bcm3510.h
drivers/media/dvb-frontends/bcm3510_priv.h
drivers/media/dvb-frontends/cxd2820r_core.c
drivers/media/dvb-frontends/dib0070.c
drivers/media/dvb-frontends/dib0090.c
drivers/media/dvb-frontends/dib3000.h
drivers/media/dvb-frontends/dib3000mb.c
drivers/media/dvb-frontends/dib3000mb_priv.h
drivers/media/dvb-frontends/dib3000mc.c
drivers/media/dvb-frontends/dib3000mc.h
drivers/media/dvb-frontends/dib7000m.c
drivers/media/dvb-frontends/dib7000p.c
drivers/media/dvb-frontends/dib8000.c
drivers/media/dvb-frontends/dib9000.c
drivers/media/dvb-frontends/dibx000_common.c
drivers/media/dvb-frontends/rtl2830.c
drivers/media/dvb-frontends/si2165.c
drivers/media/dvb-frontends/stv0299.c
drivers/media/dvb-frontends/stv6110x.c
drivers/media/dvb-frontends/stv6110x.h
drivers/media/dvb-frontends/stv6110x_priv.h
drivers/media/dvb-frontends/tda1004x.c
drivers/media/dvb-frontends/ts2020.c
drivers/media/i2c/adv7604.c
drivers/media/i2c/ir-kbd-i2c.c
drivers/media/i2c/msp3400-driver.c
drivers/media/i2c/msp3400-driver.h
drivers/media/i2c/mt9v011.c
drivers/media/i2c/mt9v032.c
drivers/media/i2c/ov2659.c
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/i2c/s5c73m3/s5c73m3-spi.c
drivers/media/i2c/s5k5baf.c
drivers/media/i2c/s5k6a3.c
drivers/media/i2c/saa7115.c
drivers/media/i2c/soc_camera/mt9m001.c
drivers/media/i2c/soc_camera/mt9t031.c
drivers/media/i2c/soc_camera/mt9v022.c
drivers/media/i2c/tc358743.c
drivers/media/i2c/tvp514x.c
drivers/media/i2c/tvp5150.c
drivers/media/i2c/tvp7002.c
drivers/media/i2c/vpx3220.c
drivers/media/media-device.c
drivers/media/media-devnode.c
drivers/media/media-entity.c
drivers/media/pci/b2c2/flexcop-pci.c
drivers/media/pci/bt8xx/bttv-driver.c
drivers/media/pci/bt8xx/dvb-bt8xx.c
drivers/media/pci/ddbridge/ddbridge-core.c
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
drivers/media/pci/ngene/ngene-cards.c
drivers/media/pci/saa7134/saa7134-alsa.c
drivers/media/pci/saa7134/saa7134-empress.c
drivers/media/pci/saa7134/saa7134-go7007.c
drivers/media/pci/saa7134/saa7134-video.c
drivers/media/pci/ttpci/av7110.c
drivers/media/pci/ttpci/budget.c
drivers/media/platform/Kconfig
drivers/media/platform/Makefile
drivers/media/platform/coda/coda-bit.c
drivers/media/platform/davinci/dm644x_ccdc.c
drivers/media/platform/exynos-gsc/gsc-m2m.c
drivers/media/platform/exynos4-is/Kconfig
drivers/media/platform/exynos4-is/fimc-is.c
drivers/media/platform/exynos4-is/fimc-isp-video.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/exynos4-is/mipi-csis.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/ispccdc.c
drivers/media/platform/omap3isp/isppreview.c
drivers/media/platform/omap3isp/ispvideo.c
drivers/media/platform/omap3isp/omap3isp.h
drivers/media/platform/rcar_jpu.c
drivers/media/platform/soc_camera/atmel-isi.c
drivers/media/platform/soc_camera/rcar_vin.c
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
drivers/media/platform/soc_camera/soc_camera.c
drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
drivers/media/platform/ti-vpe/Makefile
drivers/media/platform/ti-vpe/cal.c [new file with mode: 0644]
drivers/media/platform/ti-vpe/cal_regs.h [new file with mode: 0644]
drivers/media/platform/vim2m.c
drivers/media/platform/vivid/vivid-tpg.h
drivers/media/platform/vsp1/vsp1_drv.c
drivers/media/platform/vsp1/vsp1_video.c
drivers/media/radio/radio-si476x.c
drivers/media/radio/wl128x/fmdrv_common.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/rc/rc-core-priv.h
drivers/media/rc/rc-ir-raw.c
drivers/media/tuners/m88rs6000t.c
drivers/media/tuners/r820t.c
drivers/media/tuners/si2157.c
drivers/media/tuners/tuner-xc2028.c
drivers/media/usb/as102/as102_drv.h
drivers/media/usb/as102/as102_usb_drv.c
drivers/media/usb/au0828/au0828-core.c
drivers/media/usb/au0828/au0828-dvb.c
drivers/media/usb/b2c2/flexcop-usb.c
drivers/media/usb/cx231xx/cx231xx-417.c
drivers/media/usb/cx231xx/cx231xx-audio.c
drivers/media/usb/cx231xx/cx231xx-cards.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/dvb-usb-v2/af9035.h
drivers/media/usb/dvb-usb-v2/dvb_usb.h
drivers/media/usb/dvb-usb-v2/dvb_usb_common.h
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
drivers/media/usb/dvb-usb-v2/dvbsky.c
drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
drivers/media/usb/dvb-usb-v2/mxl111sf.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/dvb-usb-v2/usb_urb.c
drivers/media/usb/dvb-usb/a800.c
drivers/media/usb/dvb-usb/cxusb.c
drivers/media/usb/dvb-usb/dib0700_core.c
drivers/media/usb/dvb-usb/dib0700_devices.c
drivers/media/usb/dvb-usb/dibusb-common.c
drivers/media/usb/dvb-usb/dibusb-mb.c
drivers/media/usb/dvb-usb/dibusb-mc.c
drivers/media/usb/dvb-usb/dibusb.h
drivers/media/usb/dvb-usb/digitv.c
drivers/media/usb/dvb-usb/dtt200u-fe.c
drivers/media/usb/dvb-usb/dtt200u.c
drivers/media/usb/dvb-usb/dtt200u.h
drivers/media/usb/dvb-usb/dvb-usb-common.h
drivers/media/usb/dvb-usb/dvb-usb-dvb.c
drivers/media/usb/dvb-usb/dvb-usb-firmware.c
drivers/media/usb/dvb-usb/dvb-usb-i2c.c
drivers/media/usb/dvb-usb/dvb-usb-init.c
drivers/media/usb/dvb-usb/dvb-usb-remote.c
drivers/media/usb/dvb-usb/dvb-usb-urb.c
drivers/media/usb/dvb-usb/dvb-usb.h
drivers/media/usb/dvb-usb/dw2102.c
drivers/media/usb/dvb-usb/nova-t-usb2.c
drivers/media/usb/dvb-usb/technisat-usb2.c
drivers/media/usb/dvb-usb/ttusb2.c
drivers/media/usb/dvb-usb/umt-010.c
drivers/media/usb/dvb-usb/usb-urb.c
drivers/media/usb/dvb-usb/vp702x-fe.c
drivers/media/usb/dvb-usb/vp702x.c
drivers/media/usb/dvb-usb/vp7045-fe.c
drivers/media/usb/dvb-usb/vp7045.c
drivers/media/usb/dvb-usb/vp7045.h
drivers/media/usb/em28xx/em28xx-camera.c
drivers/media/usb/em28xx/em28xx-cards.c
drivers/media/usb/em28xx/em28xx-dvb.c
drivers/media/usb/em28xx/em28xx-video.c
drivers/media/usb/em28xx/em28xx.h
drivers/media/usb/go7007/go7007-priv.h
drivers/media/usb/go7007/go7007-usb.c
drivers/media/usb/hdpvr/hdpvr-core.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/media/usb/msi2500/msi2500.c
drivers/media/usb/pwc/pwc-if.c
drivers/media/usb/stk1160/stk1160-video.c
drivers/media/usb/usbtv/usbtv-video.c
drivers/media/usb/usbtv/usbtv.h
drivers/media/usb/usbvision/usbvision-video.c
drivers/media/v4l2-core/Kconfig
drivers/media/v4l2-core/tuner-core.c
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/media/v4l2-core/v4l2-of.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/memory/fsl_ifc.c
drivers/mfd/db8500-prcmu.c
drivers/misc/cxl/pci.c
drivers/misc/mei/main.c
drivers/mtd/bcm47xxpart.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/lpc32xx_mlc.c
drivers/mtd/nand/mpc5121_nfc.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nuc900_nand.c
drivers/mtd/nand/plat_nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/s3c2410.c
drivers/mtd/nand/sunxi_nand.c
drivers/mtd/nand/vf610_nfc.c
drivers/mtd/onenand/onenand_bbt.c
drivers/mtd/spi-nor/Kconfig
drivers/mtd/spi-nor/mtk-quadspi.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/vnic_dev.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ezchip/Kconfig
drivers/net/ethernet/freescale/Makefile
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_enet.h
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.c
drivers/net/ethernet/intel/i40e/i40e_devids.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/moxa/moxart_ether.h
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/samsung/sxgbe/Makefile
drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c [deleted file]
drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h [deleted file]
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/synopsys/dwc_eth_qos.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/fddi/defxx.c
drivers/net/geneve.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/irda/bfin_sir.h
drivers/net/macvlan.c
drivers/net/phy/Kconfig
drivers/net/phy/dp83640.c
drivers/net/phy/phy.c
drivers/net/phy/smsc.c
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/usb/lan78xx.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/eeprom.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/broadcom/b43/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/intel/iwlegacy/4965-mac.c
drivers/net/wireless/intel/iwlegacy/4965.h
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/dvm/led.c
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/power.c
drivers/net/wireless/intel/iwlwifi/mvm/quota.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/libertas/cfg.c
drivers/net/wireless/marvell/libertas/cmd.c
drivers/net/wireless/marvell/libertas/cmdresp.c
drivers/net/wireless/marvell/libertas/dev.h
drivers/net/wireless/marvell/libertas/if_sdio.c
drivers/net/wireless/marvell/libertas/if_usb.c
drivers/net/wireless/marvell/libertas/main.c
drivers/net/wireless/marvell/mwifiex/README
drivers/net/wireless/marvell/mwifiex/cfg80211.c
drivers/net/wireless/marvell/mwifiex/cmdevt.c
drivers/net/wireless/marvell/mwifiex/debugfs.c
drivers/net/wireless/marvell/mwifiex/decl.h
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/init.c
drivers/net/wireless/marvell/mwifiex/ioctl.h
drivers/net/wireless/marvell/mwifiex/join.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/main.h
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/pcie.h
drivers/net/wireless/marvell/mwifiex/scan.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
drivers/net/wireless/marvell/mwifiex/sta_event.c
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
drivers/net/wireless/marvell/mwifiex/wmm.c
drivers/net/wireless/marvell/mwl8k.c
drivers/net/wireless/mediatek/mt7601u/main.c
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.h
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00config.c
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt61pci.c
drivers/net/wireless/ralink/rt2x00/rt61pci.h
drivers/net/wireless/ralink/rt2x00/rt73usb.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtlwifi/rc.c
drivers/net/wireless/realtek/rtlwifi/regd.c
drivers/net/wireless/rsi/rsi_91x_mac80211.c
drivers/net/wireless/st/cw1200/sta.c
drivers/net/wireless/st/cw1200/sta.h
drivers/net/wireless/ti/wlcore/Kconfig
drivers/net/wireless/ti/wlcore/event.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pfn_devs.c
drivers/nvmem/core.c
drivers/nvmem/qfprom.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/of_mdio.c
drivers/of/of_pci.c
drivers/pci/bus.c
drivers/pci/host/pci-imx6.c
drivers/pci/host/pci-layerscape.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-iproc.c
drivers/pci/host/pcie-rcar.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci.c
drivers/pci/pcie/aer/aer_inject.c
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/aer/aerdrv.h
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/pme.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/pnp/quirks.c
drivers/ptp/ptp_ixp46x.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-max77686.c
drivers/rtc/rtc-max77802.c [deleted file]
drivers/s390/cio/chp.c
drivers/s390/cio/chp.h
drivers/s390/cio/chsc.c
drivers/s390/crypto/zcrypt_error.h
drivers/s390/crypto/zcrypt_msgtype50.c
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/scsi/hisi_sas/Kconfig
drivers/scsi/mac53c94.c
drivers/scsi/mesh.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/soc/Kconfig
drivers/soc/Makefile
drivers/soc/qcom/smd.c
drivers/soc/qcom/spm.c
drivers/soc/rockchip/pm_domains.c
drivers/soc/samsung/Kconfig [new file with mode: 0644]
drivers/soc/samsung/Makefile [new file with mode: 0644]
drivers/soc/samsung/exynos-pmu.c [new file with mode: 0644]
drivers/soc/samsung/exynos-pmu.h [new file with mode: 0644]
drivers/soc/samsung/exynos-srom.c [new file with mode: 0644]
drivers/soc/samsung/exynos-srom.h [new file with mode: 0644]
drivers/soc/samsung/exynos3250-pmu.c [new file with mode: 0644]
drivers/soc/samsung/exynos4-pmu.c [new file with mode: 0644]
drivers/soc/samsung/exynos5250-pmu.c [new file with mode: 0644]
drivers/soc/samsung/exynos5420-pmu.c [new file with mode: 0644]
drivers/soc/sunxi/sunxi_sram.c
drivers/soc/tegra/Kconfig
drivers/soc/tegra/pmc.c
drivers/spi/spi-rockchip.c
drivers/spmi/spmi-pmic-arb.c
drivers/ssb/main.c
drivers/staging/iio/adc/Kconfig
drivers/staging/iio/meter/ade7753.c
drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
drivers/staging/media/lirc/lirc_parallel.c
drivers/staging/mt29f_spinand/mt29f_spinand.h
drivers/staging/panel/panel.c
drivers/staging/rdma/Kconfig
drivers/staging/rdma/Makefile
drivers/staging/rdma/amso1100/Kbuild [deleted file]
drivers/staging/rdma/amso1100/Kconfig [deleted file]
drivers/staging/rdma/amso1100/TODO [deleted file]
drivers/staging/rdma/amso1100/c2.c [deleted file]
drivers/staging/rdma/amso1100/c2.h [deleted file]
drivers/staging/rdma/amso1100/c2_ae.c [deleted file]
drivers/staging/rdma/amso1100/c2_ae.h [deleted file]
drivers/staging/rdma/amso1100/c2_alloc.c [deleted file]
drivers/staging/rdma/amso1100/c2_cm.c [deleted file]
drivers/staging/rdma/amso1100/c2_cq.c [deleted file]
drivers/staging/rdma/amso1100/c2_intr.c [deleted file]
drivers/staging/rdma/amso1100/c2_mm.c [deleted file]
drivers/staging/rdma/amso1100/c2_mq.c [deleted file]
drivers/staging/rdma/amso1100/c2_mq.h [deleted file]
drivers/staging/rdma/amso1100/c2_pd.c [deleted file]
drivers/staging/rdma/amso1100/c2_provider.c [deleted file]
drivers/staging/rdma/amso1100/c2_provider.h [deleted file]
drivers/staging/rdma/amso1100/c2_qp.c [deleted file]
drivers/staging/rdma/amso1100/c2_rnic.c [deleted file]
drivers/staging/rdma/amso1100/c2_status.h [deleted file]
drivers/staging/rdma/amso1100/c2_user.h [deleted file]
drivers/staging/rdma/amso1100/c2_vq.c [deleted file]
drivers/staging/rdma/amso1100/c2_vq.h [deleted file]
drivers/staging/rdma/amso1100/c2_wr.h [deleted file]
drivers/staging/rdma/ehca/Kconfig [deleted file]
drivers/staging/rdma/ehca/Makefile [deleted file]
drivers/staging/rdma/ehca/TODO [deleted file]
drivers/staging/rdma/ehca/ehca_av.c [deleted file]
drivers/staging/rdma/ehca/ehca_classes.h [deleted file]
drivers/staging/rdma/ehca/ehca_classes_pSeries.h [deleted file]
drivers/staging/rdma/ehca/ehca_cq.c [deleted file]
drivers/staging/rdma/ehca/ehca_eq.c [deleted file]
drivers/staging/rdma/ehca/ehca_hca.c [deleted file]
drivers/staging/rdma/ehca/ehca_irq.c [deleted file]
drivers/staging/rdma/ehca/ehca_irq.h [deleted file]
drivers/staging/rdma/ehca/ehca_iverbs.h [deleted file]
drivers/staging/rdma/ehca/ehca_main.c [deleted file]
drivers/staging/rdma/ehca/ehca_mcast.c [deleted file]
drivers/staging/rdma/ehca/ehca_mrmw.c [deleted file]
drivers/staging/rdma/ehca/ehca_mrmw.h [deleted file]
drivers/staging/rdma/ehca/ehca_pd.c [deleted file]
drivers/staging/rdma/ehca/ehca_qes.h [deleted file]
drivers/staging/rdma/ehca/ehca_qp.c [deleted file]
drivers/staging/rdma/ehca/ehca_reqs.c [deleted file]
drivers/staging/rdma/ehca/ehca_sqp.c [deleted file]
drivers/staging/rdma/ehca/ehca_tools.h [deleted file]
drivers/staging/rdma/ehca/ehca_uverbs.c [deleted file]
drivers/staging/rdma/ehca/hcp_if.c [deleted file]
drivers/staging/rdma/ehca/hcp_if.h [deleted file]
drivers/staging/rdma/ehca/hcp_phyp.c [deleted file]
drivers/staging/rdma/ehca/hcp_phyp.h [deleted file]
drivers/staging/rdma/ehca/hipz_fns.h [deleted file]
drivers/staging/rdma/ehca/hipz_fns_core.h [deleted file]
drivers/staging/rdma/ehca/hipz_hw.h [deleted file]
drivers/staging/rdma/ehca/ipz_pt_fn.c [deleted file]
drivers/staging/rdma/ehca/ipz_pt_fn.h [deleted file]
drivers/staging/rdma/ipath/Kconfig [deleted file]
drivers/staging/rdma/ipath/Makefile [deleted file]
drivers/staging/rdma/ipath/TODO [deleted file]
drivers/staging/rdma/ipath/ipath_common.h [deleted file]
drivers/staging/rdma/ipath/ipath_cq.c [deleted file]
drivers/staging/rdma/ipath/ipath_debug.h [deleted file]
drivers/staging/rdma/ipath/ipath_diag.c [deleted file]
drivers/staging/rdma/ipath/ipath_dma.c [deleted file]
drivers/staging/rdma/ipath/ipath_driver.c [deleted file]
drivers/staging/rdma/ipath/ipath_eeprom.c [deleted file]
drivers/staging/rdma/ipath/ipath_file_ops.c [deleted file]
drivers/staging/rdma/ipath/ipath_fs.c [deleted file]
drivers/staging/rdma/ipath/ipath_iba6110.c [deleted file]
drivers/staging/rdma/ipath/ipath_init_chip.c [deleted file]
drivers/staging/rdma/ipath/ipath_intr.c [deleted file]
drivers/staging/rdma/ipath/ipath_kernel.h [deleted file]
drivers/staging/rdma/ipath/ipath_keys.c [deleted file]
drivers/staging/rdma/ipath/ipath_mad.c [deleted file]
drivers/staging/rdma/ipath/ipath_mmap.c [deleted file]
drivers/staging/rdma/ipath/ipath_mr.c [deleted file]
drivers/staging/rdma/ipath/ipath_qp.c [deleted file]
drivers/staging/rdma/ipath/ipath_rc.c [deleted file]
drivers/staging/rdma/ipath/ipath_registers.h [deleted file]
drivers/staging/rdma/ipath/ipath_ruc.c [deleted file]
drivers/staging/rdma/ipath/ipath_sdma.c [deleted file]
drivers/staging/rdma/ipath/ipath_srq.c [deleted file]
drivers/staging/rdma/ipath/ipath_stats.c [deleted file]
drivers/staging/rdma/ipath/ipath_sysfs.c [deleted file]
drivers/staging/rdma/ipath/ipath_uc.c [deleted file]
drivers/staging/rdma/ipath/ipath_ud.c [deleted file]
drivers/staging/rdma/ipath/ipath_user_pages.c [deleted file]
drivers/staging/rdma/ipath/ipath_user_sdma.c [deleted file]
drivers/staging/rdma/ipath/ipath_user_sdma.h [deleted file]
drivers/staging/rdma/ipath/ipath_verbs.c [deleted file]
drivers/staging/rdma/ipath/ipath_verbs.h [deleted file]
drivers/staging/rdma/ipath/ipath_verbs_mcast.c [deleted file]
drivers/staging/rdma/ipath/ipath_wc_ppc64.c [deleted file]
drivers/staging/rdma/ipath/ipath_wc_x86_64.c [deleted file]
drivers/staging/speakup/Kconfig
drivers/staging/speakup/main.c
drivers/staging/speakup/selection.c
drivers/staging/speakup/serialio.c
drivers/thermal/Kconfig
drivers/thermal/of-thermal.c
drivers/thermal/rcar_thermal.c
drivers/thermal/spear_thermal.c
drivers/tty/n_tty.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/omap-serial.c
drivers/tty/tty_io.c
drivers/tty/tty_mutex.c
drivers/tty/vt/vt.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/core/hcd-pci.c
drivers/usb/core/hub.c
drivers/usb/dwc2/core.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/gadget.c
drivers/usb/host/xhci-ext-caps.h
drivers/usb/host/xhci-mtk-sch.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/ux500.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/mxu11x0.c
drivers/usb/serial/option.c
drivers/usb/serial/visor.c
drivers/vfio/vfio.c
drivers/video/fbdev/aty/aty128fb.c
drivers/video/fbdev/aty/radeon_base.c
drivers/video/fbdev/imsttfb.c
drivers/video/fbdev/matrox/matroxfb_base.h
drivers/video/fbdev/offb.c
drivers/virtio/virtio_pci_common.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/max63xx_wdt.c
drivers/watchdog/pcwd_usb.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/sun4v_wdt.c [new file with mode: 0644]
drivers/xen/tmem.c
drivers/zorro/zorro-sysfs.c
fs/Kconfig
fs/Makefile
fs/block_dev.c
fs/btrfs/async-thread.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/reada.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/sysfs.c
fs/btrfs/sysfs.h
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tree-log.c
fs/ceph/file.c
fs/compat_ioctl.c
fs/dax.c
fs/devpts/inode.c
fs/direct-io.c
fs/ecryptfs/main.c
fs/eventpoll.c
fs/ext4/crypto.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/dir.c
fs/f2fs/extent_cache.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/gfs2/aops.c
fs/gfs2/dir.c
fs/gfs2/glock.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/super.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/ocfs2/aops.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/ocfs2_trace.h
fs/ocfs2/quota_global.c
fs/orangefs/Kconfig [new file with mode: 0644]
fs/orangefs/Makefile [new file with mode: 0644]
fs/orangefs/acl.c [new file with mode: 0644]
fs/orangefs/dcache.c [new file with mode: 0644]
fs/orangefs/devorangefs-req.c [new file with mode: 0644]
fs/orangefs/dir.c [new file with mode: 0644]
fs/orangefs/downcall.h [new file with mode: 0644]
fs/orangefs/file.c [new file with mode: 0644]
fs/orangefs/inode.c [new file with mode: 0644]
fs/orangefs/namei.c [new file with mode: 0644]
fs/orangefs/orangefs-bufmap.c [new file with mode: 0644]
fs/orangefs/orangefs-bufmap.h [new file with mode: 0644]
fs/orangefs/orangefs-cache.c [new file with mode: 0644]
fs/orangefs/orangefs-debug.h [new file with mode: 0644]
fs/orangefs/orangefs-debugfs.c [new file with mode: 0644]
fs/orangefs/orangefs-debugfs.h [new file with mode: 0644]
fs/orangefs/orangefs-dev-proto.h [new file with mode: 0644]
fs/orangefs/orangefs-kernel.h [new file with mode: 0644]
fs/orangefs/orangefs-mod.c [new file with mode: 0644]
fs/orangefs/orangefs-sysfs.c [new file with mode: 0644]
fs/orangefs/orangefs-sysfs.h [new file with mode: 0644]
fs/orangefs/orangefs-utils.c [new file with mode: 0644]
fs/orangefs/protocol.h [new file with mode: 0644]
fs/orangefs/super.c [new file with mode: 0644]
fs/orangefs/symlink.c [new file with mode: 0644]
fs/orangefs/upcall.h [new file with mode: 0644]
fs/orangefs/waitqueue.c [new file with mode: 0644]
fs/orangefs/xattr.c [new file with mode: 0644]
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/quota/dquot.c
fs/quota/quota.c
fs/quota/quota_tree.c
fs/quota/quota_v2.c
fs/reiserfs/super.c
fs/timerfd.c
fs/udf/dir.c
fs/udf/namei.c
fs/udf/super.c
fs/udf/udfdecl.h
fs/udf/unicode.c
fs/xfs/libxfs/xfs_alloc_btree.c
fs/xfs/libxfs/xfs_attr_sf.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap_btree.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_da_format.h
fs/xfs/libxfs/xfs_dir2.c
fs/xfs/libxfs/xfs_ialloc_btree.c
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_inode_buf.h
fs/xfs/libxfs/xfs_inode_fork.c
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/libxfs/xfs_quota_defs.h
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/libxfs/xfs_sb.h
fs/xfs/libxfs/xfs_shared.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_dir2_readdir.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_fsops.h
fs/xfs/xfs_icache.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quotaops.c
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_trans_ail.c
fs/xfs/xfs_trans_buf.c
fs/xfs/xfs_trans_dquot.c
fs/xfs/xfs_trans_inode.c
include/acpi/cppc_acpi.h
include/asm-generic/fixmap.h
include/asm-generic/pci-bridge.h [deleted file]
include/asm-generic/pgtable.h
include/drm/drm_atomic_helper.h
include/drm/drm_cache.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_fixed.h
include/dt-bindings/clock/r8a7793-clock.h
include/dt-bindings/clock/rk3188-cru-common.h
include/dt-bindings/clock/tegra210-car.h
include/dt-bindings/power/rk3368-power.h [new file with mode: 0644]
include/linux/bcma/bcma.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/bpf.h
include/linux/ceph/ceph_features.h
include/linux/cgroup-defs.h
include/linux/cleancache.h
include/linux/compiler-clang.h
include/linux/cpufreq.h
include/linux/cpuset.h
include/linux/crush/crush.h
include/linux/dax.h
include/linux/devpts_fs.h
include/linux/dmaengine.h
include/linux/dqblk_qtree.h
include/linux/f2fs_fs.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/gfp.h
include/linux/hrtimer.h
include/linux/ieee80211.h
include/linux/if_team.h
include/linux/iommu.h
include/linux/irqdomain.h
include/linux/libata.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/module.h
include/linux/mtd/bbm.h
include/linux/mtd/inftl.h
include/linux/mtd/nand.h
include/linux/mtd/nftl.h
include/linux/netdevice.h
include/linux/nfs_fs.h
include/linux/of.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/pfn_t.h
include/linux/platform_data/mtd-nand-s3c2410.h
include/linux/pm_opp.h
include/linux/psci.h
include/linux/qcom_scm.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/radix-tree.h
include/linux/raid/pq.h
include/linux/rfkill.h
include/linux/rmap.h
include/linux/skbuff.h
include/linux/soc/qcom/smd.h
include/linux/soc/qcom/smem_state.h
include/linux/soc/samsung/exynos-pmu.h [moved from arch/arm/mach-exynos/exynos-pmu.h with 81% similarity]
include/linux/soc/samsung/exynos-regs-pmu.h [moved from arch/arm/mach-exynos/regs-pmu.h with 99% similarity]
include/linux/swiotlb.h
include/linux/tcp.h
include/linux/tty.h
include/linux/workqueue.h
include/media/tuner.h
include/media/v4l2-mc.h [new file with mode: 0644]
include/media/videobuf2-core.h
include/net/af_unix.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bond_3ad.h
include/net/cfg80211.h
include/net/dst_metadata.h
include/net/ip6_route.h
include/net/ip_tunnels.h
include/net/iw_handler.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack_core.h
include/net/netns/ipv4.h
include/net/scm.h
include/net/sctp/structs.h
include/net/sock.h
include/net/sock_reuseport.h
include/net/tcp.h
include/net/vxlan.h
include/sound/rawmidi.h
include/trace/events/fence.h
include/trace/events/power.h
include/trace/events/sunvnet.h [new file with mode: 0644]
include/uapi/drm/etnaviv_drm.h
include/uapi/linux/bpf.h
include/uapi/linux/dqblk_xfs.h
include/uapi/linux/ethtool.h
include/uapi/linux/fs.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/media.h
include/uapi/linux/nl80211.h
include/uapi/linux/quota.h
include/uapi/linux/rfkill.h
include/uapi/linux/v4l2-common.h
kernel/bpf/arraymap.c
kernel/bpf/hashtab.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/events/ring_buffer.c
kernel/futex.c
kernel/irq/handle.c
kernel/irq/irqdomain.c
kernel/locking/rtmutex.c
kernel/memremap.c
kernel/module.c
kernel/pid.c
kernel/power/Kconfig
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/seccomp.c
kernel/signal.c
kernel/time/hrtimer.c
kernel/time/itimer.c
kernel/time/ntp.c
kernel/time/posix-timers.c
kernel/time/tick-sched.c
kernel/time/timer_list.c
kernel/trace/bpf_trace.c
kernel/trace/power-traces.c
kernel/trace/trace.c
kernel/trace/trace_stack.c
kernel/workqueue.c
lib/Kconfig.debug
lib/debugobjects.c
lib/dump_stack.c
lib/klist.c
lib/radix-tree.c
lib/scatterlist.c
lib/test-string_helpers.c
mm/Kconfig
mm/backing-dev.c
mm/cleancache.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/memblock.c
mm/memory.c
mm/mempolicy.c
mm/mmap.c
mm/page_alloc.c
mm/util.c
mm/vmpressure.c
mm/vmscan.c
mm/vmstat.c
net/batman-adv/Kconfig
net/batman-adv/Makefile
net/batman-adv/bat_algo.h
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/fragmentation.h
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/multicast.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/sysfs.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/hci_core.c
net/bluetooth/hci_request.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/leds.c [new file with mode: 0644]
net/bluetooth/leds.h [new file with mode: 0644]
net/bluetooth/smp.c
net/bridge/br.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/ceph/crush/mapper.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/dev.c
net/core/ethtool.c
net/core/flow_dissector.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/core/scm.c
net/core/skbuff.c
net/core/sock_reuseport.c
net/core/sysctl_net_core.c
net/ipv4/Kconfig
net/ipv4/fib_trie.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_tunnel.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/ircomm/ircomm_param.c
net/iucv/af_iucv.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/driver-ops.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_pe_sip.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_netdev.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nft_byteorder.c
net/netfilter/nft_ct.c
net/netfilter/xt_TCPMSS.c
net/netlink/af_netlink.c
net/openvswitch/vport-vxlan.c
net/packet/af_packet.c
net/rfkill/core.c
net/sched/sch_drr.c
net/sctp/input.c
net/sctp/proc.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/transport.c
net/switchdev/switchdev.c
net/tipc/link.c
net/tipc/link.h
net/tipc/name_table.c
net/tipc/node.c
net/tipc/server.c
net/tipc/subscr.c
net/tipc/subscr.h
net/unix/af_unix.c
net/unix/garbage.c
net/wireless/Kconfig
net/wireless/core.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/sme.c
net/wireless/util.c
net/wireless/wext-core.c
samples/bpf/test_maps.c
samples/bpf/tracex2_kern.c
samples/bpf/tracex2_user.c
samples/bpf/tracex3_kern.c
samples/bpf/tracex3_user.c
scripts/Makefile.dtbinst
scripts/kconfig/Makefile
scripts/kconfig/confdata.c
scripts/link-vmlinux.sh
scripts/mod/modpost.c
scripts/prune-kernel [new file with mode: 0755]
security/keys/key.c
security/selinux/nlmsgtab.c
sound/core/Kconfig
sound/core/compress_offload.c
sound/core/oss/pcm_oss.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_init.c
sound/core/seq/oss/seq_oss_synth.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_ports.c
sound/core/seq/seq_timer.c
sound/core/seq/seq_virmidi.c
sound/core/timer.c
sound/drivers/dummy.c
sound/firewire/bebob/bebob_stream.c
sound/firewire/digi00x/amdtp-dot.c
sound/firewire/tascam/tascam-transaction.c
sound/firewire/tascam/tascam.c
sound/firewire/tascam/tascam.h
sound/isa/Kconfig
sound/pci/Kconfig
sound/pci/emu10k1/emu10k1_main.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_jack.c
sound/pci/hda/hda_jack.h
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/ppc/pmac.c
sound/sparc/Kconfig
sound/usb/quirks.c
tools/perf/Makefile.perf
tools/perf/arch/x86/tests/intel-cqm.c
tools/perf/config/Makefile
tools/perf/tests/make
tools/perf/ui/browsers/annotate.c
tools/perf/util/hist.c
tools/perf/util/session.c
tools/perf/util/stat.c
tools/perf/util/symbol.c
tools/testing/nvdimm/test/iomap.c
tools/testing/selftests/timers/valid-adjtimex.c
tools/virtio/asm/barrier.h
tools/virtio/linux/compiler.h [new file with mode: 0644]
tools/virtio/linux/kernel.h
tools/virtio/ringtest/Makefile [new file with mode: 0644]
tools/virtio/ringtest/README [new file with mode: 0644]
tools/virtio/ringtest/main.c [new file with mode: 0644]
tools/virtio/ringtest/main.h [new file with mode: 0644]
tools/virtio/ringtest/ring.c [new file with mode: 0644]
tools/virtio/ringtest/run-on-all.sh [new file with mode: 0755]
tools/virtio/ringtest/virtio_ring_0_9.c [new file with mode: 0644]
tools/virtio/ringtest/virtio_ring_poll.c [new file with mode: 0644]

index b1e9a97653dc64853775a97db377a8f269cc8d95..7e6c5334c337ae0c792a36ec5a2e9309b4128061 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
index ff60ad9eca4c7ba390e22bd0eda0378bfbe236f1..e736d145085f302b031f4aa40ff3ca0e0c5fe060 100644 (file)
@@ -18,12 +18,3 @@ Values:      A numeric value.
                2: RFKILL_STATE_HARD_BLOCKED
                        transmitter is forced off by something outside of
                        the driver's control.
-
-What:          /sys/class/rfkill/rfkill[0-9]+/claim
-Date:          09-Jul-2007
-KernelVersion  v2.6.22
-Contact:       linux-wireless@vger.kernel.org
-Description:   This file is deprecated because there no longer is a way to
-               claim just control over a single rfkill instance.
-               This file is scheduled to be removed in 2012.
-Values:        0: Kernel handles events
diff --git a/Documentation/ABI/removed/sysfs-class-rfkill b/Documentation/ABI/removed/sysfs-class-rfkill
new file mode 100644 (file)
index 0000000..3ce6231
--- /dev/null
@@ -0,0 +1,13 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details to this subsystem look at Documentation/rfkill.txt.
+
+What:          /sys/class/rfkill/rfkill[0-9]+/claim
+Date:          09-Jul-2007
+KernelVersion  v2.6.22
+Contact:       linux-wireless@vger.kernel.org
+Description:   This file was deprecated because there no longer was a way to
+               claim just control over a single rfkill instance.
+               This file was scheduled to be removed in 2012, and was removed
+               in 2016.
+Values:        0: Kernel handles events
diff --git a/Documentation/ABI/stable/sysfs-fs-orangefs b/Documentation/ABI/stable/sysfs-fs-orangefs
new file mode 100644 (file)
index 0000000..affdb11
--- /dev/null
@@ -0,0 +1,87 @@
+What:                  /sys/fs/orangefs/perf_counters/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Counters and settings for various caches.
+                       Read only.
+
+
+What:                  /sys/fs/orangefs/perf_counter_reset
+Date:                  June 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       echo a 0 or a 1 into perf_counter_reset to
+                       reset all the counters in
+                       /sys/fs/orangefs/perf_counters
+                       except ones with PINT_PERF_PRESERVE set.
+
+
+What:                  /sys/fs/orangefs/perf_time_interval_secs
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Length of perf counter intervals in
+                       seconds.
+
+
+What:                  /sys/fs/orangefs/perf_history_size
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       The perf_counters cache statistics have N, or
+                       perf_history_size, samples. The default is
+                       one.
+
+                       Every perf_time_interval_secs the (first)
+                       samples are reset.
+
+                       If N is greater than one, the "current" set
+                       of samples is reset, and the samples from the
+                       other N-1 intervals remain available.
+
+
+What:                  /sys/fs/orangefs/op_timeout_secs
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Service operation timeout in seconds.
+
+
+What:                  /sys/fs/orangefs/slot_timeout_secs
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       "Slot" timeout in seconds. A "slot"
+                       is an indexed buffer in the shared
+                       memory segment used for communication
+                       between the kernel module and userspace.
+                       Slots are requested and waited for;
+                       the wait times out after slot_timeout_secs.
+
+
+What:                  /sys/fs/orangefs/acache/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Attribute cache configurable settings.
+
+
+What:                  /sys/fs/orangefs/ncache/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Name cache configurable settings.
+
+
+What:                  /sys/fs/orangefs/capcache/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Capability cache configurable settings.
+
+
+What:                  /sys/fs/orangefs/ccache/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Credential cache configurable settings.
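
A minimal sketch of how these attributes might be driven from user space,
assuming the orangefs module is loaded so that /sys/fs/orangefs/ exists;
error handling is deliberately minimal:

        /* Reset the perf counters and read back op_timeout_secs. */
        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/sys/fs/orangefs/perf_counter_reset", "w");
                char buf[32];

                if (f) {
                        fputs("1\n", f);        /* echo a 1 to reset the counters */
                        fclose(f);
                }

                f = fopen("/sys/fs/orangefs/op_timeout_secs", "r");
                if (f && fgets(buf, sizeof(buf), f))
                        printf("op_timeout_secs: %s", buf);
                if (f)
                        fclose(f);
                return 0;
        }
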
index e5200f354abfe933d3fbe69f919da2dcc0e585a6..a809f6005f1464ed7c86133db7b4b95d4a3c7388 100644 (file)
@@ -98,3 +98,17 @@ Date:                October 2015
 Contact:       "Chao Yu" <chao2.yu@samsung.com>
 Description:
                 Controls the count of nid pages to be readaheaded.
+
+What:          /sys/fs/f2fs/<disk>/dirty_nats_ratio
+Date:          January 2016
+Contact:       "Chao Yu" <chao2.yu@samsung.com>
+Description:
+                Controls the dirty nat entries ratio threshold. If the
+                current ratio exceeds the configured threshold, a checkpoint
+                will be triggered to flush dirty nat entries.
+
+What:          /sys/fs/f2fs/<disk>/lifetime_write_kbytes
+Date:          January 2016
+Contact:       "Shuoran Liu" <liushuoran@huawei.com>
+Description:
+                Shows total written kbytes issued to disk.
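
A minimal sketch of how the two new attributes might be exercised from user
space; the device name "sda" and the value 70 are placeholders, and the unit
of dirty_nats_ratio is an assumption (it is not stated in this hunk):

        /* Read lifetime_write_kbytes and set dirty_nats_ratio. */
        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/sys/fs/f2fs/sda/lifetime_write_kbytes", "r");
                char buf[64];

                if (f && fgets(buf, sizeof(buf), f))
                        printf("lifetime write kbytes: %s", buf);
                if (f)
                        fclose(f);

                f = fopen("/sys/fs/f2fs/sda/dirty_nats_ratio", "w");
                if (f) {
                        fputs("70\n", f);       /* threshold, assumed to be a percentage */
                        fclose(f);
                }
                return 0;
        }
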
index cdd8b24db68d3efc7f9a8d85130c8b851b2f963b..cc303a2f641cb59e7d8ab3c78c4f38751474d43d 100644 (file)
@@ -229,6 +229,7 @@ X!Isound/sound_firmware.c
 !Iinclude/media/v4l2-dv-timings.h
 !Iinclude/media/v4l2-event.h
 !Iinclude/media/v4l2-flash-led-class.h
+!Iinclude/media/v4l2-mc.h
 !Iinclude/media/v4l2-mediabus.h
 !Iinclude/media/v4l2-mem2mem.h
 !Iinclude/media/v4l2-of.h
index 63152ab9efbad0b10c59105effc55c49cc02d488..e0d49fa329f095bdc35ff643e4cac206232f2e03 100644 (file)
@@ -48,9 +48,6 @@
 
   <refsect1>
     <title>Description</title>
-
-    <para><emphasis role="bold">NOTE:</emphasis> This new ioctl is programmed to be added on Kernel 4.6. Its definition/arguments may change until its final version.</para>
-
     <para>The typical usage of this ioctl is to call it twice.
     On the first call, the structure defined at &media-v2-topology; should
     be zeroed. At return, if no errors happen, this ioctl will return the
index 1af3842509105ea45fd815ad953781d901b03bfe..751c3d0271036544dae44457fdab0f8767830ef7 100644 (file)
          </row>
          <row>
            <entry><constant>MEDIA_ENT_F_TUNER</constant></entry>
-           <entry>Digital TV, analog TV, radio and/or software radio tuner.</entry>
+           <entry>Digital TV, analog TV, radio and/or software radio tuner,
+                  which consists of a PLL tuning stage that converts the
+                  radio frequency (RF) signal into an intermediate frequency (IF).
+                  Modern tuners have internal IF-PLL decoders for audio
+                  and video, but older models have those stages implemented
+                  as separate entities.
+           </entry>
+         </row>
+         <row>
+           <entry><constant>MEDIA_ENT_F_IF_VID_DECODER</constant></entry>
+           <entry>IF-PLL video decoder. It receives the IF from a PLL
+                  and decodes the analog TV video signal. This is commonly
+                  found on some very old analog tuners, like Philips MK3
+                  designs. They all contain a tda9887 (or a software
+                  compatible chip such as the tda9885). Those devices
+                  use a different I2C address than the tuner PLL.
+           </entry>
+         </row>
+         <row>
+           <entry><constant>MEDIA_ENT_F_IF_AUD_DECODER</constant></entry>
+           <entry>IF-PLL sound decoder. It receives the IF from a PLL
+                  and decodes the analog TV audio signal. This is commonly
+                  found on some very old analog hardware, like Micronas
+                  msp3400, Philips tda9840, tda985x, etc. Those devices
+                  use a different I2C address than the tuner PLL and
+                  should be controlled together with the IF-PLL video
+                  decoder.
+           </entry>
          </row>
        </tbody>
       </tgroup>
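
A minimal sketch of how an application might walk the graph and spot the
entity functions listed above, using the two-call MEDIA_IOC_G_TOPOLOGY
pattern described earlier; /dev/media0 is a placeholder and the structure
layout is assumed to match the uAPI in linux/media.h:

        /* Enumerate media entities and flag tuner entities. */
        #include <fcntl.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/media.h>

        int main(void)
        {
                struct media_v2_topology topo;
                struct media_v2_entity *ents;
                unsigned int i;
                int fd = open("/dev/media0", O_RDWR);

                if (fd < 0)
                        return 1;

                /* First call with a zeroed structure: only the counts are filled in. */
                memset(&topo, 0, sizeof(topo));
                if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo) < 0)
                        return 1;

                /* Second call with an entity array sized for num_entities. */
                ents = calloc(topo.num_entities, sizeof(*ents));
                topo.ptr_entities = (__u64)(uintptr_t)ents;
                if (ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo) < 0)
                        return 1;

                for (i = 0; i < topo.num_entities; i++)
                        printf("entity %u (%s)%s\n", ents[i].id, ents[i].name,
                               ents[i].function == MEDIA_ENT_F_TUNER ? " [tuner]" : "");

                free(ents);
                return 0;
        }
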
index e781cc61786c52b93f3f2973110f6040c56dac0e..7d13fe96657d6fef3ab8fa51bf54ec60a7f697fa 100644 (file)
@@ -1,35 +1,43 @@
-    <refentry id="V4L2-PIX-FMT-YUV420M">
+    <refentry>
       <refmeta>
-       <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12')</refentrytitle>
+       <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12'), V4L2_PIX_FMT_YVU420M ('YM21')</refentrytitle>
        &manvol;
       </refmeta>
       <refnamediv>
-       <refname> <constant>V4L2_PIX_FMT_YUV420M</constant></refname>
-       <refpurpose>Variation of <constant>V4L2_PIX_FMT_YUV420</constant>
-         with planes non contiguous in memory. </refpurpose>
+       <refname id="V4L2-PIX-FMT-YUV420M"><constant>V4L2_PIX_FMT_YUV420M</constant></refname>
+       <refname id="V4L2-PIX-FMT-YVU420M"><constant>V4L2_PIX_FMT_YVU420M</constant></refname>
+       <refpurpose>Variation of <constant>V4L2_PIX_FMT_YUV420</constant> and
+         <constant>V4L2_PIX_FMT_YVU420</constant> with planes non contiguous
+         in memory.</refpurpose>
       </refnamediv>
 
       <refsect1>
        <title>Description</title>
 
        <para>This is a multi-planar format, as opposed to a packed format.
-The three components are separated into three sub- images or planes.
+The three components are separated into three sub-images or planes.</para>
 
-The Y plane is first. The Y plane has one byte per pixel. The Cb data
+       <para>The Y plane is first. The Y plane has one byte per pixel.
+For <constant>V4L2_PIX_FMT_YUV420M</constant> the Cb data
 constitutes the second plane which is half the width and half
 the height of the Y plane (and of the image). Each Cb belongs to four
 pixels, a two-by-two square of the image. For example,
 Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
 Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and
 Y'<subscript>11</subscript>. The Cr data, just like the Cb plane, is
-in the third plane. </para>
+in the third plane.</para>
+
+       <para><constant>V4L2_PIX_FMT_YVU420M</constant> is the same except
+the Cr data is stored in the second plane and the Cb data in the third plane.
+</para>
 
        <para>If the Y plane has pad bytes after each row, then the Cb
 and Cr planes have half as many pad bytes after their rows. In other
 words, two Cx rows (including padding) is exactly as long as one Y row
 (including padding).</para>
 
-       <para><constant>V4L2_PIX_FMT_YUV420M</constant> is intended to be
+       <para><constant>V4L2_PIX_FMT_YUV420M</constant> and
+<constant>V4L2_PIX_FMT_YVU420M</constant> are intended to be
 used only in drivers and applications that support the multi-planar API,
 described in <xref linkend="planar-apis"/>. </para>
 
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml
deleted file mode 100644 (file)
index 2330667..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-    <refentry id="V4L2-PIX-FMT-YVU420M">
-      <refmeta>
-       <refentrytitle>V4L2_PIX_FMT_YVU420M ('YM21')</refentrytitle>
-       &manvol;
-      </refmeta>
-      <refnamediv>
-       <refname> <constant>V4L2_PIX_FMT_YVU420M</constant></refname>
-       <refpurpose>Variation of <constant>V4L2_PIX_FMT_YVU420</constant>
-         with planes non contiguous in memory. </refpurpose>
-      </refnamediv>
-
-      <refsect1>
-       <title>Description</title>
-
-       <para>This is a multi-planar format, as opposed to a packed format.
-The three components are separated into three sub-images or planes.
-
-The Y plane is first. The Y plane has one byte per pixel. The Cr data
-constitutes the second plane which is half the width and half
-the height of the Y plane (and of the image). Each Cr belongs to four
-pixels, a two-by-two square of the image. For example,
-Cr<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
-Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and
-Y'<subscript>11</subscript>. The Cb data, just like the Cr plane, constitutes
-the third plane. </para>
-
-       <para>If the Y plane has pad bytes after each row, then the Cr
-and Cb planes have half as many pad bytes after their rows. In other
-words, two Cx rows (including padding) is exactly as long as one Y row
-(including padding).</para>
-
-       <para><constant>V4L2_PIX_FMT_YVU420M</constant> is intended to be
-used only in drivers and applications that support the multi-planar API,
-described in <xref linkend="planar-apis"/>. </para>
-
-       <example>
-         <title><constant>V4L2_PIX_FMT_YVU420M</constant> 4 &times; 4
-pixel image</title>
-
-         <formalpara>
-           <title>Byte Order.</title>
-           <para>Each cell is one byte.
-               <informaltable frame="none">
-               <tgroup cols="5" align="center">
-                 <colspec align="left" colwidth="2*" />
-                 <tbody valign="top">
-                   <row>
-                     <entry>start0&nbsp;+&nbsp;0:</entry>
-                     <entry>Y'<subscript>00</subscript></entry>
-                     <entry>Y'<subscript>01</subscript></entry>
-                     <entry>Y'<subscript>02</subscript></entry>
-                     <entry>Y'<subscript>03</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start0&nbsp;+&nbsp;4:</entry>
-                     <entry>Y'<subscript>10</subscript></entry>
-                     <entry>Y'<subscript>11</subscript></entry>
-                     <entry>Y'<subscript>12</subscript></entry>
-                     <entry>Y'<subscript>13</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start0&nbsp;+&nbsp;8:</entry>
-                     <entry>Y'<subscript>20</subscript></entry>
-                     <entry>Y'<subscript>21</subscript></entry>
-                     <entry>Y'<subscript>22</subscript></entry>
-                     <entry>Y'<subscript>23</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start0&nbsp;+&nbsp;12:</entry>
-                     <entry>Y'<subscript>30</subscript></entry>
-                     <entry>Y'<subscript>31</subscript></entry>
-                     <entry>Y'<subscript>32</subscript></entry>
-                     <entry>Y'<subscript>33</subscript></entry>
-                   </row>
-                   <row><entry></entry></row>
-                   <row>
-                     <entry>start1&nbsp;+&nbsp;0:</entry>
-                     <entry>Cr<subscript>00</subscript></entry>
-                     <entry>Cr<subscript>01</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start1&nbsp;+&nbsp;2:</entry>
-                     <entry>Cr<subscript>10</subscript></entry>
-                     <entry>Cr<subscript>11</subscript></entry>
-                   </row>
-                   <row><entry></entry></row>
-                   <row>
-                     <entry>start2&nbsp;+&nbsp;0:</entry>
-                     <entry>Cb<subscript>00</subscript></entry>
-                     <entry>Cb<subscript>01</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start2&nbsp;+&nbsp;2:</entry>
-                     <entry>Cb<subscript>10</subscript></entry>
-                     <entry>Cb<subscript>11</subscript></entry>
-                   </row>
-                 </tbody>
-               </tgroup>
-               </informaltable>
-             </para>
-         </formalpara>
-
-         <formalpara>
-           <title>Color Sample Location.</title>
-           <para>
-               <informaltable frame="none">
-               <tgroup cols="7" align="center">
-                 <tbody valign="top">
-                   <row>
-                     <entry></entry>
-                     <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
-                     <entry>2</entry><entry></entry><entry>3</entry>
-                   </row>
-                   <row>
-                     <entry>0</entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry>
-                   </row>
-                   <row>
-                     <entry></entry>
-                     <entry></entry><entry>C</entry><entry></entry><entry></entry>
-                     <entry></entry><entry>C</entry><entry></entry>
-                   </row>
-                   <row>
-                     <entry>1</entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry>
-                   </row>
-                   <row>
-                     <entry></entry>
-                   </row>
-                   <row>
-                     <entry>2</entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry>
-                   </row>
-                   <row>
-                     <entry></entry>
-                     <entry></entry><entry>C</entry><entry></entry><entry></entry>
-                     <entry></entry><entry>C</entry><entry></entry>
-                   </row>
-                   <row>
-                     <entry>3</entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry>
-                   </row>
-                 </tbody>
-               </tgroup>
-               </informaltable>
-             </para>
-         </formalpara>
-       </example>
-      </refsect1>
-    </refentry>
index d871245d29735a0084ea422539a0dc23f106ca10..9e77ff353febe87d1e8ba66b6159c78e656f716a 100644 (file)
@@ -1628,7 +1628,6 @@ information.</para>
     &sub-y41p;
     &sub-yuv420;
     &sub-yuv420m;
-    &sub-yvu420m;
     &sub-yuv410;
     &sub-yuv422p;
     &sub-yuv411p;
index e9c70a8f34762b878ee67aa51a57d568ce5d6791..0c93677d16b449b931f5b9cb7591bd184be39fa2 100644 (file)
@@ -60,9 +60,19 @@ input</refpurpose>
 automatically, similar to sensing the video standard. To do so, applications
 call <constant>VIDIOC_QUERY_DV_TIMINGS</constant> with a pointer to a
 &v4l2-dv-timings;. Once the hardware detects the timings, it will fill in the
-timings structure.
+timings structure.</para>
 
-If the timings could not be detected because there was no signal, then
+<para>Please note that drivers shall <emphasis>not</emphasis> switch timings automatically
+if new timings are detected. Instead, drivers should send the
+<constant>V4L2_EVENT_SOURCE_CHANGE</constant> event (if they support this) and expect
+that userspace will take action by calling <constant>VIDIOC_QUERY_DV_TIMINGS</constant>.
+The reason is that new timings usually mean different buffer sizes as well, and you
+cannot change buffer sizes on the fly. In general, applications that receive the
+Source Change event will have to call <constant>VIDIOC_QUERY_DV_TIMINGS</constant>,
+and if the detected timings are valid they will have to stop streaming, set the new
+timings, allocate new buffers and start streaming again.</para>
+
+<para>If the timings could not be detected because there was no signal, then
 <errorcode>ENOLINK</errorcode> is returned. If a signal was detected, but
 it was unstable and the receiver could not lock to the signal, then
 <errorcode>ENOLCK</errorcode> is returned. If the receiver could lock to the signal,
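
A minimal sketch of the recommended flow, assuming the application has
already subscribed to V4L2_EVENT_SOURCE_CHANGE with VIDIOC_SUBSCRIBE_EVENT
and uses a video capture queue; buffer reallocation is only indicated by a
comment:

        /* Handle a source change: re-query timings, then restart streaming. */
        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/videodev2.h>

        static int handle_source_change(int fd)
        {
                struct v4l2_event ev;
                struct v4l2_dv_timings timings;
                enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

                if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0 ||
                    ev.type != V4L2_EVENT_SOURCE_CHANGE)
                        return -1;

                memset(&timings, 0, sizeof(timings));
                if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) < 0)
                        return -1;      /* no signal, unstable, or out of range */

                /* New timings usually mean new buffer sizes, so stop streaming,
                 * program the timings, reallocate buffers, then restart. */
                ioctl(fd, VIDIOC_STREAMOFF, &type);
                ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings);
                /* ... VIDIOC_REQBUFS / mmap() of the new buffers goes here ... */
                return ioctl(fd, VIDIOC_STREAMON, &type);
        }
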
index 222348542182bfb06b5019bbac2ea023d06bab02..3ceae35fab0317e22fe891ce8c895346f527d223 100644 (file)
@@ -59,6 +59,16 @@ then the driver will return V4L2_STD_UNKNOWN. When detection is not
 possible or fails, the set must contain all standards supported by the
 current video input or output.</para>
 
+<para>Please note that drivers shall <emphasis>not</emphasis> switch the video standard
+automatically if a new video standard is detected. Instead, drivers should send the
+<constant>V4L2_EVENT_SOURCE_CHANGE</constant> event (if they support this) and expect
+that userspace will take action by calling <constant>VIDIOC_QUERYSTD</constant>.
+The reason is that a new video standard can mean different buffer sizes as well, and you
+cannot change buffer sizes on the fly. In general, applications that receive the
+Source Change event will have to call <constant>VIDIOC_QUERYSTD</constant>,
+and if the detected video standard is valid they will have to stop streaming, set the new
+standard, allocate new buffers and start streaming again.</para>
+
   </refsect1>
 
   <refsect1>
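
The analog-standard case follows the same pattern with VIDIOC_QUERYSTD and
VIDIOC_S_STD; a condensed sketch, again assuming a capture queue and eliding
buffer reallocation:

        /* Re-detect the video standard after a source change. */
        #include <sys/ioctl.h>
        #include <linux/videodev2.h>

        static int requery_standard(int fd)
        {
                v4l2_std_id std = 0;
                enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

                if (ioctl(fd, VIDIOC_QUERYSTD, &std) < 0 || std == V4L2_STD_UNKNOWN)
                        return -1;      /* detection failed or no standard found */

                ioctl(fd, VIDIOC_STREAMOFF, &type);     /* buffer sizes may change */
                ioctl(fd, VIDIOC_S_STD, &std);
                /* ... reallocate buffers here before restarting ... */
                return ioctl(fd, VIDIOC_STREAMON, &type);
        }
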
index 7b57fc087088f49756eeb8eaabf403bfbbd92b93..49585b6e1ea24c951ea58090b762740aa36b066d 100644 (file)
@@ -3,7 +3,7 @@ Linux IOMMU Support
 
 The architecture spec can be obtained from the below location.
 
-http://www.intel.com/technology/virtualization/
+http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf
 
 This guide gives a quick cheat sheet for some basic understanding.
 
index 430d279a8df374883f5038d0f86961843928f2a7..e5a115f244717aa93122a07d8bc3dc968833f588 100644 (file)
@@ -72,6 +72,5 @@ SunXi family
 
     * Octa ARM Cortex-A7 based SoCs
       - Allwinner A83T
-        + Not Supported
         + Datasheet
           http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf
index 701d39d3171a74d8c2eb670c0b1be2931f326ee8..56d6d8b796db6dd3aadd252a85cc4b691f9e20f7 100644 (file)
@@ -109,7 +109,13 @@ Header notes:
                        1 - 4K
                        2 - 16K
                        3 - 64K
-  Bits 3-63:   Reserved.
+  Bit 3:       Kernel physical placement
+                       0 - 2MB aligned base should be as close as possible
+                           to the base of DRAM, since memory below it is not
+                           accessible via the linear mapping
+                       1 - 2MB aligned base may be anywhere in physical
+                           memory
+  Bits 4-63:   Reserved.
 
 - When image_size is zero, a bootloader should attempt to keep as much
   memory as possible free for use by the kernel immediately after the
@@ -117,14 +123,14 @@ Header notes:
   depending on selected features, and is effectively unbound.
 
 The Image must be placed text_offset bytes from a 2MB aligned base
-address near the start of usable system RAM and called there. Memory
-below that base address is currently unusable by Linux, and therefore it
-is strongly recommended that this location is the start of system RAM.
-The region between the 2 MB aligned base address and the start of the
-image has no special significance to the kernel, and may be used for
-other purposes.
+address anywhere in usable system RAM and called there. The region
+between the 2 MB aligned base address and the start of the image has no
+special significance to the kernel, and may be used for other purposes.
 At least image_size bytes from the start of the image must be free for
 use by the kernel.
+NOTE: versions prior to v4.6 cannot make use of memory below the
+physical offset of the Image so it is recommended that the Image be
+placed as close as possible to the start of system RAM.
 
 Any memory described to the kernel (even that below the start of the
 image) which is not marked as reserved from the kernel (e.g., with a
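
A small worked example for the flags bits quoted in this hunk: a 4K-page
image whose 2 MB aligned base may be placed anywhere sets page-size value 1
in bits 1-2 and sets bit 3. Bit 0 and the reserved bits are outside this
excerpt and are assumed to stay zero, so verify against the full booting.txt:

        /* Compose the image header flags for 4K pages, placement anywhere. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long long page_size_4k = 1ULL;   /* bits 1-2: 1 means 4K */
                unsigned long long place_anywhere = 1ULL; /* bit 3: base may be anywhere */
                unsigned long long flags = (page_size_4k << 1) | (place_anywhere << 3);

                printf("flags = 0x%llx\n", flags);        /* prints 0xa */
                return 0;
        }
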
index 65b3eac8856cf7ec26b341b217046a2fb9b7bf26..ff49cf901148d895b765800ec6ddb79c0e38ed53 100644 (file)
@@ -7,7 +7,7 @@ This is the authoritative documentation on the design, interface and
 conventions of cgroup v2.  It describes all userland-visible aspects
 of cgroup including core and specific controller behaviors.  All
 future changes must be reflected in this document.  Documentation for
-v1 is available under Documentation/cgroup-legacy/.
+v1 is available under Documentation/cgroup-v1/.
 
 CONTENTS
 
@@ -843,6 +843,10 @@ PAGE_SIZE multiple when read back.
                Amount of memory used to cache filesystem data,
                including tmpfs and shared memory.
 
+         sock
+
+               Amount of memory used in network transmission buffers
+
          file_mapped
 
                Amount of cached filesystem data mapped with mmap()
index ae9be074d09f66d503adccc539c892e6cf13af90..a0884b85abf2cfccab76126f335352a9c6c44979 100644 (file)
@@ -178,6 +178,7 @@ nodes to be present and contain the properties described below.
                            "marvell,sheeva-v5"
                            "nvidia,tegra132-denver"
                            "qcom,krait"
+                           "qcom,kryo"
                            "qcom,scorpion"
        - enable-method
                Value type: <stringlist>
index 3090a8a008c03ed61cfb42b94d2cf36f3b5756ed..48f6703a28c8f50108a1d9f3bdda62462d9b099e 100644 (file)
@@ -22,6 +22,8 @@ SoCs:
    compatible = "ti,k2l", "ti,keystone"
 - Keystone 2 Edison
    compatible = "ti,k2e", "ti,keystone"
+- K2G
+   compatible = "ti,k2g", "ti,keystone"
 
 Boards:
 -  Keystone 2 Hawking/Kepler EVM
@@ -32,3 +34,6 @@ Boards:
 
 -  Keystone 2 Edison EVM
    compatible = "ti,k2e-evm", "ti,k2e", "ti,keystone"
+
+-  K2G EVM
+   compatible = "ti,k2g-evm", "ti,k2g", "ti,keystone"
index ab0c9cdf388e9e5769bf7ecfa88f5bbc869b58fb..7d28fe4bf654e8d1fa9c1a3cc06a8fab0631ef9a 100644 (file)
@@ -19,9 +19,12 @@ SoC. Currently known SoC compatibles are:
 And in addition, the compatible shall be extended with the specific
 board. Currently known boards are:
 
+"buffalo,linkstation-lsqvl"
+"buffalo,linkstation-lsvl"
+"buffalo,linkstation-lswsxl"
+"buffalo,linkstation-lswxl"
+"buffalo,linkstation-lswvl"
 "buffalo,lschlv2"
-"buffalo,lswvl"
-"buffalo,lswxl"
 "buffalo,lsxhl"
 "buffalo,lsxl"
 "cloudengines,pogo02"
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-srom.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-srom.txt
new file mode 100644 (file)
index 0000000..e5c18df
--- /dev/null
@@ -0,0 +1,81 @@
+SAMSUNG Exynos SoCs SROM Controller driver.
+
+Required properties:
+- compatible : Should contain "samsung,exynos-srom".
+
+- reg: offset and length of the register set
+
+Optional properties:
+The SROM controller can be used to attach external peripherals. In this case
+extra properties, describing the bus behind it, should be specified as below:
+
+- #address-cells: Must be set to 2 to allow device address translation.
+                 Address is specified as (bank#, offset).
+
+- #size-cells: Must be set to 1 to allow device size passing
+
+- ranges: Must be set up to reflect the memory layout with four integer values
+         per bank:
+               <bank-number> 0 <parent address of bank> <size>
+
+Sub-nodes:
+The actual device nodes should be added as subnodes to the SROMc node. These
+subnodes, in addition to the regular device specification, should contain the
+following properties describing the configuration of the relevant SROM bank:
+
+Required properties:
+- reg: bank number, base address (relative to start of the bank) and size of
+       the memory mapped for the device. Note that base address will be
+       typically 0 as this is the start of the bank.
+
+- samsung,srom-timing : array of 6 integers, specifying bank timings in the
+                        following order: Tacp, Tcah, Tcoh, Tacc, Tcos, Tacs.
+                        Each value is specified in cycles and has the following
+                        meaning and valid range:
+                        Tacp : Page mode access cycle at Page mode (0 - 15)
+                        Tcah : Address holding time after CSn (0 - 15)
+                        Tcoh : Chip selection hold on OEn (0 - 15)
+                        Tacc : Access cycle (0 - 31, the actual time is N + 1)
+                        Tcos : Chip selection set-up before OEn (0 - 15)
+                        Tacs : Address set-up before CSn (0 - 15)
+
+Optional properties:
+- reg-io-width : data width in bytes (1 or 2). If omitted, default of 1 is used.
+
+- samsung,srom-page-mode : page mode configuration for the bank:
+                          0 - normal (one data)
+                          1 - four data
+                          If omitted, default of 0 is used.
+
+Example: basic definition, no banks are configured
+       sromc@12570000 {
+               compatible = "samsung,exynos-srom";
+               reg = <0x12570000 0x14>;
+       };
+
+Example: SROMc with SMSC911x ethernet chip on bank 3
+       sromc@12570000 {
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0 0x04000000 0x20000   // Bank0
+                         1 0 0x05000000 0x20000   // Bank1
+                         2 0 0x06000000 0x20000   // Bank2
+                         3 0 0x07000000 0x20000>; // Bank3
+
+               compatible = "samsung,exynos-srom";
+               reg = <0x12570000 0x14>;
+
+               ethernet@3,0 {
+                       compatible = "smsc,lan9115";
+                       reg = <3 0 0x10000>;       // Bank 3, offset = 0
+                       phy-mode = "mii";
+                       interrupt-parent = <&gpx0>;
+                       interrupts = <5 8>;
+                       reg-io-width = <2>;
+                       smsc,irq-push-pull;
+                       smsc,force-internal-phy;
+
+                       samsung,srom-page-mode = <1>;
+                       samsung,srom-timing = <9 12 1 9 1 1>;
+               };
+       };
index bb9b0faa919d098309c9eb22289f8cef3b995d9b..7e79fcc36b0db60c8b147bf7a498d8013c120d16 100644 (file)
@@ -11,5 +11,6 @@ using one of the following compatible strings:
   allwinner,sun7i-a20
   allwinner,sun8i-a23
   allwinner,sun8i-a33
+  allwinner,sun8i-a83t
   allwinner,sun8i-h3
   allwinner,sun9i-a80
index e59f57b24777c473c5c92faefbc0bec1ec17ecb9..966dcaffcb9cbb09d288a968cd7cb3faae3019eb 100644 (file)
@@ -39,6 +39,7 @@ Required properties:
        "allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31
        "allwinner,sun8i-a23-apb0-clk" - for the APB0 clock on A23
        "allwinner,sun9i-a80-apb0-clk" - for the APB0 bus clock on A80
+       "allwinner,sun8i-a83t-apb0-gates-clk" - for the APB0 gates on A83T
        "allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10
        "allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
        "allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
@@ -57,6 +58,7 @@ Required properties:
        "allwinner,sun9i-a80-apb1-gates-clk" - for the APB1 gates on A80
        "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
        "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
+       "allwinner,sun8i-a83t-bus-gates-clk" - for the bus gates on A83T
        "allwinner,sun8i-h3-bus-gates-clk" - for the bus gates on H3
        "allwinner,sun9i-a80-apbs-gates-clk" - for the APBS gates on A80
        "allwinner,sun4i-a10-dram-gates-clk" - for the DRAM gates on A10
@@ -86,7 +88,7 @@ Required properties for all clocks:
 - #clock-cells : from common clock binding; shall be set to 0 except for
        the following compatibles where it shall be set to 1:
        "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
-       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk",
+       "allwinner,sun4i-pll6-clk",
        "allwinner,*-usb-clk", "allwinner,*-mmc-clk",
        "allwinner,*-mmc-config-clk"
 - clock-output-names : shall be the corresponding names of the outputs.
index 267565894db929013c88d75be65093f693546b8b..db7e2260f9c5749e510a19e6d275e7bbfdaff467 100644 (file)
@@ -15,6 +15,7 @@ Optional properties:
     cells in the dmas property of client device.
   - dma-channels: contains the total number of DMA channels supported by the DMAC
   - dma-requests: contains the total number of DMA requests supported by the DMAC
+  - arm,pl330-broken-no-flushp: quirk to avoid executing DMAFLUSHP
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
new file mode 100644 (file)
index 0000000..debcd32
--- /dev/null
@@ -0,0 +1,25 @@
+QCOM Secure Channel Manager (SCM)
+
+Qualcomm processors include an interface to communicate to the secure firmware.
+This interface allows for clients to request different types of actions.  These
+can include CPU power up/down, HDCP requests, loading of firmware, and other
+assorted actions.
+
+Required properties:
+- compatible: must contain "qcom,scm"
+- clocks: Should contain the core, iface, and bus clocks.
+- clock-names: Must contain "core" for the core clock, "iface" for the interface
+  clock and "bus" for the bus clock.
+
+Example:
+
+       firmware {
+               compatible = "simple-bus";
+
+               scm {
+                       compatible = "qcom,scm";
+                       clocks = <&gcc GCC_CE1_CLK> , <&gcc GCC_CE1_AXI_CLK>, <&gcc GCC_CE1_AHB_CLK>;
+                       clock-names = "core", "bus", "iface";
+               };
+       };
+
index 202565313e825fe897e03a05849fa54765efe2fe..100f0ae432691c36ff54609c86a8cc984b9a5b7d 100644 (file)
@@ -20,6 +20,8 @@ Optional Properties:
 
 - link-frequencies: List of allowed link frequencies in Hz. Each frequency is
        expressed as a 64-bit big-endian integer.
+- reset-gpios: GPIO handle which is connected to the reset pin of the chip.
+- standby-gpios: GPIO handle which is connected to the standby pin of the chip.
 
 For further reading on port node refer to
 Documentation/devicetree/bindings/media/video-interfaces.txt.
diff --git a/Documentation/devicetree/bindings/media/i2c/tvp5150.txt b/Documentation/devicetree/bindings/media/i2c/tvp5150.txt
new file mode 100644 (file)
index 0000000..8c0fc1a
--- /dev/null
@@ -0,0 +1,45 @@
+* Texas Instruments TVP5150 and TVP5151 video decoders
+
+The TVP5150 and TVP5151 are video decoders that convert baseband NTSC and PAL
+(and also SECAM in the TVP5151 case) video signals to either 8-bit 4:2:2 YUV
+with discrete syncs or 8-bit ITU-R BT.656 with embedded syncs output formats.
+
+Required Properties:
+- compatible: value must be "ti,tvp5150"
+- reg: I2C slave address
+
+Optional Properties:
+- pdn-gpios: phandle for the GPIO connected to the PDN pin, if any.
+- reset-gpios: phandle for the GPIO connected to the RESETB pin, if any.
+
+The device node must contain one 'port' child node for its digital output
+video port, in accordance with the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Required Endpoint Properties for parallel synchronization:
+
+- hsync-active: active state of the HSYNC signal. Must be <1> (HIGH).
+- vsync-active: active state of the VSYNC signal. Must be <1> (HIGH).
+- field-even-active: field signal level during the even field data
+  transmission. Must be <0>.
+
+If none of hsync-active, vsync-active and field-even-active is specified,
+the endpoint is assumed to use embedded BT.656 synchronization.
+
+Example:
+
+&i2c2 {
+       ...
+       tvp5150@5c {
+               compatible = "ti,tvp5150";
+               reg = <0x5c>;
+               pdn-gpios = <&gpio4 30 GPIO_ACTIVE_LOW>;
+               reset-gpios = <&gpio6 7 GPIO_ACTIVE_LOW>;
+
+               port {
+                       tvp5150_1: endpoint {
+                               remote-endpoint = <&ccdc_ep>;
+                       };
+               };
+       };
+};
index 9dafe6b06cd286a053a5db66cbbf8a5288faba2a..619193ccf7ff831d876c151f9689d8074c20070d 100644 (file)
@@ -6,6 +6,7 @@ family of devices. The current blocks are always slaves and support one input
 channel which can be either RGB, YUYV or BT656.
 
  - compatible: Must be one of the following
+   - "renesas,vin-r8a7795" for the R8A7795 device
    - "renesas,vin-r8a7794" for the R8A7794 device
    - "renesas,vin-r8a7793" for the R8A7793 device
    - "renesas,vin-r8a7791" for the R8A7791 device
index 0cb94201bf921a191c617c4c84a4a4637fc9c8d0..d3436e5190f9196a64f42fd0872a218a893cc229 100644 (file)
@@ -5,11 +5,12 @@ and decoding function conforming to the JPEG baseline process, so that the JPU
 can encode image data and decode JPEG data quickly.
 
 Required properties:
-  - compatible: should containg one of the following:
-                       - "renesas,jpu-r8a7790" for R-Car H2
-                       - "renesas,jpu-r8a7791" for R-Car M2-W
-                       - "renesas,jpu-r8a7792" for R-Car V2H
-                       - "renesas,jpu-r8a7793" for R-Car M2-N
+- compatible: "renesas,jpu-<soctype>", "renesas,rcar-gen2-jpu" as fallback.
+       Examples with soctypes are:
+         - "renesas,jpu-r8a7790" for R-Car H2
+         - "renesas,jpu-r8a7791" for R-Car M2-W
+         - "renesas,jpu-r8a7792" for R-Car V2H
+         - "renesas,jpu-r8a7793" for R-Car M2-N
 
   - reg: Base address and length of the registers block for the JPU.
   - interrupts: JPU interrupt specifier.
@@ -17,7 +18,7 @@ Required properties:
 
 Example: R8A7790 (R-Car H2) JPU node
        jpeg-codec@fe980000 {
-               compatible = "renesas,jpu-r8a7790";
+               compatible = "renesas,jpu-r8a7790", "renesas,rcar-gen2-jpu";
                reg = <0 0xfe980000 0 0x10300>;
                interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_JPU>;
diff --git a/Documentation/devicetree/bindings/media/ti-cal.txt b/Documentation/devicetree/bindings/media/ti-cal.txt
new file mode 100644 (file)
index 0000000..ae9b52f
--- /dev/null
@@ -0,0 +1,72 @@
+Texas Instruments DRA72x CAMERA ADAPTATION LAYER (CAL)
+------------------------------------------------------
+
+The Camera Adaptation Layer (CAL) is a key component for image capture
+applications. The capture module provides the system interface and the
+processing capability to connect CSI2 image-sensor modules to the
+DRA72x device.
+
+Required properties:
+- compatible: must be "ti,dra72-cal"
+- reg: CAL Top level, Receiver Core #0, Receiver Core #1 and Camera RX
+       control address space
+- reg-names: cal_top, cal_rx_core0, cal_rx_core1, and camerrx_control
+            registers
+- interrupts: should contain IRQ line for the CAL;
+
+CAL supports 2 camera port nodes on the MIPI bus. Each CSI2 camera port node
+should contain a 'port' child node with a child 'endpoint' node. Please
+refer to the bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+       cal: cal@4845b000 {
+               compatible = "ti,dra72-cal";
+               ti,hwmods = "cal";
+               reg = <0x4845B000 0x400>,
+                     <0x4845B800 0x40>,
+                     <0x4845B900 0x40>,
+                     <0x4A002e94 0x4>;
+               reg-names = "cal_top",
+                           "cal_rx_core0",
+                           "cal_rx_core1",
+                           "camerrx_control";
+               interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       csi2_0: port@0 {
+                               reg = <0>;
+                               endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ar0330_1>;
+                               };
+                       };
+                       csi2_1: port@1 {
+                               reg = <1>;
+                       };
+               };
+       };
+
+       i2c5: i2c@4807c000 {
+               ar0330@10 {
+                       compatible = "ti,ar0330";
+                       reg = <0x10>;
+
+                       port {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               ar0330_1: endpoint {
+                                       reg = <0>;
+                                       clock-lanes = <1>;
+                                       data-lanes = <0 2 3 4>;
+                                       remote-endpoint = <&csi2_0>;
+                               };
+                       };
+               };
+       };
index 3dc13b68fc3ffb6dd136038f9116a96fbde996c7..ea5614b6f6130f32219e63115541f1bea328d47d 100644 (file)
@@ -13,6 +13,8 @@ Required Properties:
        - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
                                                        before RK3288
        - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
+       - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
+       - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
 
 Optional Properties:
 * clocks: from common clock binding: if ciu_drive and ciu_sample are
diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
new file mode 100644 (file)
index 0000000..70dd511
--- /dev/null
@@ -0,0 +1,86 @@
+* Qualcomm NAND controller
+
+Required properties:
+- compatible:          should be "qcom,ipq806x-nand"
+- reg:                 MMIO address range
+- clocks:              must contain core clock and always on clock
+- clock-names:         must contain "core" for the core clock and "aon" for the
+                       always on clock
+- dmas:                        DMA specifier, consisting of a phandle to the ADM DMA
+                       controller node and the channel number to be used for
+                       NAND. Refer to dma.txt and qcom_adm.txt for more details
+- dma-names:           must be "rxtx"
+- qcom,cmd-crci:       must contain the ADM command type CRCI block instance
+                       number specified for the NAND controller on the given
+                       platform
+- qcom,data-crci:      must contain the ADM data type CRCI block instance
+                       number specified for the NAND controller on the given
+                       platform
+- #address-cells:      <1> - subnodes give the chip-select number
+- #size-cells:         <0>
+
+* NAND chip-select
+
+Each controller may contain one or more subnodes representing enabled
+chip-selects, each of which may be populated with a NAND flash chip. Their
+properties are as follows.
+
+Required properties:
+- compatible:          should contain "qcom,nandcs"
+- reg:                 a single integer representing the chip-select
+                       number (e.g., 0, 1, 2, etc.)
+- #address-cells:      see partition.txt
+- #size-cells:         see partition.txt
+- nand-ecc-strength:   see nand.txt
+- nand-ecc-step-size:  must be 512. see nand.txt for more details.
+
+Optional properties:
+- nand-bus-width:      see nand.txt
+
+Each nandcs device node may optionally contain a 'partitions' sub-node, which
+further contains sub-nodes describing the flash partition mapping. See
+partition.txt for more detail.
+
+Example:
+
+nand@1ac00000 {
+       compatible = "qcom,ebi2-nandc";
+       reg = <0x1ac00000 0x800>;
+
+       clocks = <&gcc EBI2_CLK>,
+                <&gcc EBI2_AON_CLK>;
+       clock-names = "core", "aon";
+
+       dmas = <&adm_dma 3>;
+       dma-names = "rxtx";
+       qcom,cmd-crci = <15>;
+       qcom,data-crci = <3>;
+
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       nandcs@0 {
+               compatible = "qcom,nandcs";
+               reg = <0>;
+
+               nand-ecc-strength = <4>;
+               nand-ecc-step-size = <512>;
+               nand-bus-width = <8>;
+
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       partition@0 {
+                               label = "boot-nand";
+                               reg = <0 0x58a0000>;
+                       };
+
+                       partition@58a0000 {
+                               label = "fs-nand";
+                               reg = <0x58a0000 0x4000000>;
+                       };
+               };
+       };
+};
index 451fef26b4dfaf05b6782099e54816d52a52baa8..10587bdadbbe5a8e8fe703e23c194420e3701f9f 100644 (file)
@@ -68,7 +68,7 @@ ethernet@f0b60000 {
                phy1: ethernet-phy@1 {
                        max-speed = <1000>;
                        reg = <0x1>;
-                       compatible = "brcm,28nm-gphy", "ethernet-phy-ieee802.3-c22";
+                       compatible = "ethernet-phy-ieee802.3-c22";
                };
        };
 };
@@ -115,7 +115,7 @@ ethernet@f0ba0000 {
                phy0: ethernet-phy@0 {
                        max-speed = <1000>;
                        reg = <0x0>;
-                       compatible = "brcm,bcm53125", "ethernet-phy-ieee802.3-c22";
+                       compatible = "ethernet-phy-ieee802.3-c22";
                };
        };
 };
index 80411b2f04906bc065513cb8a968f1a57f132906..ecacfa44b1eb9603d8b77c712dcff565ef4f470d 100644 (file)
@@ -4,8 +4,6 @@ Required properties:
 - compatible: should be "hisilicon,hns-dsaf-v1" or "hisilicon,hns-dsaf-v2".
   "hisilicon,hns-dsaf-v1" is for hip05.
   "hisilicon,hns-dsaf-v2" is for Hi1610 and Hi1612.
-- dsa-name: dsa fabric name who provide this interface.
-  should be "dsafX", X is the dsaf id.
 - mode: dsa fabric mode string. only support one of dsaf modes like these:
                "2port-64vf",
                "6port-16rss",
@@ -26,9 +24,8 @@ Required properties:
 
 Example:
 
-dsa: dsa@c7000000 {
+dsaf0: dsa@c7000000 {
        compatible = "hisilicon,hns-dsaf-v1";
-       dsa_name = "dsaf0";
        mode = "6port-16rss";
        interrupt-parent = <&mbigen_dsa>;
        reg = <0x0 0xC0000000 0x0 0x420000
index 41d19be7011ecd543af6c1226d2f332b8306a01e..e6a9d1c30878f89ff948d5fcd8cbe308e53327df 100644 (file)
@@ -4,8 +4,9 @@ Required properties:
 - compatible: "hisilicon,hns-nic-v1" or "hisilicon,hns-nic-v2".
   "hisilicon,hns-nic-v1" is for hip05.
   "hisilicon,hns-nic-v2" is for Hi1610 and Hi1612.
-- ae-name: accelerator name who provides this interface,
-  is simply a name referring to the name of name in the accelerator node.
+- ae-handle: accelerator engine handle for hns,
+  specifies a reference to the associated hardware driver node.
+  see Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
 - port-id: is the index of port provided by DSAF (the accelerator). DSAF can
  connect to 8 PHYs. Ports 0 and 1 are both used for administration purposes. They
   are called debug ports.
@@ -41,7 +42,7 @@ Example:
 
        ethernet@0{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <0>;
                local-mac-address = [a2 14 e4 4b 56 76];
        };
index aeea50c84e921fb301fc7fe9fd2cd6f295c7139c..d0cb8693963b51f447afbb9b6e6c5bd42e87f34e 100644 (file)
@@ -6,12 +6,17 @@ Required properties:
 - interrupts: interrupt for the device
 - phy: See ethernet.txt file in the same directory.
 - phy-mode: See ethernet.txt file in the same directory
-- clocks: a pointer to the reference clock for this device.
+- clocks: List of clocks for this device. At least the core clock is
+  mandatory. If several clocks are given, the clock-names property
+  must be used to identify them.
 
 Optional properties:
 - tx-csum-limit: maximum MTU supported by the port that allows TX checksum.
   Value is presented in bytes. If not used, by default 1600B is set for
   "marvell,armada-370-neta" and 9800B for others.
+- clock-names: List of names corresponding to the clocks property; shall be
+  "core" for the core clock and "bus" for the optional bus clock.
+
 
 Example:
 
index 79384113c2b060a4068413d427d3509cdcaa2d5b..694987d3c17a1085ba9fce589d81cc9ee99a7717 100644 (file)
@@ -38,7 +38,6 @@ Example :
 
                        phy11: ethernet-phy@1 {
                                reg = <1>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -48,7 +47,6 @@ Example :
                        };
                        phy12: ethernet-phy@2 {
                                reg = <2>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -58,7 +56,6 @@ Example :
                        };
                        phy13: ethernet-phy@3 {
                                reg = <3>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -68,7 +65,6 @@ Example :
                        };
                        phy14: ethernet-phy@4 {
                                reg = <4>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -85,7 +81,6 @@ Example :
 
                        phy21: ethernet-phy@1 {
                                reg = <1>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -95,7 +90,6 @@ Example :
                        };
                        phy22: ethernet-phy@2 {
                                reg = <2>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -105,7 +99,6 @@ Example :
                        };
                        phy23: ethernet-phy@3 {
                                reg = <3>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -115,7 +108,6 @@ Example :
                        };
                        phy24: ethernet-phy@4 {
                                reg = <4>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
index f65606f8d632b86bab2af28eb4a198d426a378ab..491f5bd55203b31c62b63ed2bcc624a701fd1580 100644 (file)
@@ -47,7 +47,6 @@ Example :
 
                        phy11: ethernet-phy@1 {
                                reg = <1>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -57,7 +56,6 @@ Example :
                        };
                        phy12: ethernet-phy@2 {
                                reg = <2>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -67,7 +65,6 @@ Example :
                        };
                        phy13: ethernet-phy@3 {
                                reg = <3>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -77,7 +74,6 @@ Example :
                        };
                        phy14: ethernet-phy@4 {
                                reg = <4>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -94,7 +90,6 @@ Example :
 
                        phy21: ethernet-phy@1 {
                                reg = <1>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -104,7 +99,6 @@ Example :
                        };
                        phy22: ethernet-phy@2 {
                                reg = <2>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -114,7 +108,6 @@ Example :
                        };
                        phy23: ethernet-phy@3 {
                                reg = <3>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -124,7 +117,6 @@ Example :
                        };
                        phy24: ethernet-phy@4 {
                                reg = <4>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
index 525e1658f2da5ca9ed22594fda97a25646ac2e20..bc1c3c8bf8fa37fa7e08dcabc65f1752a8a79749 100644 (file)
@@ -17,8 +17,7 @@ Optional Properties:
   "ethernet-phy-ieee802.3-c22" or "ethernet-phy-ieee802.3-c45" for
   PHYs that implement IEEE802.3 clause 22 or IEEE802.3 clause 45
   specifications. If neither of these are specified, the default is to
-  assume clause 22. The compatible list may also contain other
-  elements.
+  assume clause 22.
 
   If the phy's identifier is known then the list may contain an entry
   of the form: "ethernet-phy-idAAAA.BBBB" where
@@ -28,6 +27,9 @@ Optional Properties:
             4 hex digits. This is the chip vendor OUI bits 19:24,
             followed by 10 bits of a vendor specific ID.
 
+  The compatible list should not contain values other than those
+  listed here.
+
 - max-speed: Maximum PHY supported speed (10, 100, 1000...)
 
 - broken-turn-around: If set, indicates the PHY device does not correctly
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
new file mode 100644 (file)
index 0000000..9180724
--- /dev/null
@@ -0,0 +1,36 @@
+* Texas Instruments wl1271 wireless lan controller
+
+The wl1271 chip can be connected via SPI or via SDIO. This
+document describes the binding for the SPI connected chip.
+
+Required properties:
+- compatible :          Should be "ti,wl1271"
+- reg :                 Chip select address of device
+- spi-max-frequency :   Maximum SPI clocking speed of device in Hz
+- ref-clock-frequency : Reference clock frequency
+- interrupt-parent, interrupts :
+                        Should contain parameters for 1 interrupt line.
+                        Interrupt parameters: parent, line number, type.
+- vwlan-supply :        Phandle to the regulator node that powers/enables the wl1271 chip
+
+Optional properties:
+- clock-xtal :          boolean, clock is generated from XTAL
+
+- Please consult Documentation/devicetree/bindings/spi/spi-bus.txt
+  for optional SPI connection related properties.
+
+Examples:
+
+&spi1 {
+       wl1271@1 {
+               compatible = "ti,wl1271";
+
+               reg = <1>;
+               spi-max-frequency = <48000000>;
+               clock-xtal;
+               ref-clock-frequency = <38400000>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+               vwlan-supply = <&vwlan_fixed>;
+       };
+};
index 4e8b90e43dd83c72d81df6d644317ba60b489559..07a75094c5a8ed07e927c0641584d5e8348e3dc6 100644 (file)
@@ -8,6 +8,7 @@ OHCI and EHCI controllers.
 Required properties:
 - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
              "renesas,pci-r8a7791" for the R8A7791 SoC;
+             "renesas,pci-r8a7793" for the R8A7793 SoC;
              "renesas,pci-r8a7794" for the R8A7794 SoC;
              "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device
 
index 558fe528ae1951104b6266a2f5ed4849017c6b35..6cf99690eef94fa4ed033fb91e08c2aa010395bf 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
 compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
            "renesas,pcie-r8a7790" for the R8A7790 SoC;
            "renesas,pcie-r8a7791" for the R8A7791 SoC;
+           "renesas,pcie-r8a7793" for the R8A7793 SoC;
            "renesas,pcie-r8a7795" for the R8A7795 SoC;
            "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.
 
index add7c38ec7d888b958df828139c1847ddc61c7e4..8662f3aaf312d9484a8e74373a5971a47d6b908a 100644 (file)
@@ -91,6 +91,9 @@ mpp60         60       gpio, dev(ale1), uart1(rxd), sata0(prsnt), pcie(rstout),
 mpp61         61       gpo, dev(we1), uart1(txd), audio(lrclk)
 mpp62         62       gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
                        audio(mclk), uart0(cts)
-mpp63         63       gpo, spi0(sck), tclk
+mpp63         63       gpio, spi0(sck), tclk
 mpp64         64       gpio, spi0(miso), spi0(cs1)
 mpp65         65       gpio, spi0(mosi), spi0(cs2)
+
+Note: According to the datasheet mpp63 is a gpo, but there is at least
+one example of gpio usage on the D-Link DNS-327L board.
index 112756e11802c7ccfbaccca4462e0987f88e0053..13dc6a3fdb4a1b239a38f1cd19dd0bb1ff2a681d 100644 (file)
@@ -6,6 +6,7 @@ powered up/down by software based on different application scenes to save power.
 Required properties for power domain controller:
 - compatible: Should be one of the following.
        "rockchip,rk3288-power-controller" - for RK3288 SoCs.
+       "rockchip,rk3368-power-controller" - for RK3368 SoCs.
 - #power-domain-cells: Number of cells in a power-domain specifier.
        Should be 1 for multiple PM domains.
 - #address-cells: Should be 1.
@@ -14,6 +15,7 @@ Required properties for power domain controller:
 Required properties for power domain sub nodes:
 - reg: index of the power domain, should use macros in:
        "include/dt-bindings/power/rk3288-power.h" - for RK3288 type power domain.
+       "include/dt-bindings/power/rk3368-power.h" - for RK3368 type power domain.
 - clocks (optional): phandles to clocks which need to be enabled while power domain
        switches state.
 
@@ -31,11 +33,24 @@ Example:
                };
        };
 
+        power: power-controller {
+                compatible = "rockchip,rk3368-power-controller";
+                #power-domain-cells = <1>;
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                pd_gpu_1 {
+                        reg = <RK3368_PD_GPU_1>;
+                        clocks = <&cru ACLK_GPU_CFG>;
+                };
+        };
+
 Node of a device using power domains must have a power-domains property,
 containing a phandle to the power device node and an index specifying which
 power domain to use.
 The index should use macros in:
        "include/dt-bindings/power/rk3288-power.h" - for rk3288 type power domain.
+       "include/dt-bindings/power/rk3368-power.h" - for rk3368 type power domain.
 
 Example of the node using power domain:
 
@@ -44,3 +59,9 @@ Example of the node using power domain:
                power-domains = <&power RK3288_PD_GPU>;
                /* ... */
        };
+
+       node {
+                /* ... */
+                power-domains = <&power RK3368_PD_GPU_1>;
+                /* ... */
+        };
index 332e625f6ed01cb4e442c0ea530ab29eb92f2a77..e5ee3f15989337f37b9d5e43036c31a2b4c9ff11 100644 (file)
@@ -1,8 +1,9 @@
 * Renesas R-Car Thermal
 
 Required properties:
-- compatible           : "renesas,thermal-<soctype>", "renesas,rcar-thermal"
-                         as fallback.
+- compatible           : "renesas,thermal-<soctype>",
+                          "renesas,rcar-gen2-thermal" (with thermal-zone) or
+                          "renesas,rcar-thermal" (without thermal-zone) as fallback.
                          Examples with soctypes are:
                            - "renesas,thermal-r8a73a4" (R-Mobile APE6)
                            - "renesas,thermal-r8a7779" (R-Car H1)
@@ -36,3 +37,35 @@ thermal@e61f0000 {
                0xe61f0300 0x38>;
        interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
 };
+
+Example (with thermal-zone):
+
+thermal-zones {
+       cpu_thermal: cpu-thermal {
+               polling-delay-passive   = <1000>;
+               polling-delay           = <5000>;
+
+               thermal-sensors = <&thermal>;
+
+               trips {
+                       cpu-crit {
+                               temperature     = <115000>;
+                               hysteresis      = <0>;
+                               type            = "critical";
+                       };
+               };
+               cooling-maps {
+               };
+       };
+};
+
+thermal: thermal@e61f0000 {
+       compatible =    "renesas,thermal-r8a7790",
+                       "renesas,rcar-gen2-thermal",
+                       "renesas,rcar-thermal";
+       reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
+       interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
+       power-domains = <&cpg_clocks>;
+       #thermal-sensor-cells = <0>;
+};
index 669dc6ce43305e17f29ea95066bec2a80b59b70d..6f4b12f7b8444cc121ece7f6de89ebb217e64461 100644 (file)
@@ -190,7 +190,7 @@ and watch another one.
 Patches, comments and suggestions are very very welcome.
 
 3. Acknowledgements
-   Amaury Demol (ademol@dibcom.fr) and Francois Kanounnikoff from DiBcom for
+   Amaury Demol (Amaury.Demol@parrot.com) and Francois Kanounnikoff from DiBcom for
     providing specs, code and help, on which the dvb-dibusb, dib3000mb and
     dib3000mc are based.
 
index af6816bccb439d76a7f80d43f97f4a95e7d604c2..df1d1f3c9af290aa6ffaa1584f71ba6025e54c4e 100644 (file)
@@ -9,7 +9,7 @@
     |       alpha: | TODO |
     |         arc: | TODO |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |       avr32: | TODO |
     |    blackfin: | TODO |
     |         c6x: | TODO |
diff --git a/Documentation/filesystems/orangefs.txt b/Documentation/filesystems/orangefs.txt
new file mode 100644 (file)
index 0000000..925a53e
--- /dev/null
@@ -0,0 +1,353 @@
+ORANGEFS
+========
+
+OrangeFS is an LGPL userspace scale-out parallel storage system. It is ideal
+for the large storage problems faced by HPC, big data, streaming video,
+genomics, and bioinformatics.
+
+Orangefs, originally called PVFS, was first developed in 1993 by
+Walt Ligon and Eric Blumer as a parallel file system for Parallel
+Virtual Machine (PVM) as part of a NASA grant to study the I/O patterns
+of parallel programs.
+
+Orangefs features include:
+
+  * Distributes file data among multiple file servers
+  * Supports simultaneous access by multiple clients
+  * Stores file data and metadata on servers using local file system
+    and access methods
+  * Userspace implementation is easy to install and maintain
+  * Direct MPI support
+  * Stateless
+
+
+MAILING LIST
+============
+
+http://beowulf-underground.org/mailman/listinfo/pvfs2-users
+
+
+DOCUMENTATION
+=============
+
+http://www.orangefs.org/documentation/
+
+
+USERSPACE FILESYSTEM SOURCE
+===========================
+
+http://www.orangefs.org/download
+
+Orangefs versions prior to 2.9.3 are not compatible with the
+upstream version of the kernel client.
+
+
+BUILDING THE USERSPACE FILESYSTEM ON A SINGLE SERVER
+====================================================
+
+When Orangefs is upstream, "--with-kernel" shouldn't be needed, but
+until then the path to where the kernel with the Orangefs kernel client
+patch was built is needed to ensure that pvfs2-client-core (the bridge
+between kernel space and user space) will build properly. You can omit
+--prefix if you don't care that things are sprinkled around in
+/usr/local.
+
+./configure --prefix=/opt/ofs --with-kernel=/path/to/orangefs/kernel
+
+make
+
+make install
+
+Create an orangefs config file:
+/opt/ofs/bin/pvfs2-genconfig /etc/pvfs2.conf
+
+  for "Enter hostnames", use the hostname, don't let it default to
+  localhost.
+
+create a pvfs2tab file in /etc:
+cat /etc/pvfs2tab
+tcp://myhostname:3334/orangefs /mymountpoint pvfs2 defaults,noauto 0 0
+
+create the mount point you specified in the tab file if needed:
+mkdir /mymountpoint
+
+bootstrap the server:
+/opt/ofs/sbin/pvfs2-server /etc/pvfs2.conf -f
+
+start the server:
+/opt/ofs/sbin/pvfs2-server /etc/pvfs2.conf
+
+Now the server is running. At this point you might like to
+prove things are working with:
+
+/opt/ofs/bin/pvfs2-ls /mymountpoint
+
+You might not want to enforce SELinux; it doesn't seem to matter as of
+Linux 3.11...
+
+If stuff seems to be working, turn on the client core:
+/opt/ofs/sbin/pvfs2-client -p /opt/ofs/sbin/pvfs2-client-core
+
+Mount your filesystem.
+mount -t pvfs2 tcp://myhostname:3334/orangefs /mymountpoint
+
+
+OPTIONS
+=======
+
+The following mount options are accepted:
+
+  acl
+    Allow the use of Access Control Lists on files and directories.
+
+  intr
+    Some operations between the kernel client and the user space
+    filesystem can be interruptible, such as changes in debug levels
+    and the setting of tunable parameters.
+
+  local_lock
+    Enable posix locking from the perspective of "this" kernel. The
+    default file_operations lock action is to return ENOSYS. Posix
+    locking kicks in if the filesystem is mounted with -o local_lock.
+    Distributed locking is being worked on for the future.
+
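+A minimal combined example (hostname and mount point are the placeholders
+used in the setup steps above):
+
+mount -t pvfs2 -o acl,intr,local_lock tcp://myhostname:3334/orangefs /mymountpoint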
+
+DEBUGGING
+=========
+
+If you want the debug (GOSSIP) statements in a particular
+source file (inode.c for example) to go to syslog:
+
+  echo inode > /sys/kernel/debug/orangefs/kernel-debug
+
+No debugging (the default):
+
+  echo none > /sys/kernel/debug/orangefs/kernel-debug
+
+Debugging from several source files:
+
+  echo inode,dir > /sys/kernel/debug/orangefs/kernel-debug
+
+All debugging:
+
+  echo all > /sys/kernel/debug/orangefs/kernel-debug
+
+Get a list of all debugging keywords:
+
+  cat /sys/kernel/debug/orangefs/debug-help
+
+
+PROTOCOL BETWEEN KERNEL MODULE AND USERSPACE
+============================================
+
+Orangefs is a user space filesystem and an associated kernel module.
+We'll just refer to the user space part of Orangefs as "userspace"
+from here on out. Orangefs descends from PVFS, and userspace code
+still uses PVFS for function and variable names. Userspace typedefs
+many of the important structures. Function and variable names in
+the kernel module have been transitioned to "orangefs", and The Linux
+Coding Style avoids typedefs, so kernel module structures that
+correspond to userspace structures are not typedefed.
+
+The kernel module implements a pseudo device that userspace
+can read from and write to. Userspace can also manipulate the
+kernel module through the pseudo device with ioctl.
+
+THE BUFMAP:
+
+At startup userspace allocates two page-size-aligned (posix_memalign)
+mlocked memory buffers; one is used for IO and one is used for readdir
+operations. The IO buffer is 41943040 bytes and the readdir buffer is
+4194304 bytes. Each buffer contains logical chunks, or partitions, and
+a pointer to each buffer is added to its own PVFS_dev_map_desc structure
+which also describes its total size, as well as the size and number of
+the partitions.
+
+A pointer to the IO buffer's PVFS_dev_map_desc structure is sent to a
+mapping routine in the kernel module with an ioctl. The structure is
+copied from user space to kernel space with copy_from_user and is used
+to initialize the kernel module's "bufmap" (struct orangefs_bufmap), which
+then contains:
+
+  * refcnt - a reference counter
+  * desc_size - PVFS2_BUFMAP_DEFAULT_DESC_SIZE (4194304) - the IO buffer's
+    partition size, which represents the filesystem's block size and
+    is used for s_blocksize in super blocks.
+  * desc_count - PVFS2_BUFMAP_DEFAULT_DESC_COUNT (10) - the number of
+    partitions in the IO buffer.
+  * desc_shift - log2(desc_size), used for s_blocksize_bits in super blocks.
+  * total_size - the total size of the IO buffer.
+  * page_count - the number of 4096 byte pages in the IO buffer.
+  * page_array - a pointer to page_count * (sizeof(struct page*)) bytes
+    of kcalloced memory. This memory is used as an array of pointers
+    to each of the pages in the IO buffer through a call to get_user_pages.
+  * desc_array - a pointer to desc_count * (sizeof(struct orangefs_bufmap_desc))
+    bytes of kcalloced memory. This memory is further initialized:
+
+      user_desc is the kernel's copy of the IO buffer's ORANGEFS_dev_map_desc
+      structure. user_desc->ptr points to the IO buffer.
+
+      pages_per_desc = bufmap->desc_size / PAGE_SIZE
+      offset = 0
+
+        bufmap->desc_array[0].page_array = &bufmap->page_array[offset]
+        bufmap->desc_array[0].array_count = pages_per_desc = 1024
+        bufmap->desc_array[0].uaddr = (user_desc->ptr) + (0 * 1024 * 4096)
+        offset += 1024
+                           .
+                           .
+                           .
+        bufmap->desc_array[9].page_array = &bufmap->page_array[offset]
+        bufmap->desc_array[9].array_count = pages_per_desc = 1024
+        bufmap->desc_array[9].uaddr = (user_desc->ptr) +
+                                               (9 * 1024 * 4096)
+        offset += 1024
+
+  * buffer_index_array - a desc_count sized array of ints, used to
+    indicate which of the IO buffer's partitions are available to use.
+  * buffer_index_lock - a spinlock to protect buffer_index_array during update.
+  * readdir_index_array - a five (ORANGEFS_READDIR_DEFAULT_DESC_COUNT) element
+    int array used to indicate which of the readdir buffer's partitions are
+    available to use.
+  * readdir_index_lock - a spinlock to protect readdir_index_array during
+    update.
+
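+A rough C sketch of the bufmap, reconstructed from the member list above
+(types are illustrative, not the in-tree definitions):
+
+  struct orangefs_bufmap_sketch {
+          int refcnt;                       /* reference counter */
+          int desc_size;                    /* IO partition size, fs block size */
+          int desc_count;                   /* number of IO partitions */
+          int desc_shift;                   /* log2(desc_size) */
+          int total_size;                   /* total size of the IO buffer */
+          int page_count;                   /* number of 4096 byte pages */
+          struct page **page_array;         /* one entry per page (get_user_pages) */
+          struct orangefs_bufmap_desc *desc_array;  /* one entry per partition */
+          int *buffer_index_array;          /* which IO partitions are free */
+          spinlock_t buffer_index_lock;
+          int readdir_index_array[5];       /* ORANGEFS_READDIR_DEFAULT_DESC_COUNT */
+          spinlock_t readdir_index_lock;
+  };
+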
+OPERATIONS:
+
+The kernel module builds an "op" (struct orangefs_kernel_op_s) when it
+needs to communicate with userspace. Part of the op contains the "upcall"
+which expresses the request to userspace. Part of the op eventually
+contains the "downcall" which expresses the results of the request.
+
+The slab allocator is used to keep a cache of op structures handy.
+
+The life cycle of a typical op goes like this:
+
+  - obtain and initialize an op structure from the op_cache.
+
+  - queue the op to the pvfs device so that its upcall data can be
+    read by userspace.
+
+  - wait for userspace to write downcall data back to the pvfs device.
+
+  - consume the downcall and return the op struct to the op_cache.
+
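+The same cycle, as hedged pseudo-C (the helper names are illustrative, not
+the module's exact API):
+
+  struct orangefs_kernel_op_s *op;
+
+  op = op_alloc(type);               /* obtain/initialize from the op_cache */
+  fill_upcall(&op->upcall);          /* express the request to userspace */
+  queue_op_to_device(op);            /* upcall readable via the pseudo device */
+  wait_for_downcall(op);             /* userspace writes the downcall back */
+  consume_downcall(&op->downcall);   /* use the results */
+  op_release(op);                    /* return the op to the op_cache */
+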
+Some ops are atypical with respect to their payloads: readdir and io ops.
+
+  - readdir ops use the smaller of the two pre-allocated pre-partitioned
+    memory buffers. The readdir buffer is only available to userspace.
+    The kernel module obtains an index to a free partition before launching
+    a readdir op. Userspace deposits the results into the indexed partition
+    and then writes them back to the pvfs device.
+
+  - io (read and write) ops use the larger of the two pre-allocated
+    pre-partitioned memory buffers. The IO buffer is accessible from
+    both userspace and the kernel module. The kernel module obtains an
+    index to a free partition before launching an io op. The kernel module
+    deposits write data into the indexed partition, to be consumed
+    directly by userspace. Userspace deposits the results of read
+    requests into the indexed partition, to be consumed directly
+    by the kernel module.
+
+Responses to kernel requests are all packaged in pvfs2_downcall_t
+structs. Besides a few other members, pvfs2_downcall_t contains a
+union of structs, each of which is associated with a particular
+response type.
+
+The several members outside of the union are:
+ - int32_t type - type of operation.
+ - int32_t status - return code for the operation.
+ - int64_t trailer_size - 0 unless readdir operation.
+ - char *trailer_buf - initialized to NULL, used during readdir operations.
+
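+As a sketch (userspace typedefs the real pvfs2_downcall_t; the union tag and
+member names below are illustrative):
+
+  typedef struct {
+          int32_t type;             /* type of operation */
+          int32_t status;           /* return code for the operation */
+          int64_t trailer_size;     /* 0 unless readdir operation */
+          char *trailer_buf;        /* NULL unless readdir operation */
+          union {
+                  pvfs2_io_response_t io;           /* PVFS2_VFS_OP_FILE_IO */
+                  pvfs2_statfs_response_t statfs;   /* PVFS2_VFS_OP_STATFS */
+                  /* ... one member per response type ... */
+          } resp;
+  } pvfs2_downcall_sketch_t;
+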
+The appropriate member inside the union is filled out for any
+particular response.
+
+  PVFS2_VFS_OP_FILE_IO
+    fill a pvfs2_io_response_t
+
+  PVFS2_VFS_OP_LOOKUP
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_CREATE
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_SYMLINK
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_GETATTR
+    fill in a PVFS_sys_attr_s (tons of stuff the kernel doesn't need)
+    fill in a string with the link target when the object is a symlink.
+
+  PVFS2_VFS_OP_MKDIR
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_STATFS
+    fill a pvfs2_statfs_response_t with useless info <g>. It is hard for
+    us to know, in a timely fashion, these statistics about our
+    distributed network filesystem. 
+
+  PVFS2_VFS_OP_FS_MOUNT
+    fill a pvfs2_fs_mount_response_t which is just like a PVFS_object_kref
+    except its members are in a different order and "__pad1" is replaced
+    with "id".
+
+  PVFS2_VFS_OP_GETXATTR
+    fill a pvfs2_getxattr_response_t
+
+  PVFS2_VFS_OP_LISTXATTR
+    fill a pvfs2_listxattr_response_t
+
+  PVFS2_VFS_OP_PARAM
+    fill a pvfs2_param_response_t
+
+  PVFS2_VFS_OP_PERF_COUNT
+    fill a pvfs2_perf_count_response_t
+
+  PVFS2_VFS_OP_FSKEY
+    fill a pvfs2_fs_key_response_t
+
+  PVFS2_VFS_OP_READDIR
+    jam everything needed to represent a pvfs2_readdir_response_t into
+    the readdir buffer descriptor specified in the upcall.
+
+writev() on /dev/pvfs2-req is used to pass responses to the requests
+made by the kernel side.
+
+A buffer_list containing:
+  - a pointer to the prepared response to the request from the
+    kernel (struct pvfs2_downcall_t).
+  - and also, in the case of a readdir request, a pointer to a
+    buffer containing descriptors for the objects in the target
+    directory.
+... is sent to the function (PINT_dev_write_list) which performs
+the writev.
+
+PINT_dev_write_list has a local iovec array: struct iovec io_array[10];
+
+The first four elements of io_array are initialized like this for all
+responses:
+
+  io_array[0].iov_base = address of local variable "proto_ver" (int32_t)
+  io_array[0].iov_len = sizeof(int32_t)
+
+  io_array[1].iov_base = address of global variable "pdev_magic" (int32_t)
+  io_array[1].iov_len = sizeof(int32_t)
+  
+  io_array[2].iov_base = address of parameter "tag" (PVFS_id_gen_t)
+  io_array[2].iov_len = sizeof(int64_t)
+
+  io_array[3].iov_base = address of out_downcall member (pvfs2_downcall_t)
+                         of global variable vfs_request (vfs_request_t)
+  io_array[3].iov_len = sizeof(pvfs2_downcall_t)
+
+Readdir responses initialize the fifth element io_array like this:
+
+  io_array[4].iov_base = contents of member trailer_buf (char *)
+                         from out_downcall member of global variable
+                         vfs_request
+  io_array[4].iov_len = contents of member trailer_size (PVFS_size)
+                        from out_downcall member of global variable
+                        vfs_request
+  
+
index fde9fd06fa988b2eac60f10880973351e783fe37..843b045b4069a62c154bdf6c2723b3061bd81abd 100644 (file)
@@ -240,8 +240,8 @@ Table 1-2: Contents of the status files (as of 4.1)
  RssFile                     size of resident file mappings
  RssShmem                    size of resident shmem memory (includes SysV shm,
                              mapping of tmpfs and shared anonymous mappings)
- VmData                      size of data, stack, and text segments
- VmStk                       size of data, stack, and text segments
+ VmData                      size of private data segments
+ VmStk                       size of stack segments
  VmExe                       size of text segment
  VmLib                       size of shared library code
  VmPTE                       size of page table entries
@@ -356,7 +356,7 @@ address           perms offset  dev   inode      pathname
 a7cb1000-a7cb2000 ---p 00000000 00:00 0
 a7cb2000-a7eb2000 rw-p 00000000 00:00 0
 a7eb2000-a7eb3000 ---p 00000000 00:00 0
-a7eb3000-a7ed5000 rw-p 00000000 00:00 0          [stack:1001]
+a7eb3000-a7ed5000 rw-p 00000000 00:00 0
 a7ed5000-a8008000 r-xp 00000000 03:00 4222       /lib/libc.so.6
 a8008000-a800a000 r--p 00133000 03:00 4222       /lib/libc.so.6
 a800a000-a800b000 rw-p 00135000 03:00 4222       /lib/libc.so.6
@@ -388,7 +388,6 @@ is not associated with a file:
 
  [heap]                   = the heap of the program
  [stack]                  = the stack of the main process
- [stack:1001]             = the stack of the thread with tid 1001
  [vdso]                   = the "virtual dynamic shared object",
                             the kernel system call handler
 
@@ -396,10 +395,8 @@ is not associated with a file:
 
 The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
 of the individual tasks of a process. In this file you will see a mapping marked
-as [stack] if that task sees it as a stack. This is a key difference from the
-content of /proc/PID/maps, where you will see all mappings that are being used
-as stack by all of those tasks. Hence, for the example above, the task-level
-map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
+as [stack] if that task sees it as a stack. Hence, for the example above, the
+task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
 
 08048000-08049000 r-xp 00000000 03:00 8312       /opt/test
 08049000-0804a000 rw-p 00001000 03:00 8312       /opt/test
diff --git a/Documentation/hwmon/ltc2990 b/Documentation/hwmon/ltc2990
new file mode 100644 (file)
index 0000000..c25211e
--- /dev/null
@@ -0,0 +1,43 @@
+Kernel driver ltc2990
+=====================
+
+Supported chips:
+  * Linear Technology LTC2990
+    Prefix: 'ltc2990'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc2990
+
+Author: Mike Looijmans <mike.looijmans@topic.nl>
+
+
+Description
+-----------
+
+LTC2990 is a Quad I2C Voltage, Current and Temperature Monitor.
+The chip's inputs can measure 4 voltages, or two inputs together (1+2 and 3+4)
+can be combined to measure a differential voltage, which is typically used to
+measure current through a series resistor, or a temperature.
+
+This driver currently uses the 2x differential mode only. In order to support
+other modes, the driver will need to be expanded.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate
+devices explicitly.
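+
+For example, using the standard I2C sysfs new_device interface (the bus
+number and address here are illustrative; use the ones for your board):
+
+  # echo ltc2990 0x4c > /sys/bus/i2c/devices/i2c-1/new_device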
+
+
+Sysfs attributes
+----------------
+
+The "curr*_input" measurements actually report the voltage drop across the
+input pins in microvolts. This is equivalent to the current through a 1mOhm
+sense resistor. Divide the reported value by the actual sense resistor value
+in mOhm to get the actual value.
+
+in0_input     Voltage at Vcc pin in millivolt (range 2.5V to 5V)
+temp1_input   Internal chip temperature in millidegrees Celsius
+curr1_input   Current in mA across v1-v2 assuming a 1mOhm sense resistor.
+curr2_input   Current in mA across v3-v4 assuming a 1mOhm sense resistor.
index 37cbf472a19d19aef7aea0217d008943eb28909f..027633246bd8a3f052264b770a8115e22e3db194 100644 (file)
@@ -5,17 +5,17 @@ Supported chips:
   * Maxim MAX34440
     Prefixes: 'max34440'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34440.pdf
   * Maxim MAX34441
     PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
     Prefixes: 'max34441'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34441.pdf
   * Maxim MAX34446
     PMBus Power-Supply Data Logger
     Prefixes: 'max34446'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34446.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
   * Maxim MAX34460
     PMBus 12-Channel Voltage Monitor & Sequencer
     Prefix: 'max34460'
index 87d40a72f6a1bec998be718138347a875c00117a..9a53c929f017d16527270bc2244352edf1d34cd8 100644 (file)
@@ -1496,6 +1496,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        could change it dynamically, usually by
                        /sys/module/printk/parameters/ignore_loglevel.
 
+       ignore_rlimit_data
+                       Ignore RLIMIT_DATA setting for data mappings,
+                       print warning at first misuse.  Can be changed via
+                       /sys/module/kernel/parameters/ignore_rlimit_data.
+
        ihash_entries=  [KNL]
                        Set number of hash buckets for inode cache.
 
@@ -4230,6 +4235,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        The default value of this parameter is determined by
                        the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
 
+       workqueue.debug_force_rr_cpu
+                       Workqueues used to implicitly guarantee that work
+                       items queued without an explicit CPU are put on the
+                       local CPU.  This guarantee is no longer true, and
+                       while the local CPU is still preferred, work items
+                       may be put on foreign CPUs.  This debug option
+                       forces round-robin CPU selection to flush out
+                       usages that depend on the now-broken guarantee.
+                       When enabled, memory and cache locality will be
+                       impacted.
+
        x2apic_phys     [X86-64,APIC] Use x2apic physical mode instead of
                        default x2apic cluster mode on platforms
                        supporting x2apic.
index ff23b755f5e45cc8b6d367f2e55564cdf3a1782d..1b5e7a7f2185be117ba99d3c4303d14671cb9840 100644 (file)
@@ -187,7 +187,7 @@ interfaces to the kernel module settings.
 
 For more information, please see the manpage (man batctl).
 
-batctl is available on http://www.open-mesh.org/
+batctl is available on https://www.open-mesh.org/
 
 
 CONTACT
index ceb44a095a27ba98804442c1f7aa91ed616b7d42..73b36d7c7b0d67309cce7bf0bd8a46f0c6ee7ea1 100644 (file)
@@ -594,7 +594,7 @@ tcp_fastopen - INTEGER
 
 tcp_syn_retries - INTEGER
        Number of times initial SYNs for an active TCP connection attempt
-       will be retransmitted. Should not be higher than 255. Default value
+       will be retransmitted. Should not be higher than 127. Default value
       is 6, which corresponds to 63 seconds till the last retransmission
       with the current initial RTO of 1 second. With this the final timeout
       for an active TCP connection attempt will happen after 127 seconds.
index 3a930072b161fd51b3b1d007daac673526609dc2..ec8f934c2eb240f93a54b0044faadd913e7855f8 100644 (file)
@@ -28,6 +28,23 @@ radiotap headers and used to control injection:
    IEEE80211_RADIOTAP_F_TX_NOACK: frame should be sent without waiting for
                                  an ACK even if it is a unicast frame
 
+ * IEEE80211_RADIOTAP_RATE
+
+   legacy rate for the transmission (only for devices without their own rate control)
+
+ * IEEE80211_RADIOTAP_MCS
+
+   HT rate for the transmission (only for devices without their own rate
+   control). Some flags are also parsed:
+
+   IEEE80211_TX_RC_SHORT_GI: use short guard interval
+   IEEE80211_TX_RC_40_MHZ_WIDTH: send in HT40 mode
+
+ * IEEE80211_RADIOTAP_DATA_RETRIES
+
+   number of retries when either IEEE80211_RADIOTAP_RATE or
+   IEEE80211_RADIOTAP_MCS was used
+
 The injection code can also skip all other currently defined radiotap fields
 facilitating replay of captured radiotap headers directly.
 
index 053f613fc9a913af132da7f794742c45d6727dfc..07e4cdf024073033e2a52213bcbed4d562111a33 100644 (file)
@@ -3025,7 +3025,7 @@ len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
 and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
 which is the maximum number of possibly pending cpu-local interrupts.
 
-4.90 KVM_SMI
+4.96 KVM_SMI
 
 Capability: KVM_CAP_X86_SMM
 Architectures: x86
index 9f9ec9f76039404a15114c97fe67535fe41fa88c..4e4b6f10d8410f84d8ea64ac51c68a6932bf2158 100644 (file)
@@ -400,3 +400,7 @@ wm8350_wdt:
 nowayout: Watchdog cannot be stopped once started
        (default=kernel config parameter)
 -------------------------------------------------
+sun4v_wdt:
+timeout_ms: Watchdog timeout in milliseconds (1..180000, default=60000)
+nowayout: Watchdog cannot be stopped once started
+-------------------------------------------------
index 30aca4aa5467b53d6a171a27b57a4357fa09127b..74dfe572c21ed3b0140d15fdf9c048396339aa11 100644 (file)
@@ -223,9 +223,7 @@ F:  drivers/scsi/aacraid/
 
 ABI/API
 L:     linux-api@vger.kernel.org
-F:     Documentation/ABI/
 F:     include/linux/syscalls.h
-F:     include/uapi/
 F:     kernel/sys_ni.c
 
 ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
@@ -675,24 +673,25 @@ F:        drivers/gpu/drm/radeon/radeon_kfd.c
 F:     drivers/gpu/drm/radeon/radeon_kfd.h
 F:     include/uapi/linux/kfd_ioctl.h
 
+AMD SEATTLE DEVICE TREE SUPPORT
+M:     Brijesh Singh <brijeshkumar.singh@amd.com>
+M:     Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+M:     Tom Lendacky <thomas.lendacky@amd.com>
+S:     Supported
+F:     arch/arm64/boot/dts/amd/
+
 AMD XGBE DRIVER
 M:     Tom Lendacky <thomas.lendacky@amd.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/amd/xgbe/
+F:     arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
 
 AMS (Apple Motion Sensor) DRIVER
 M:     Michael Hanselmann <linux-kernel@hansmi.ch>
 S:     Supported
 F:     drivers/macintosh/ams/
 
-AMSO1100 RNIC DRIVER
-M:     Tom Tucker <tom@opengridcomputing.com>
-M:     Steve Wise <swise@opengridcomputing.com>
-L:     linux-rdma@vger.kernel.org
-S:     Maintained
-F:     drivers/infiniband/hw/amso1100/
-
 ANALOG DEVICES INC AD9389B DRIVER
 M:     Hans Verkuil <hans.verkuil@cisco.com>
 L:     linux-media@vger.kernel.org
@@ -967,6 +966,8 @@ M:  Rob Herring <robh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-highbank/
+F:     arch/arm/boot/dts/highbank.dts
+F:     arch/arm/boot/dts/ecx-*.dts*
 
 ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT
 M:     Krzysztof Halasa <khalasa@piap.pl>
@@ -1042,6 +1043,7 @@ M:        Barry Song <baohua@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
 S:     Maintained
+F:     arch/arm/boot/dts/prima2*
 F:     arch/arm/mach-prima2/
 F:     drivers/clk/sirf/
 F:     drivers/clocksource/timer-prima2.c
@@ -1143,6 +1145,10 @@ W:       http://www.hisilicon.com
 S:     Supported
 T:     git git://github.com/hisilicon/linux-hisi.git
 F:     arch/arm/mach-hisi/
+F:     arch/arm/boot/dts/hi3*
+F:     arch/arm/boot/dts/hip*
+F:     arch/arm/boot/dts/hisi*
+F:     arch/arm64/boot/dts/hisilicon/
 
 ARM/HP JORNADA 7XX MACHINE SUPPORT
 M:     Kristoffer Ericson <kristoffer.ericson@gmail.com>
@@ -1219,6 +1225,7 @@ M:        Santosh Shilimkar <ssantosh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-keystone/
+F:     arch/arm/boot/dts/k2*
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
@@ -1287,6 +1294,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-berlin/
 F:     arch/arm/boot/dts/berlin*
+F:     arch/arm64/boot/dts/marvell/berlin*
 
 
 ARM/Marvell Dove/MV78xx0/Orion SOC support
@@ -1425,7 +1433,10 @@ S:       Maintained
 F:     arch/arm/boot/dts/qcom-*.dts
 F:     arch/arm/boot/dts/qcom-*.dtsi
 F:     arch/arm/mach-qcom/
+F:     arch/arm64/boot/dts/qcom/*
+F:     drivers/i2c/busses/i2c-qup.c
 F:     drivers/soc/qcom/
+F:     drivers/spi/spi-qup.c
 F:     drivers/tty/serial/msm_serial.h
 F:     drivers/tty/serial/msm_serial.c
 F:     drivers/*/pm8???-*
@@ -1484,6 +1495,8 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/s3c*
+F:     arch/arm/boot/dts/s5p*
+F:     arch/arm/boot/dts/samsung*
 F:     arch/arm/boot/dts/exynos*
 F:     arch/arm64/boot/dts/exynos/
 F:     arch/arm/plat-samsung/
@@ -1493,6 +1506,7 @@ F:        arch/arm/mach-s5p*/
 F:     arch/arm/mach-exynos*/
 F:     drivers/*/*s3c2410*
 F:     drivers/*/*/*s3c2410*
+F:     drivers/soc/samsung/*
 F:     drivers/spi/spi-s3c*
 F:     sound/soc/samsung/*
 F:     Documentation/arm/Samsung/
@@ -1563,6 +1577,7 @@ S:        Maintained
 F:     arch/arm/mach-socfpga/
 F:     arch/arm/boot/dts/socfpga*
 F:     arch/arm/configs/socfpga_defconfig
+F:     arch/arm64/boot/dts/altera/
 W:     http://www.rocketboards.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
@@ -1716,7 +1731,7 @@ M:        Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/vexpress*
-F:     arch/arm64/boot/dts/arm/vexpress*
+F:     arch/arm64/boot/dts/arm/
 F:     arch/arm/mach-vexpress/
 F:     */*/vexpress*
 F:     */*/*/vexpress*
@@ -2147,7 +2162,7 @@ M:        Marek Lindner <mareklindner@neomailbox.ch>
 M:     Simon Wunderlich <sw@simonwunderlich.de>
 M:     Antonio Quartulli <a@unstable.cc>
 L:     b.a.t.m.a.n@lists.open-mesh.org
-W:     http://www.open-mesh.org/
+W:     https://www.open-mesh.org/
 S:     Maintained
 F:     net/batman-adv/
 
@@ -2343,6 +2358,7 @@ F:        arch/arm/mach-bcm/
 F:     arch/arm/boot/dts/bcm113*
 F:     arch/arm/boot/dts/bcm216*
 F:     arch/arm/boot/dts/bcm281*
+F:     arch/arm64/boot/dts/broadcom/
 F:     arch/arm/configs/bcm_defconfig
 F:     drivers/mmc/host/sdhci-bcm-kona.c
 F:     drivers/clocksource/bcm_kona_timer.c
@@ -3445,7 +3461,7 @@ S:        Maintained
 F:     drivers/usb/dwc2/
 
 DESIGNWARE USB3 DRD IP DRIVER
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 L:     linux-omap@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -4184,13 +4200,6 @@ W:       http://aeschi.ch.eu.org/efs/
 S:     Orphan
 F:     fs/efs/
 
-EHCA (IBM GX bus InfiniBand adapter) DRIVER
-M:     Hoang-Nam Nguyen <hnguyen@de.ibm.com>
-M:     Christoph Raisch <raisch@de.ibm.com>
-L:     linux-rdma@vger.kernel.org
-S:     Supported
-F:     drivers/infiniband/hw/ehca/
-
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
 M:     Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
@@ -5809,12 +5818,6 @@ M:       Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S:     Maintained
 F:     net/ipv4/netfilter/ipt_MASQUERADE.c
 
-IPATH DRIVER
-M:     Mike Marciniszyn <infinipath@intel.com>
-L:     linux-rdma@vger.kernel.org
-S:     Maintained
-F:     drivers/staging/rdma/ipath/
-
 IPMI SUBSYSTEM
 M:     Corey Minyard <minyard@acm.org>
 L:     openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -7370,7 +7373,7 @@ F:        drivers/tty/isicom.c
 F:     include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
@@ -7939,7 +7942,7 @@ F:        drivers/media/platform/omap3isp/
 F:     drivers/staging/media/omap4iss/
 
 OMAP USB SUPPORT
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 L:     linux-omap@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -8127,6 +8130,14 @@ S:       Supported
 F:     fs/overlayfs/
 F:     Documentation/filesystems/overlayfs.txt
 
+ORANGEFS FILESYSTEM
+M:     Mike Marshall <hubcap@omnibond.com>
+L:     pvfs2-developers@beowulf-underground.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
+S:     Supported
+F:     fs/orangefs/
+F:     Documentation/filesystems/orangefs.txt
+
 P54 WIRELESS DRIVER
 M:     Christian Lamparter <chunkeey@googlemail.com>
 L:     linux-wireless@vger.kernel.org
@@ -8818,6 +8829,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/hzhuang1/linux.git
 T:     git git://github.com/rjarzmik/linux.git
 S:     Maintained
+F:     arch/arm/boot/dts/pxa*
 F:     arch/arm/mach-pxa/
 F:     drivers/dma/pxa*
 F:     drivers/pcmcia/pxa2xx*
@@ -8847,6 +8859,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/hzhuang1/linux.git
 T:     git git://git.linaro.org/people/ycmiao/pxa-linux.git
 S:     Maintained
+F:     arch/arm/boot/dts/mmp*
 F:     arch/arm/mach-mmp/
 
 PXA MMCI DRIVER
@@ -9793,10 +9806,11 @@ S:      Supported
 F:     drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
-M:     Sathya Perla <sathya.perla@avagotech.com>
-M:     Ajit Khaparde <ajit.khaparde@avagotech.com>
-M:     Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
-M:     Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+M:     Sathya Perla <sathya.perla@broadcom.com>
+M:     Ajit Khaparde <ajit.khaparde@broadcom.com>
+M:     Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
+M:     Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M:     Somnath Kotur <somnath.kotur@broadcom.com>
 L:     netdev@vger.kernel.org
 W:     http://www.emulex.com
 S:     Supported
@@ -10158,6 +10172,7 @@ S:      Supported
 F:     drivers/media/pci/solo6x10/
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
+M:     Shaohua Li <shli@kernel.org>
 L:     linux-raid@vger.kernel.org
 T:     git git://neil.brown.name/md
 S:     Supported
@@ -10173,7 +10188,7 @@ F:      drivers/net/ethernet/natsemi/sonic.*
 
 SONICS SILICON BACKPLANE DRIVER (SSB)
 M:     Michael Buesch <m@bues.ch>
-L:     netdev@vger.kernel.org
+L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/ssb/
 F:     include/linux/ssb/
@@ -10291,6 +10306,7 @@ L:      spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
 S:     Maintained
+F:     arch/arm/boot/dts/spear*
 F:     arch/arm/mach-spear/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
@@ -10853,6 +10869,14 @@ L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
+TI VPE/CAL DRIVERS
+M:     Benoit Parrot <bparrot@ti.com>
+L:     linux-media@vger.kernel.org
+W:     http://linuxtv.org/
+Q:     http://patchwork.linuxtv.org/project/linux-media/list/
+S:     Maintained
+F:     drivers/media/platform/ti-vpe/
+
 TI CDCE706 CLOCK DRIVER
 M:     Max Filippov <jcmvbkbc@gmail.com>
 S:     Maintained
@@ -11318,7 +11342,7 @@ F:      Documentation/usb/ehci.txt
 F:     drivers/usb/host/ehci*
 
 USB GADGET/PERIPHERAL SUBSYSTEM
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 W:     http://www.linux-usb.org/gadget
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -11394,7 +11418,7 @@ S:      Maintained
 F:     drivers/net/usb/pegasus.*
 
 USB PHY LAYER
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
@@ -12133,7 +12157,7 @@ F:      drivers/net/hamradio/*scc.c
 F:     drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
-M:     Seth Jennings <sjennings@variantweb.net>
+M:     Seth Jennings <sjenning@redhat.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
@@ -12188,7 +12212,7 @@ F:      include/linux/zsmalloc.h
 F:     Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
-M:     Seth Jennings <sjennings@variantweb.net>
+M:     Seth Jennings <sjenning@redhat.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index c65fe37c99e523fee40b84333d2b438e052a422a..682840850f580d6c5361d0e6db4084694e90a1c4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
index 98f2eeee8f681117908a3958a4698de514388bd9..75d8865d763dc7517382d0d84d52e4d8c92a4d6c 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <asm/machvec.h>
-#include <asm-generic/pci-bridge.h>
 
 /*
  * The following structure is used to manage multiple PCI busses.
index 76dde9db79349d0977687623307c934de91e314a..01392bcdfd23a94b99087f7bea2e236426e1e653 100644 (file)
@@ -410,7 +410,7 @@ config ARC_HAS_RTC
        default n
        depends on !SMP
 
-config ARC_HAS_GRTC
+config ARC_HAS_GFRC
        bool "SMP synchronized 64-bit cycle counter"
        default y
        depends on SMP
index f36c047b33cad0c469bd2368bb3e5fa4260535e5..735985974a3136d2a1b1d6184ff754efd0a02743 100644 (file)
@@ -16,7 +16,7 @@ CONFIG_ARC_PLAT_AXS10X=y
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
-# CONFIG_ARC_HAS_GRTC is not set
+# CONFIG_ARC_HAS_GFRC is not set
 CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y
index 7fac7d85ed6a32bb1abaa4f8e36c5d243cec06c0..fdc5be5b10295d612e0b1b70765c74641d55814c 100644 (file)
@@ -349,14 +349,13 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_bpu bpu;
        struct bcr_identity core;
        struct bcr_isa isa;
-       struct bcr_timer timers;
        unsigned int vec_base;
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
                unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
                             fpu_sp:1, fpu_dp:1, pad2:6,
                             debug:1, ap:1, smart:1, rtt:1, pad3:4,
-                            pad4:8;
+                            timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
        struct bcr_mpy extn_mpy;
        struct bcr_extn_xymem extn_xymem;
index 258b0e5ad3329a614e59204df55d91873e969623..1fc18ee06cf2df7f3dc971241f38f846a857133b 100644 (file)
 /* Was Intr taken in User Mode */
 #define AUX_IRQ_ACT_BIT_U      31
 
-/* 0 is highest level, but taken by FIRQs, if present in design */
-#define ARCV2_IRQ_DEF_PRIO             0
+/*
+ * User space should be interruptible even by the lowest prio interrupt
+ * Safe even if the actual number of interrupt priorities is fewer, or even one
+ */
+#define ARCV2_IRQ_DEF_PRIO     15
 
 /* seed value for status register */
 #define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | STATUS_AD_MASK | \
index 46f4e5351b2a56e96d440a27201fc612d2e9c195..847e3bbe387fc92f9b4433bf7e08fc0b11e3ec70 100644 (file)
@@ -39,8 +39,8 @@ struct mcip_cmd {
 #define CMD_DEBUG_SET_MASK             0x34
 #define CMD_DEBUG_SET_SELECT           0x36
 
-#define CMD_GRTC_READ_LO               0x42
-#define CMD_GRTC_READ_HI               0x43
+#define CMD_GFRC_READ_LO               0x42
+#define CMD_GFRC_READ_HI               0x43
 
 #define CMD_IDU_ENABLE                 0x71
 #define CMD_IDU_DISABLE                        0x72
index cbfec79137bf77735fa675d0eb2be57da217ba63..b178302947065660bc4765d86f66276045e29055 100644 (file)
@@ -211,7 +211,11 @@ debug_marker_syscall:
 ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
 ; entry was via Exception in DS which got preempted in kernel).
 ;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs a workaround
+;
+; Solution is to return from the interrupt w/o any delay slot quirks into a kernel trampoline,
+; and from pure kernel mode return to the delay slot, which handles the DS bit/BTA correctly
+
 .Lintr_ret_to_delay_slot:
 debug_marker_ds:
 
@@ -222,18 +226,23 @@ debug_marker_ds:
        ld      r2, [sp, PT_ret]
        ld      r3, [sp, PT_status32]
 
+       ; STAT32 for Int return created from scratch
+       ; (No delay slot, disable further intr in trampoline)
+
        bic     r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
        st      r0, [sp, PT_status32]
 
        mov     r1, .Lintr_ret_to_delay_slot_2
        st      r1, [sp, PT_ret]
 
+       ; Orig exception PC/STAT32 kept safe in the @orig_r0 and @event stack slots
        st      r2, [sp, 0]
        st      r3, [sp, 4]
 
        b       .Lisr_ret_fast_path
 
 .Lintr_ret_to_delay_slot_2:
+       ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
        sub     sp, sp, SZ_PT_REGS
        st      r9, [sp, -4]
 
@@ -243,11 +252,19 @@ debug_marker_ds:
        ld      r9, [sp, 4]
        sr      r9, [erstatus]
 
+       ; restore AUX_USER_SP if returning to U mode
+       bbit0   r9, STATUS_U_BIT, 1f
+       ld      r9, [sp, PT_sp]
+       sr      r9, [AUX_USER_SP]
+
+1:
        ld      r9, [sp, 8]
        sr      r9, [erbta]
 
        ld      r9, [sp, -4]
        add     sp, sp, SZ_PT_REGS
+
+       ; return from pure kernel mode to delay slot
        rtie
 
 END(ret_from_exception)
index 0394f9f61b466dea018af3ec371c65a8dbc17acb..942526322ae7125cb1e7910adb0523ed16ae7435 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
+static int irq_prio;
+
 /*
  * Early Hardware specific Interrupt setup
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,6 +26,14 @@ void arc_init_IRQ(void)
 {
        unsigned int tmp;
 
+       struct irq_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
+#else
+               unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
+#endif
+       } irq_bcr;
+
        struct aux_irq_ctrl {
 #ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int res3:18, save_idx_regs:1, res2:1,
@@ -46,28 +56,25 @@ void arc_init_IRQ(void)
 
        WRITE_AUX(AUX_IRQ_CTRL, ictrl);
 
-       /* setup status32, don't enable intr yet as kernel doesn't want */
-       tmp = read_aux_reg(0xa);
-       tmp |= ISA_INIT_STATUS_BITS;
-       tmp &= ~STATUS_IE_MASK;
-       asm volatile("flag %0   \n"::"r"(tmp));
-
        /*
         * ARCv2 core intc provides multiple interrupt priorities (up to 16).
         * Typical builds though have only two levels (0-high, 1-low).
         * Linux by default uses the lower prio 1 for most irqs, reserving 0 for
         * NMI-style interrupts in the future (say perf)
-        *
-        * Read the intc BCR to confirm that Linux default priority is avail
-        * in h/w
-        *
-        * Note:
-        *  IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
-        *  is 0 based.
         */
-       tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF;
-       if (ARCV2_IRQ_DEF_PRIO > tmp)
-               panic("Linux default irq prio incorrect\n");
+
+       READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+
+       irq_prio = irq_bcr.prio;        /* Encoded as N-1 for N levels */
+       pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
+               irq_prio + 1, irq_prio,
+               irq_bcr.firq ? " FIRQ (not used)":"");
+
+       /* setup status32, don't enable intr yet as kernel doesn't want */
+       tmp = read_aux_reg(0xa);
+       tmp |= STATUS_AD_MASK | (irq_prio << 1);
+       tmp &= ~STATUS_IE_MASK;
+       asm volatile("flag %0   \n"::"r"(tmp));
 }
 
 static void arcv2_irq_mask(struct irq_data *data)
@@ -86,7 +93,7 @@ void arcv2_irq_enable(struct irq_data *data)
 {
        /* set default priority */
        write_aux_reg(AUX_IRQ_SELECT, data->irq);
-       write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+       write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
 
        /*
         * hw auto enables (linux unmask) all by default
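[Editor's note] A minimal standalone sketch of the IRQ_BCR decode that the irq_build bitfield struct above performs, assuming the little-endian (non-CONFIG_CPU_BIG_ENDIAN) layout from the patch; the struct and helper names here are made up for illustration and are not part of the patch. The prio field is encoded as N-1 for N priority levels, and that value is what arc_init_IRQ() now reports and arcv2_irq_enable() programs into AUX_IRQ_PRIORITY.

/* Editor's sketch: decode a raw ARC_REG_IRQ_BCR value (little-endian layout) */
struct arc_irq_bcr_fields {
	unsigned int ver;	/* bits 0..7   - intc version                   */
	unsigned int irqs;	/* bits 8..15  - irqs field                     */
	unsigned int exts;	/* bits 16..23 - exts field                     */
	unsigned int prio;	/* bits 24..27 - N-1 for N priority levels      */
	unsigned int firq;	/* bit  28     - FIRQ support present           */
};

static inline struct arc_irq_bcr_fields decode_irq_bcr(unsigned int raw)
{
	struct arc_irq_bcr_fields f = {
		.ver  = raw         & 0xff,
		.irqs = (raw >> 8)  & 0xff,
		.exts = (raw >> 16) & 0xff,
		.prio = (raw >> 24) & 0xf,
		.firq = (raw >> 28) & 0x1,
	};
	/* e.g. a typical two-level build (0-high, 1-low) reports prio == 1 */
	return f;
}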
index bd237acdf4f2f9601efbf4c25c45067ddd2b69d9..bc771f58fefb4a9c67c90781bf520d4004f61fa2 100644 (file)
@@ -96,13 +96,13 @@ static void mcip_probe_n_setup(void)
 #ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int pad3:8,
                             idu:1, llm:1, num_cores:6,
-                            iocoh:1,  grtc:1, dbg:1, pad2:1,
+                            iocoh:1,  gfrc:1, dbg:1, pad2:1,
                             msg:1, sem:1, ipi:1, pad:1,
                             ver:8;
 #else
                unsigned int ver:8,
                             pad:1, ipi:1, sem:1, msg:1,
-                            pad2:1, dbg:1, grtc:1, iocoh:1,
+                            pad2:1, dbg:1, gfrc:1, iocoh:1,
                             num_cores:6, llm:1, idu:1,
                             pad3:8;
 #endif
@@ -116,7 +116,7 @@ static void mcip_probe_n_setup(void)
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
-               IS_AVAIL1(mp.grtc, "GRTC"));
+               IS_AVAIL1(mp.gfrc, "GFRC"));
 
        idu_detected = mp.idu;
 
@@ -125,8 +125,8 @@ static void mcip_probe_n_setup(void)
                __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
        }
 
-       if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
-               panic("kernel trying to use non-existent GRTC\n");
+       if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
+               panic("kernel trying to use non-existent GFRC\n");
 }
 
 struct plat_smp_ops plat_smp_ops = {
index e1b87444ea9a0740b9651ad6b68fd39f701121a0..a7edceba5f8447db5c2a2a28be9e87c78ef0a511 100644 (file)
@@ -45,6 +45,7 @@ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 static void read_arc_build_cfg_regs(void)
 {
        struct bcr_perip uncached_space;
+       struct bcr_timer timer;
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        unsigned long perip_space;
@@ -53,7 +54,11 @@ static void read_arc_build_cfg_regs(void)
        READ_BCR(AUX_IDENTITY, cpu->core);
        READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
-       READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
+       READ_BCR(ARC_REG_TIMERS_BCR, timer);
+       cpu->extn.timer0 = timer.t0;
+       cpu->extn.timer1 = timer.t1;
+       cpu->extn.rtc = timer.rtc;
+
        cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
        READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
@@ -208,9 +213,9 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       (unsigned int)(arc_get_core_freq() / 10000) % 100);
 
        n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
-                      IS_AVAIL1(cpu->timers.t0, "Timer0 "),
-                      IS_AVAIL1(cpu->timers.t1, "Timer1 "),
-                      IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ",
+                      IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
+                      IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
+                      IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
                                 CONFIG_ARC_HAS_RTC));
 
        n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
@@ -293,13 +298,13 @@ static void arc_chk_core_config(void)
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        int fpu_enabled;
 
-       if (!cpu->timers.t0)
+       if (!cpu->extn.timer0)
                panic("Timer0 is not present!\n");
 
-       if (!cpu->timers.t1)
+       if (!cpu->extn.timer1)
                panic("Timer1 is not present!\n");
 
-       if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc)
+       if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
                panic("RTC is not present\n");
 
 #ifdef CONFIG_ARC_HAS_DCCM
@@ -334,6 +339,7 @@ static void arc_chk_core_config(void)
                panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
 
        if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+           IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
            !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
                panic("llock/scond livelock workaround missing\n");
 }
index dfad287f1db1c6b55b86faacc0b40d2472636795..156d9833ff84b5c77b7a7bd95d2be15c81d7cbf9 100644 (file)
@@ -62,7 +62,7 @@
 
 /********** Clock Source Device *********/
 
-#ifdef CONFIG_ARC_HAS_GRTC
+#ifdef CONFIG_ARC_HAS_GFRC
 
 static int arc_counter_setup(void)
 {
@@ -83,10 +83,10 @@ static cycle_t arc_counter_read(struct clocksource *cs)
 
        local_irq_save(flags);
 
-       __mcip_cmd(CMD_GRTC_READ_LO, 0);
+       __mcip_cmd(CMD_GFRC_READ_LO, 0);
        stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
 
-       __mcip_cmd(CMD_GRTC_READ_HI, 0);
+       __mcip_cmd(CMD_GFRC_READ_HI, 0);
        stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
 
        local_irq_restore(flags);
@@ -95,7 +95,7 @@ static cycle_t arc_counter_read(struct clocksource *cs)
 }
 
 static struct clocksource arc_counter = {
-       .name   = "ARConnect GRTC",
+       .name   = "ARConnect GFRC",
        .rating = 400,
        .read   = arc_counter_read,
        .mask   = CLOCKSOURCE_MASK(64),
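[Editor's note] A minimal sketch of the GFRC read pattern used in arc_counter_read() above, not part of the patch and with an illustrative helper name: the 64-bit free-running counter is exposed as two 32-bit MCIP readback values, so the LO and HI halves are read back-to-back with interrupts disabled and then combined into one cycle count.

/* Editor's sketch of the 64-bit GFRC snapshot read shown above */
static inline u64 gfrc_read_snapshot(void)
{
	unsigned long flags;
	u32 lo, hi;

	local_irq_save(flags);		/* keep the LO/HI reads paired */

	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	lo = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	hi = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return ((u64)hi << 32) | lo;	/* combine into the 64-bit cycle count */
}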
index 4f799e567fc870502ae147f3c26c5bb402315a9d..cc95ff8f07cbce92ea93398a57605f0b531169e7 100644 (file)
@@ -1337,7 +1337,6 @@ config BIG_LITTLE
 config BL_SWITCHER
        bool "big.LITTLE switcher support"
        depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
-       select ARM_CPU_SUSPEND
        select CPU_PM
        help
          The big.LITTLE "switcher" provides the core functionality to
@@ -2111,7 +2110,8 @@ config ARCH_SUSPEND_POSSIBLE
        def_bool y
 
 config ARM_CPU_SUSPEND
-       def_bool PM_SLEEP
+       def_bool PM_SLEEP || BL_SWITCHER || ARM_PSCI_FW
+       depends on ARCH_SUSPEND_POSSIBLE
 
 config ARCH_HIBERNATION_POSSIBLE
        bool
index c6b6175d020329ac74eeefb0eebf1a6c353d6ea8..1098e91d6d3f34ff5ca9abd4784dd1793897115c 100644 (file)
@@ -1368,6 +1368,7 @@ config DEBUG_SIRFSOC_UART
 config DEBUG_LL_INCLUDE
        string
        default "debug/sa1100.S" if DEBUG_SA1100
+       default "debug/palmchip.S" if DEBUG_UART_8250_PALMCHIP
        default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
        default "debug/at91.S" if DEBUG_AT91_UART
        default "debug/asm9260.S" if DEBUG_ASM9260_UART
@@ -1656,6 +1657,14 @@ config DEBUG_UART_8250_WORD
                DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 || \
                DEBUG_BRCMSTB_UART
 
+config DEBUG_UART_8250_PALMCHIP
+       bool "8250 UART is Palmchip BK-310x"
+       depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
+       help
+         Palmchip provides a UART implementation that is functionally
+         compatible with the 16550 but uses a different register layout.
+         Say Y here if the debug UART is of this type.
+
 config DEBUG_UART_8250_FLOW_CONTROL
        bool "Enable flow control for 8250 UART"
        depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
index fe254108d1d92c88bb05b90221ca314edcaceedb..c2aedce3fea389332d750950b8ef1937c496cfcf 100644 (file)
@@ -147,8 +147,7 @@ textofs-$(CONFIG_PM_H1940)      := 0x00108000
 ifeq ($(CONFIG_ARCH_SA1100),y)
 textofs-$(CONFIG_SA1111) := 0x00208000
 endif
-textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
-textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
+textofs-$(CONFIG_ARCH_QCOM_A_FAMILY) := 0x00208000
 textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000
 
 # Machine directory name.  This list is sorted alphanumerically
diff --git a/arch/arm/arm-soc-for-next-contents.txt b/arch/arm/arm-soc-for-next-contents.txt
new file mode 100644 (file)
index 0000000..983e07b
--- /dev/null
@@ -0,0 +1,51 @@
+fixes
+
+next/fixes-non-critical
+
+next/cleanup
+       patch
+               ARM: drop unused Makefile.boot of Multiplatform SoCs
+               ARM: integrator: remove redundant select in Kconfig
+               ARM: netx: remove redundant "depends on ARCH_NETX"
+       renesas/cleanup
+               git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas tags/renesas-cleanup-for-v4.6
+       mvebu/cleanup
+               git://git.infradead.org/linux-mvebu tags/mvebu-cleanup-4.6-1
+
+next/soc
+       patch
+               ARM: debug: add support for Palmchip BK-310x UART
+       mvebu/drivers
+               git://git.infradead.org/linux-mvebu tags/mvebu-drivers-4.6-1
+
+next/arm64
+       patch
+               arm64: defconfig: add spmi and usb related configs
+
+next/dt
+
+next/dt64
+       patch
+               MAINTAINERS: Adding Maintainers for AMD Seattle Device Tree
+               dtb: amd: Fix GICv2 hypervisor and virtual interface sizes
+               dtb: amd: Fix DMA ranges in device tree
+               dtb: amd: Fix typo in SPI device nodes
+               dtb: amd: Misc changes for I2C device nodes
+               dtb: amd: Misc changes for SATA device tree nodes
+               dtb: amd: Misc changes for GPIO devices
+               dtb: amd: Add PERF CCN-504 device tree node
+               dtb: amd: Add KCS device tree node
+               dtb: amd: Add AMD XGBE device tree file
+               dtb: amd: Add support for new AMD Overdrive boards
+               dtb: amd: Add support for AMD/Linaro 96Boards Enterprise Edition Server board
+       renesas/dt64
+               git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas tags/renesas-arm64-dt-for-v4.6
+
+next/defconfig
+       renesas/defconfig
+               git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas tags/renesas-defconfig-for-v4.6
+       mvebu/defconfig
+               git://git.infradead.org/linux-mvebu tags/mvebu-defconfig-4.6-1
+
+next/drivers
+
index 4c23a68a0917049e4c7e9e864dcebca38d6b75b2..7a6a58ef8aaf815728fd59b0ca7af752495595b0 100644 (file)
@@ -106,6 +106,15 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
+# -fstack-protector-strong triggers protection checks in this code,
+# but it is being used too early to link to meaningful stack_chk logic.
+nossp_flags := $(call cc-option, -fno-stack-protector)
+CFLAGS_atags_to_fdt.o := $(nossp_flags)
+CFLAGS_fdt.o := $(nossp_flags)
+CFLAGS_fdt_ro.o := $(nossp_flags)
+CFLAGS_fdt_rw.o := $(nossp_flags)
+CFLAGS_fdt_wip.o := $(nossp_flags)
+
 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
 
index a4a6d70e8b26ceaf2d9dc7b3af37e4e4bb32eba1..b0726b5e3f8fd7d3c74609d7426dc642dc8610f9 100644 (file)
@@ -60,6 +60,7 @@ dtb-$(CONFIG_ARCH_AXXIA) += \
        axm5516-amarillo.dtb
 dtb-$(CONFIG_ARCH_BCM2835) += \
        bcm2835-rpi-b.dtb \
+       bcm2835-rpi-a.dtb \
        bcm2835-rpi-b-rev2.dtb \
        bcm2835-rpi-b-plus.dtb \
        bcm2835-rpi-a-plus.dtb \
@@ -156,7 +157,8 @@ dtb-$(CONFIG_ARCH_INTEGRATOR) += \
 dtb-$(CONFIG_ARCH_KEYSTONE) += \
        k2hk-evm.dtb \
        k2l-evm.dtb \
-       k2e-evm.dtb
+       k2e-evm.dtb \
+       k2g-evm.dtb
 dtb-$(CONFIG_MACH_KIRKWOOD) += \
        kirkwood-b3.dtb \
        kirkwood-blackarmor-nas220.dtb \
@@ -189,9 +191,12 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += \
        kirkwood-is2.dtb \
        kirkwood-km_kirkwood.dtb \
        kirkwood-laplug.dtb \
+       kirkwood-linkstation-lsqvl.dtb \
+       kirkwood-linkstation-lsvl.dtb \
+       kirkwood-linkstation-lswsxl.dtb \
+       kirkwood-linkstation-lswvl.dtb \
+       kirkwood-linkstation-lswxl.dtb \
        kirkwood-lschlv2.dtb \
-       kirkwood-lswvl.dtb \
-       kirkwood-lswxl.dtb \
        kirkwood-lsxhl.dtb \
        kirkwood-mplcec4.dtb \
        kirkwood-mv88f6281gtw-ge.dtb \
@@ -332,6 +337,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
        imx6q-gw551x.dtb \
        imx6q-gw552x.dtb \
        imx6q-hummingboard.dtb \
+       imx6q-icore-rqs.dtb \
        imx6q-nitrogen6x.dtb \
        imx6q-nitrogen6_max.dtb \
        imx6q-novena.dtb \
@@ -514,6 +520,7 @@ dtb-$(CONFIG_SOC_DRA7XX) += \
 dtb-$(CONFIG_ARCH_ORION5X) += \
        orion5x-lacie-d2-network.dtb \
        orion5x-lacie-ethernet-disk-mini-v2.dtb \
+       orion5x-linkstation-lsgl.dtb \
        orion5x-linkstation-lswtgl.dtb \
        orion5x-lswsgl.dtb \
        orion5x-maxtor-shared-storage-2.dtb \
@@ -666,6 +673,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
        sun7i-a20-cubieboard2.dtb \
        sun7i-a20-cubietruck.dtb \
        sun7i-a20-hummingbird.dtb \
+       sun7i-a20-itead-ibox.dtb \
        sun7i-a20-i12-tvbox.dtb \
        sun7i-a20-icnova-swac.dtb \
        sun7i-a20-m3.dtb \
@@ -691,6 +699,8 @@ dtb-$(CONFIG_MACH_SUN8I) += \
        sun8i-a33-ippo-q8h-v1.2.dtb \
        sun8i-a33-q8-tablet.dtb \
        sun8i-a33-sinlinx-sina33.dtb \
+       sun8i-a83t-allwinner-h8homlet-v2.dtb \
+       sun8i-a83t-cubietruck-plus.dtb \
        sun8i-h3-orangepi-plus.dtb
 dtb-$(CONFIG_MACH_SUN9I) += \
        sun9i-a80-optimus.dtb \
index 42e9b665582ab2672f6b112549d4178e5dc63d38..5d5fb62d8fbe783c4ccd54fae36f1c0fc9f8ba2f 100644 (file)
@@ -11,6 +11,7 @@
 /dts-v1/;
 
 #include "am33xx.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        model = "CompuLab CM-T335";
                regulator-max-microvolt = <3300000>;
        };
 
+       /* Regulator for WiFi */
+       vwlan_fixed: fixedregulator@2 {
+               compatible = "regulator-fixed";
+               regulator-name = "vwlan_fixed";
+               gpio = <&gpio0 20 GPIO_ACTIVE_HIGH>; /* gpio0_20 */
+               enable-active-high;
+               regulator-boot-off;
+       };
+
        backlight {
                compatible = "pwm-backlight";
                pwms = <&ecap0 0 50000 0>;
                brightness-levels = <0 51 53 56 62 75 101 152 255>;
                default-brightness-level = <8>;
        };
+
+       sound {
+               compatible = "simple-audio-card";
+               simple-audio-card,name = "cm-t335";
+
+               simple-audio-card,widgets =
+                       "Microphone", "Mic Jack",
+                       "Line", "Line In",
+                       "Headphone", "Headphone Jack";
+
+               simple-audio-card,routing =
+                       "Headphone Jack", "LHPOUT",
+                       "Headphone Jack", "RHPOUT",
+                       "LLINEIN", "Line In",
+                       "RLINEIN", "Line In",
+                       "MICIN", "Mic Jack";
+
+               simple-audio-card,format = "i2s";
+               simple-audio-card,bitclock-master = <&sound_master>;
+               simple-audio-card,frame-master = <&sound_master>;
+
+               simple-audio-card,cpu {
+                       sound-dai = <&mcasp1>;
+               };
+
+               sound_master: simple-audio-card,codec {
+                       sound-dai = <&tlv320aic23>;
+                       system-clock-frequency = <12000000>;
+               };
+       };
 };
 
 &am33xx_pinmux {
                >;
        };
 
+       dcan0_pins: pinmux_dcan0_pins {
+               pinctrl-single,pins = <
+                       /* uart1_ctsn.dcan0_tx */
+                       AM33XX_IOPAD(0x978, PIN_OUTPUT | MUX_MODE2)
+                       /* uart1_rtsn.dcan0_rx */
+                       AM33XX_IOPAD(0x97C, PIN_INPUT | MUX_MODE2)
+               >;
+       };
+
+       dcan1_pins: pinmux_dcan1_pins {
+               pinctrl-single,pins = <
+                       /* uart1_rxd.dcan1_tx */
+                       AM33XX_IOPAD(0x980, PIN_OUTPUT | MUX_MODE2)
+                       /* uart1_txd.dcan1_rx */
+                       AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE2)
+               >;
+       };
+
        ecap0_pins: pinmux_ecap0_pins {
                pinctrl-single,pins = <
                        /* eCAP0_in_PWM0_out.eCAP0_in_PWM0_out MODE0 */
                >;
        };
 
+       spi0_pins: pinmux_spi0_pins {
+               pinctrl-single,pins = <
+                       /* spi0_sclk.spi0_sclk */
+                       AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE0)
+                       /* spi0_d0.spi0_d0 */
+                       AM33XX_IOPAD(0x954, PIN_OUTPUT_PULLUP | MUX_MODE0)
+                       /* spi0_d1.spi0_d1 */
+                       AM33XX_IOPAD(0x958, PIN_INPUT | MUX_MODE0)
+                       /* spi0_cs0.spi0_cs0 */
+                       AM33XX_IOPAD(0x95C, PIN_OUTPUT | MUX_MODE0)
+                       /* spi0_cs1.spi0_cs1 */
+                       AM33XX_IOPAD(0x960, PIN_OUTPUT | MUX_MODE0)
+               >;
+       };
+
        /* wl1271 bluetooth */
        bluetooth_pins: pinmux_bluetooth_pins {
                pinctrl-single,pins = <
                        AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLUP | MUX_MODE7)
                >;
        };
+
+       /* TLV320AIC23B codec */
+       mcasp1_pins: pinmux_mcasp1_pins {
+               pinctrl-single,pins = <
+                       /* MII1_CRS.mcasp1_aclkx */
+                       AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE4)
+                       /* MII1_RX_ER.mcasp1_fsx */
+                       AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE4)
+                       /* MII1_COL.mcasp1_axr2 */
+                       AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE4)
+                       /* RMII1_REF_CLK.mcasp1_axr3 */
+                       AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE4)
+               >;
+       };
+
+       /* wl1271 WiFi */
+       wifi_pins: pinmux_wifi_pins {
+               pinctrl-single,pins = <
+                       /* EMU1.gpio3_8 - WiFi IRQ */
+                       AM33XX_IOPAD(0x9e8, PIN_INPUT_PULLUP | MUX_MODE7)
+                       /* XDMA_EVENT_INTR1.gpio0_20 - WiFi enable */
+                       AM33XX_IOPAD(0x9b4, PIN_OUTPUT | MUX_MODE7)
+               >;
+       };
 };
 
 &uart0 {
@@ -264,6 +361,13 @@ status = "okay";
                compatible = "emmicro,em3027";
                reg = <0x56>;
        };
+       /* Audio codec */
+       tlv320aic23: codec@1a {
+               compatible = "ti,tlv320aic23";
+               reg = <0x1a>;
+               #sound-dai-cells = <0>;
+               status = "okay";
+       };
 };
 
 &usb {
@@ -394,3 +498,70 @@ status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&mmc1_pins>;
 };
+
+&dcan0 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&dcan0_pins>;
+};
+
+&dcan1 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&dcan1_pins>;
+};
+
+/* Touchscreen and analog-to-digital converter */
+&tscadc {
+       status = "okay";
+       tsc {
+               ti,wires = <4>;
+               ti,x-plate-resistance = <200>;
+               ti,coordinate-readouts = <5>;
+               ti,wire-config = <0x01 0x10 0x23 0x32>;
+               ti,charge-delay = <0x400>;
+       };
+
+       adc {
+               ti,adc-channels = <4 5 6 7>;
+       };
+};
+
+/* CPU audio */
+&mcasp1 {
+               pinctrl-names = "default";
+               pinctrl-0 = <&mcasp1_pins>;
+
+               op-mode = <0>;          /* MCASP_IIS_MODE */
+               tdm-slots = <2>;
+               /* 16 serializers */
+               num-serializer = <16>;
+               serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+                       0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 0
+               >;
+               tx-num-evt = <1>;
+               rx-num-evt = <1>;
+
+               #sound-dai-cells = <0>;
+               status = "okay";
+};
+
+&spi0 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&spi0_pins>;
+       ti,pindir-d0-out-d1-in = <1>;
+       /* WLS1271 WiFi */
+       wlcore: wlcore@1 {
+               compatible = "ti,wl1271";
+               pinctrl-names = "default";
+               pinctrl-0 = <&wifi_pins>;
+               reg = <1>;
+               spi-max-frequency = <48000000>;
+               clock-xtal;
+               ref-clock-frequency = <38400000>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+               vwlan-supply = <&vwlan_fixed>;
+       };
+};
index 04885f9f959e21fcffb388940d64be0b6eb32869..1fafaad516ba0481de34cc12320fb9e51c2884ce 100644 (file)
                        ti,mbox-num-users = <4>;
                        ti,mbox-num-fifos = <8>;
                        mbox_wkupm3: wkup_m3 {
+                               ti,mbox-send-noirq;
                                ti,mbox-tx = <0 0 0>;
                                ti,mbox-rx = <0 0 3>;
                        };
index df955ba4dc6203273ff795cc1492e698764eda0d..92068fbf8b577440409c8e029d7cfba28da43cf8 100644 (file)
@@ -73,7 +73,7 @@
        global_timer: timer@48240200 {
                compatible = "arm,cortex-a9-global-timer";
                reg = <0x48240200 0x100>;
-               interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                interrupt-parent = <&gic>;
                clocks = <&mpu_periphclk>;
        };
@@ -81,7 +81,7 @@
        local_timer: timer@48240600 {
                compatible = "arm,cortex-a9-twd-timer";
                reg = <0x48240600 0x100>;
-               interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
                interrupt-parent = <&gic>;
                clocks = <&mpu_periphclk>;
        };
                        ti,mbox-num-users = <4>;
                        ti,mbox-num-fifos = <8>;
                        mbox_wkupm3: wkup_m3 {
+                               ti,mbox-send-noirq;
                                ti,mbox-tx = <0 0 0>;
                                ti,mbox-rx = <0 0 3>;
                        };
index 64d43325bcbc73aa08bf1da025c255883737f233..ecd09ab6d581bf95aa0b8bee8d596c3da8b9ee22 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&pixcir_ts_pins>;
                reg = <0x5c>;
-               interrupt-parent = <&gpio3>;
-               interrupts = <22 0>;
 
                attb-gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
 
                 * 0x264 represents the offset of padconf register of
                 * gpio3_22 from am43xx_pinmux base.
                 */
-               interrupts-extended = <&gpio3 22 IRQ_TYPE_NONE>,
+               interrupts-extended = <&gpio3 22 IRQ_TYPE_EDGE_FALLING>,
                                      <&am43xx_pinmux 0x264>;
                interrupt-names = "tsc", "wakeup";
 
index 746fd2b179587fe4522724f5b42b16202e97edef..d580e2b70f9a65f7dc078799add6d56628ae0169 100644 (file)
                pinctrl-0 = <&pixcir_ts_pins>;
                reg = <0x5c>;
                interrupt-parent = <&gpio1>;
-               interrupts = <17 0>;
+               interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
 
                attb-gpio = <&gpio1 17 GPIO_ACTIVE_HIGH>;
 
index c53882643ae96b6da8b15c6a26e5fd93ba3300cb..8d93882dc8d541a77c870e635813c32a7c77b79d 100644 (file)
                        DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d20.rgmii1_rd3 */
                        DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d21.rgmii1_rd2 */
                        DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d22.rgmii1_rd1 */
-                       DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT_PULLUP | MUX_MODE3) /* vin2a_d23.rgmii1_rd0 */
+                       DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d23.rgmii1_rd0 */
                >;
        };
 
        pinctrl-names = "default";
        pinctrl-0 = <&qspi1_pins>;
 
-       spi-max-frequency = <20000000>;
+       spi-max-frequency = <48000000>;
 
        spi_flash: spi_flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "spansion,m25p80", "jedec,spi-nor";
                reg = <0>;                              /* CS0 */
-               spi-max-frequency = <20000000>;
+               spi-max-frequency = <48000000>;
 
                partition@0 {
                        label = "uboot";
 
 &cpsw_emac0 {
        phy_id = <&davinci_mdio>, <0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-txid";
        dual_emac_res_vlan = <0>;
 };
 
 &cpsw_emac1 {
        phy_id = <&davinci_mdio>, <1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-txid";
        dual_emac_res_vlan = <1>;
 };
 
 };
 
 &usb2 {
-       dr_mode = "peripheral";
+       dr_mode = "host";
 };
 
 &mcasp3 {
index 77bb8e17401a26cad24dcfd2ab5a7c9ddbd4d312..988e99632d49953d012b72e674f8fa040883a1c4 100644 (file)
@@ -25,8 +25,8 @@
 &dra7_pmx_core {
        uart3_pins_default: uart3_pins_default {
                pinctrl-single,pins = <
-                       DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_SLEW | MUX_MODE2)   /* uart2_ctsn.uart3_rxd */
-                       DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_SLEW | MUX_MODE1)   /* uart2_rtsn.uart3_txd */
+                       DRA7XX_CORE_IOPAD(0x3648, PIN_INPUT_SLEW | MUX_MODE0)   /* uart3_rxd */
+                       DRA7XX_CORE_IOPAD(0x364c, PIN_INPUT_SLEW | MUX_MODE0)   /* uart3_txd */
                >;
        };
 
        pinctrl-0 = <&i2c5_pins_default>;
        clock-frequency = <400000>;
 
-       eeprom_base: atmel@50 {
+       eeprom_base: atmel@54 {
                compatible = "atmel,24c08";
-               reg = <0x50>;
+               reg = <0x54>;
                pagesize = <16>;
        };
 
index 3aa980ad64f0c47f7603d5144ffa04211578b2e1..d5e19cd4d256bc1e8fd51b21201ca576278ee782 100644 (file)
 &pinctrl {
        pwr_led_pin: pwr-led-pin {
                marvell,pins = "mpp63";
-               marvell,function = "gpo";
+               marvell,function = "gpio";
        };
 
        stat_led_pins: stat-led-pins {
index faa474874cb8e5c4436154cf6dcbf38f7daa60ef..11565752b9f6b7f83ea56187ebcac73deaf91a36 100644 (file)
 
        backup_led_pin: backup-led-pin {
                marvell,pins = "mpp63";
-               marvell,function = "gpo";
+               marvell,function = "gpio";
        };
 
        power_led_pin: power-led-pin {
index 836bcc07afc5babe199baaea184b3149d6020dbd..8ca7a4340c0ff8301a80b1d6e75b52770cfdeebd 100644 (file)
 
        fan_ctrl_high_pin: fan-ctrl-high-pin {
                marvell,pins = "mpp63";
-               marvell,function = "gpo";
+               marvell,function = "gpio";
        };
 
        fan_alarm_pin: fan-alarm-pin {
index acd5b1519edb2be2f4cd58246fba337a7059ffa1..86fde0bf552b0b1596353e7351ab9f13b91dbd46 100644 (file)
                                };
                        };
 
+                       /* CON3 */
                        ethernet@30000 {
                                status = "okay";
                                phy = <&phy2>;
                                phy-mode = "sgmii";
                        };
 
+                       /* CON2 */
                        ethernet@34000 {
                                status = "okay";
                                phy = <&phy1>;
                                phy-mode = "sgmii";
                        };
 
+                       /* CON4 */
                        ethernet@70000 {
                                pinctrl-names = "default";
 
index cd316021d6ce2b72af13fa6fc3ccb7b833d423f0..51943598d8585609c5c06a068b7077271ccba45a 100644 (file)
@@ -44,8 +44,8 @@
 #include <dt-bindings/gpio/gpio.h>
 
 / {
-       model = "Marvell Armada 38GP";
-       compatible = "marvell,a385-gp", "marvell,armada388", "marvell,armada380";
+       model = "Marvell Armada 388 DB-88F6820-GP";
+       compatible = "marvell,a388-gp", "marvell,armada388", "marvell,armada380";
 
        chosen {
                stdout-path = "serial0:115200n8";
 
                        /* CON5 */
                        usb3@f0000 {
-                               vcc-supply = <&reg_usb2_1_vbus>;
+                               usb-phy = <&usb2_1_phy>;
                                status = "okay";
                        };
 
                        /* CON7 */
                        usb3@f8000 {
-                               vcc-supply = <&reg_usb3_vbus>;
+                               usb-phy = <&usb3_phy>;
                                status = "okay";
                        };
                };
                };
        };
 
+       usb2_1_phy: usb2_1_phy {
+               compatible = "usb-nop-xceiv";
+               vcc-supply = <&reg_usb2_1_vbus>;
+       };
+
+       usb3_phy: usb3_phy {
+               compatible = "usb-nop-xceiv";
+               vcc-supply = <&reg_usb3_vbus>;
+       };
+
        reg_usb3_vbus: usb3-vbus {
                compatible = "regulator-fixed";
                regulator-name = "usb3-vbus";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
                enable-active-high;
-               regulator-always-on;
                gpio = <&expander1 15 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
                enable-active-high;
-               regulator-always-on;
                gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
                enable-active-high;
-               regulator-always-on;
+               regulator-boot-on;
                gpio = <&expander0 2 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-name = "v5.0-sata0";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata0>;
        };
 
                regulator-name = "v12.0-sata0";
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata0>;
        };
 
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
                enable-active-high;
-               regulator-always-on;
+               regulator-boot-on;
                gpio = <&expander0 3 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-name = "v5.0-sata1";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata1>;
        };
 
                regulator-name = "v12.0-sata1";
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata1>;
        };
 
                compatible = "regulator-fixed";
                regulator-name = "pwr_en_sata2";
                enable-active-high;
-               regulator-always-on;
+               regulator-boot-on;
                gpio = <&expander0 11 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-name = "v5.0-sata2";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata2>;
        };
 
                regulator-name = "v12.0-sata2";
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata2>;
        };
 
                compatible = "regulator-fixed";
                regulator-name = "pwr_en_sata3";
                enable-active-high;
-               regulator-always-on;
+               regulator-boot-on;
                gpio = <&expander0 12 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-name = "v5.0-sata3";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata3>;
        };
 
                regulator-name = "v12.0-sata3";
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata3>;
        };
 };
index e8b7f67267723241730c8279902b82b8340d8118..b50784de86e80d5ee75f9154bf00a86f1100922b 100644 (file)
                                reg = <0x22000 0x1000>;
                        };
 
+                       /*
+                        * As a special exception to the "order by
+                        * register address" rule, the eth0 node is
+                        * placed here to ensure that it gets
+                        * registered as the first interface, since
+                        * the network subsystem doesn't allow naming
+                        * interfaces using DT aliases. Without this,
+                        * the ordering of interfaces is different
+                        * from the one used in U-Boot and the
+                        * labeling of interfaces on the boards, which
+                        * is very confusing for users.
+                        */
+                       eth0: ethernet@70000 {
+                               compatible = "marvell,armada-370-neta";
+                               reg = <0x70000 0x4000>;
+                               interrupts-extended = <&mpic 8>;
+                               clocks = <&gateclk 4>;
+                               tx-csum-limit = <9800>;
+                               status = "disabled";
+                       };
+
                        eth1: ethernet@30000 {
                                compatible = "marvell,armada-370-neta";
                                reg = <0x30000 0x4000>;
                                };
                        };
 
-                       eth0: ethernet@70000 {
-                               compatible = "marvell,armada-370-neta";
-                               reg = <0x70000 0x4000>;
-                               interrupts-extended = <&mpic 8>;
-                               clocks = <&gateclk 4>;
-                               tx-csum-limit = <9800>;
-                               status = "disabled";
-                       };
-
                        mdio: mdio@72004 {
                                #address-cells = <1>;
                                #size-cells = <0>;
index 13cf69a8d0fb392b264910f014cf7ff8434f9e2b..fb9e1bbf23385b85b0b82ddb153b922b2d2e0178 100644 (file)
                                nand-on-flash-bbt;
 
                                partitions {
+                                       compatible = "fixed-partitions";
                                        #address-cells = <1>;
                                        #size-cells = <1>;
 
index 77ddff036409f7cb84d28342f8b973ce9792d09e..e683856c507c8bedacb5f8746a43cf9e4aef2207 100644 (file)
 
                        macb0: ethernet@f8008000 {
                                pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_macb0_default>;
+                               pinctrl-0 = <&pinctrl_macb0_default &pinctrl_macb0_phy_irq>;
                                phy-mode = "rmii";
                                status = "okay";
+
+                               ethernet-phy@1 {
+                                       reg = <0x1>;
+                                       interrupt-parent = <&pioA>;
+                                       interrupts = <73 IRQ_TYPE_LEVEL_LOW>;
+                               };
                        };
 
                        pdmic@f8018000 {
                                        bias-disable;
                                };
 
+                               pinctrl_macb0_phy_irq: macb0_phy_irq {
+                                       pinmux = <PIN_PC9__GPIO>;
+                               };
+
                                pinctrl_pdmic_default: pdmic_default {
                                        pinmux = <PIN_PB26__PDMIC_DAT>,
                                                <PIN_PB27__PDMIC_CLK>;
index 131614f28e758653e34cc2b993bb6a5a28f20bbb..569026e8f96cadaf25eeb10ca207c02c7f175121 100644 (file)
                        macb0: ethernet@f8020000 {
                                phy-mode = "rmii";
                                status = "okay";
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
 
                                phy0: ethernet-phy@1 {
                                        interrupt-parent = <&pioE>;
-                                       interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+                                       interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
                                        reg = <1>;
                                };
                        };
                                                atmel,pins =
                                                        <AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
                                        };
+                                       pinctrl_macb0_phy_irq: macb0_phy_irq_0 {
+                                               atmel,pins =
+                                                       <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+                                       };
                                };
                        };
                };
index 2d4a33100af6bdc4fcd4a3500467456673f54325..4e98cda974032221dbf5a0917de97807653cf943 100644 (file)
                        };
 
                        macb0: ethernet@f8020000 {
+                               pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
                                phy-mode = "rmii";
                                status = "okay";
+
+                               ethernet-phy@1 {
+                                       reg = <0x1>;
+                                       interrupt-parent = <&pioE>;
+                                       interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+                               };
                        };
 
                        mmc1: mmc@fc000000 {
 
                        pinctrl@fc06a000 {
                                board {
+                                       pinctrl_macb0_phy_irq: macb0_phy_irq {
+                                               atmel,pins =
+                                                       <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+                                       };
                                        pinctrl_mmc0_cd: mmc0_cd {
                                                atmel,pins =
                                                        <AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
index ca4ddf86817ab64dc6a8a31193191460bfaf8d4b..626c67d666269d5e878c650b76e8babc424bd48a 100644 (file)
        };
 
        panel: panel {
-               compatible = "qd,qd43003c0-40", "simple-panel";
+               compatible = "qiaodian,qd43003c0-40", "simple-panel";
                backlight = <&backlight>;
                power-supply = <&panel_reg>;
                #address-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a.dts b/arch/arm/boot/dts/bcm2835-rpi-a.dts
new file mode 100644 (file)
index 0000000..ddbbbbd
--- /dev/null
@@ -0,0 +1,24 @@
+/dts-v1/;
+#include "bcm2835.dtsi"
+#include "bcm2835-rpi.dtsi"
+
+/ {
+       compatible = "raspberrypi,model-a", "brcm,bcm2835";
+       model = "Raspberry Pi Model A";
+
+       leds {
+               act {
+                       gpios = <&gpio 16 1>;
+               };
+       };
+};
+
+&gpio {
+       pinctrl-0 = <&gpioout &alt0 &i2s_alt2 &alt3>;
+
+       /* I2S interface */
+       i2s_alt2: i2s_alt2 {
+               brcm,pins = <28 29 30 31>;
+               brcm,function = <BCM2835_FSEL_ALT2>;
+       };
+};
index 3afb9fefe2d1fb7cd109e54cbb47856c9d9b520a..76bdbcafab18e8ea7c4e06decd315540aac7ef33 100644 (file)
@@ -1,3 +1,5 @@
+#include <dt-bindings/power/raspberrypi-power.h>
+
 / {
        memory {
                reg = <0 0x10000000>;
                        compatible = "raspberrypi,bcm2835-firmware";
                        mboxes = <&mailbox>;
                };
+
+               power: power {
+                       compatible = "raspberrypi,bcm2835-power";
+                       firmware = <&firmware>;
+                       #power-domain-cells = <1>;
+               };
        };
 };
 
        status = "okay";
        bus-width = <4>;
 };
+
+&pwm {
+       status = "okay";
+};
+
+&usb {
+       power-domains = <&power RPI_POWER_DOMAIN_USB>;
+};
index 971e741e5467b24337f712f5b85e8a4816cdaa4e..05ff5ce91c957786794b68c7e0eae9b83c667545 100644 (file)
@@ -1,5 +1,6 @@
 #include <dt-bindings/pinctrl/bcm2835.h>
 #include <dt-bindings/clock/bcm2835.h>
+#include <dt-bindings/clock/bcm2835-aux.h>
 #include "skeleton.dtsi"
 
 /* This include file covers the common peripherals and configuration between
                        #interrupt-cells = <2>;
                };
 
-               uart0: uart@7e201000 {
+               uart0: serial@7e201000 {
                        compatible = "brcm,bcm2835-pl011", "arm,pl011", "arm,primecell";
                        reg = <0x7e201000 0x1000>;
                        interrupts = <2 25>;
                        clocks = <&clocks BCM2835_CLOCK_VPU>;
                };
 
+               spi1: spi@7e215080 {
+                       compatible = "brcm,bcm2835-aux-spi";
+                       reg = <0x7e215080 0x40>;
+                       interrupts = <1 29>;
+                       clocks = <&aux BCM2835_AUX_CLOCK_SPI1>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+               };
+
+               spi2: spi@7e2150c0 {
+                       compatible = "brcm,bcm2835-aux-spi";
+                       reg = <0x7e2150c0 0x40>;
+                       interrupts = <1 29>;
+                       clocks = <&aux BCM2835_AUX_CLOCK_SPI2>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+               };
+
+               pwm: pwm@7e20c000 {
+                       compatible = "brcm,bcm2835-pwm";
+                       reg = <0x7e20c000 0x28>;
+                       clocks = <&clocks BCM2835_CLOCK_PWM>;
+                       assigned-clocks = <&clocks BCM2835_CLOCK_PWM>;
+                       assigned-clock-rates = <10000000>;
+                       #pwm-cells = <2>;
+                       status = "disabled";
+               };
+
                sdhci: sdhci@7e300000 {
                        compatible = "brcm,bcm2835-sdhci";
                        reg = <0x7e300000 0x100>;
                        status = "disabled";
                };
 
-               usb@7e980000 {
+               usb: usb@7e980000 {
                        compatible = "brcm,bcm2835-usb";
                        reg = <0x7e980000 0x10000>;
                        interrupts = <1 9>;
index 57795da616cb40850428861c7756164ace137937..bcce6f50c93d9d2579b7e9b524f65ba8a11af40c 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include "skeleton.dtsi"
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
@@ -53,8 +54,8 @@
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        clocks@e0110000 {
        timer@e0180000 {
                compatible = "renesas,em-sti";
                reg = <0xe0180000 0x54>;
-               interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&sti_sclk>;
                clock-names = "sclk";
        };
        uart0: serial@e1020000 {
                compatible = "renesas,em-uart";
                reg = <0xe1020000 0x38>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&usia_u0_sclk>;
                clock-names = "sclk";
        };
        uart1: serial@e1030000 {
                compatible = "renesas,em-uart";
                reg = <0xe1030000 0x38>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&usib_u1_sclk>;
                clock-names = "sclk";
        };
        uart2: serial@e1040000 {
                compatible = "renesas,em-uart";
                reg = <0xe1040000 0x38>;
-               interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&usib_u2_sclk>;
                clock-names = "sclk";
        };
        uart3: serial@e1050000 {
                compatible = "renesas,em-uart";
                reg = <0xe1050000 0x38>;
-               interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&usib_u3_sclk>;
                clock-names = "sclk";
        };
        gpio0: gpio@e0050000 {
                compatible = "renesas,em-gio";
                reg = <0xe0050000 0x2c>, <0xe0050040 0x20>;
-               interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 68 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
                #gpio-cells = <2>;
        gpio1: gpio@e0050080 {
                compatible = "renesas,em-gio";
                reg = <0xe0050080 0x2c>, <0xe00500c0 0x20>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 70 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 32>;
                #gpio-cells = <2>;
        gpio2: gpio@e0050100 {
                compatible = "renesas,em-gio";
                reg = <0xe0050100 0x2c>, <0xe0050140 0x20>;
-               interrupts = <0 71 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 72 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
                #gpio-cells = <2>;
        gpio3: gpio@e0050180 {
                compatible = "renesas,em-gio";
                reg = <0xe0050180 0x2c>, <0xe00501c0 0x20>;
-               interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 74 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
                #gpio-cells = <2>;
        gpio4: gpio@e0050200 {
                compatible = "renesas,em-gio";
                reg = <0xe0050200 0x2c>, <0xe0050240 0x20>;
-               interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 76 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 31>;
                #gpio-cells = <2>;
                #size-cells = <0>;
                compatible = "renesas,iic-emev2";
                reg = <0xe0070000 0x28>;
-               interrupts = <0 32 IRQ_TYPE_EDGE_RISING>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>;
                clocks = <&iic0_sclk>;
                clock-names = "sclk";
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-emev2";
                reg = <0xe10a0000 0x28>;
-               interrupts = <0 33 IRQ_TYPE_EDGE_RISING>;
+               interrupts = <GIC_SPI 33 IRQ_TYPE_EDGE_RISING>;
                clocks = <&iic1_sclk>;
                clock-names = "sclk";
                status = "disabled";
index 443a350858460692d18bbe9f9470b0b3778d27a8..9e2840b59ae8520582717eb85369780090de7416 100644 (file)
@@ -43,7 +43,7 @@
                        linux,code = <KEY_POWER>;
                        label = "power key";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
@@ -67,7 +67,7 @@
                        interrupt-parent = <&gpx1>;
                        interrupts = <5 0>;
                        reg = <0x25>;
-                       wakeup;
+                       wakeup-source;
 
                        muic: max77836-muic {
                                compatible = "maxim,max77836-muic";
                interrupt-parent = <&gpx0>;
                interrupts = <7 0>;
                reg = <0x66>;
-               wakeup;
+               wakeup-source;
 
                s2mps14_osc: clocks {
                        compatible = "samsung,s2mps14-clk";
index 3e64d5dcdd60c6abfc9a0a9249f71bb54cd77eb9..1f102f3a1ab1117fd6dae8974878ca4188d03de7 100644 (file)
@@ -43,7 +43,7 @@
                        linux,code = <KEY_POWER>;
                        label = "power key";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
@@ -58,7 +58,7 @@
                        interrupt-parent = <&gpx1>;
                        interrupts = <5 0>;
                        reg = <0x25>;
-                       wakeup;
+                       wakeup-source;
 
                        muic: max77836-muic {
                                compatible = "maxim,max77836-muic";
                interrupt-parent = <&gpx0>;
                interrupts = <7 0>;
                reg = <0x66>;
-               wakeup;
+               wakeup-source;
 
                s2mps14_osc: clocks {
                        compatible = "samsung,s2mps14-clk";
index 045785c44c048b3b15c923b07f059744c717cfa4..ca621a92319e6e2cfb62a607b6a8b1b0998c6e67 100644 (file)
                reg = <0x10000000 0x100>;
        };
 
+       sromc@12570000 {
+               compatible = "samsung,exynos-srom";
+               reg = <0x12570000 0x14>;
+       };
+
        mipi_phy: video-phy@10020710 {
                compatible = "samsung,s5pv210-mipi-video-phy";
                #phy-cells = <1>;
index 5821ad87e32c73298469493de5b6d723eb74ab16..ad7394c1d67a92acc2d64315cde287fed691884c 100644 (file)
                        label = "Up";
                        gpios = <&gpx2 0 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_UP>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                down {
                        label = "Down";
                        gpios = <&gpx2 1 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_DOWN>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                back {
                        label = "Back";
                        gpios = <&gpx1 7 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_BACK>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                home {
                        label = "Home";
                        gpios = <&gpx1 6 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_HOME>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                menu {
                        label = "Menu";
                        gpios = <&gpx1 5 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_MENU>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 104cbb33d2bb0f088b41c943a63bc8d10a523bea..94ca7d36ab3788bd2e80b3294aeb896e20eb8d4a 100644 (file)
@@ -66,7 +66,7 @@
        samsung,keypad-num-rows = <2>;
        samsung,keypad-num-columns = <8>;
        linux,keypad-no-autorepeat;
-       linux,keypad-wakeup;
+       wakeup-source;
        pinctrl-names = "default";
        pinctrl-0 = <&keypad_rows &keypad_cols>;
        status = "okay";
index a50be640f1b03dd0422e6ad8556dcf60b6d5b707..1df2f0bc1d76d81022f1300b6502901df28f8c1f 100644 (file)
                        linux,code = <116>;
                        label = "power";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                ok-key {
index 4f5d37920c8db0375fd3a8c09c8c093987037484..9a75e3effbc98acdbea66e5eedbe3ba04cd1b56b 100644 (file)
@@ -92,7 +92,7 @@
                        linux,code = <171>;
                        label = "config";
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                camera-key {
                        linux,code = <116>;
                        label = "power";
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                ok-key {
index 395c3ca9601eac317e8325bdf857fb6b294aeb75..5e5d3fecb04cd660d1335e9a26691787d754f51e 100644 (file)
@@ -35,7 +35,7 @@
                        linux,code = <KEY_POWER>;
                        label = "power key";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index b44bb682e976705aa23824f421c7cdf419d92151..bf7b21b817e48567d164a91268f5b448d15f1a4f 100644 (file)
@@ -48,7 +48,7 @@
                        linux,code = <KEY_HOME>;
                        label = "home key";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 9e2e24c6177a6bde547b28460958a254ab8c048a..8bca699b7f208366e5c25f911e9ded05bf34ac74 100644 (file)
        samsung,keypad-num-rows = <3>;
        samsung,keypad-num-columns = <2>;
        linux,keypad-no-autorepeat;
-       linux,keypad-wakeup;
+       wakeup-source;
        pinctrl-0 = <&keypad_rows &keypad_cols>;
        pinctrl-names = "default";
        status = "okay";
index a130ab39fa7759763cdfa1d277820cdee5801c4e..a51069f3c03b7abb24a481fc714c44c3f8b3d30f 100644 (file)
@@ -45,7 +45,7 @@
        samsung,keypad-num-rows = <3>;
        samsung,keypad-num-columns = <8>;
        linux,keypad-no-autorepeat;
-       linux,keypad-wakeup;
+       wakeup-source;
        pinctrl-0 = <&keypad_rows &keypad_cols>;
        pinctrl-names = "default";
        status = "okay";
index a6f78c3da935023e27db331c02f1fca1a8826ee8..ed017cc7b14fcf083bd0b89094a6c250fb809e47 100644 (file)
                        linux,code = <116>;
                        label = "power";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                key-ok {
                        linux,code = <139>;
                        label = "ok";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index e2439e87ee4ab4b807b2692829f58a4823a73e53..b61d1f6375104ba4ca7ac9e4ad1061aa040fdf03 100644 (file)
                reg = <0x10000000 0x100>;
        };
 
+       sromc@12250000 {
+               compatible = "samsung,exynos-srom";
+               reg = <0x12250000 0x14>;
+       };
+
        combiner: interrupt-controller@10440000 {
                compatible = "samsung,exynos4210-combiner";
                #interrupt-cells = <2>;
index c000532c14446db9cbcb6a942cd7c117a9d5e0aa..8b2acc74aa76fac7bcf73d28ad653e7897f8f7ee 100644 (file)
                        label = "SW-TACT2";
                        gpios = <&gpx1 4 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_MENU>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                home {
                        label = "SW-TACT3";
                        gpios = <&gpx1 5 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_HOME>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                up {
                        label = "SW-TACT4";
                        gpios = <&gpx1 6 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_UP>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                down {
                        label = "SW-TACT5";
                        gpios = <&gpx1 7 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_DOWN>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                back {
                        label = "SW-TACT6";
                        gpios = <&gpx2 0 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_BACK>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                wakeup {
                        label = "SW-TACT7";
                        gpios = <&gpx2 1 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_WAKEUP>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 5cb33ba5e296258e0b5104f5f2c0284815d8b75b..95210ef6a6b51ef129cb75a0ef53fb8d5a9f46b8 100644 (file)
@@ -37,7 +37,7 @@
                        label = "Power";
                        gpios = <&gpx1 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                lid-switch {
@@ -46,7 +46,7 @@
                        linux,input-type = <5>; /* EV_SW */
                        linux,code = <0>; /* SW_LID */
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index c1edd6d038a905dffd68e895cdafae25c967f160..0f500cb1eb2ddb0e1f81e2ed58323220d313c4aa 100644 (file)
@@ -37,7 +37,7 @@
                        label = "Power";
                        gpios = <&gpx1 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                lid-switch {
@@ -46,7 +46,7 @@
                        linux,input-type = <5>; /* EV_SW */
                        linux,code = <0>; /* SW_LID */
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 33e2d5f7315b5265fdfc5b74b4a3e38788737f00..234403422c6f4fceb1a8492a470a2ea28fe10a73 100644 (file)
 
        sss@10830000 {
                compatible = "samsung,exynos4210-secss";
-               reg = <0x10830000 0x10000>;
+               reg = <0x10830000 0x300>;
                interrupts = <0 112 0>;
                clocks = <&clock CLK_SSS>;
                clock-names = "secss";
diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
new file mode 100644 (file)
index 0000000..f9aa6bb
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+ * Exynos5410 SoC pin-mux and pin-config device tree source
+ *
+ * Copyright (c) 2013 Hardkernel Co., Ltd.
+ *              http://www.hardkernel.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+&pinctrl_0 {
+       gpa0: gpa0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpa1: gpa1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpa2: gpa2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpb0: gpb0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpb1: gpb1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpb2: gpb2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpb3: gpb3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpc0: gpc0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpc3: gpc3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpc1: gpc1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpc2: gpc2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpm5: gpm5 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpd1: gpd1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpe0: gpe0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpe1: gpe1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpf0: gpf0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpf1: gpf1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpg0: gpg0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpg1: gpg1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpg2: gpg2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gph0: gph0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gph1: gph1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpm7: gpm7 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy0: gpy0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy1: gpy1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy2: gpy2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy3: gpy3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy4: gpy4 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy5: gpy5 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy6: gpy6 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy7: gpy7 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpx0: gpx0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               interrupt-parent = <&combiner>;
+               #interrupt-cells = <2>;
+               interrupts = <23 0>,
+                            <24 0>,
+                            <25 0>,
+                            <25 1>,
+                            <26 0>,
+                            <26 1>,
+                            <27 0>,
+                            <27 1>;
+       };
+
+       gpx1: gpx1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               interrupt-parent = <&combiner>;
+               #interrupt-cells = <2>;
+               interrupts = <28 0>,
+                            <28 1>,
+                            <29 0>,
+                            <29 1>,
+                            <30 0>,
+                            <30 1>,
+                            <31 0>,
+                            <31 1>;
+       };
+
+       gpx2: gpx2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpx3: gpx3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
+
+&pinctrl_1 {
+       gpj0: gpj0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpj1: gpj1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpj2: gpj2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpj3: gpj3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpj4: gpj4 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpk0: gpk0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpk1: gpk1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpk2: gpk2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpk3: gpk3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
+
+&pinctrl_2 {
+       gpv0: gpv0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpv1: gpv1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpv2: gpv2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpv3: gpv3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpv4: gpv4 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
+
+&pinctrl_3 {
+       gpz: gpz {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
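
[Editor's note, not part of this commit] Every bank above is a GPIO controller and, where listed, an interrupt controller with two-cell specifiers, so consumers reference a pin as <&bank pin flags>. The gpx0/gpx1 banks additionally route their per-pin wakeup interrupts through the combiner; per the exynos4210 combiner binding those two cells appear to be <group interrupt-within-group>. For illustration, the LAN9115 node added to exynos5410-smdk5410.dts later in this diff hangs off gpx0 pin 5:

	ethernet@3,0 {
		interrupt-parent = <&gpx0>;
		interrupts = <5 IRQ_TYPE_LEVEL_LOW>;	/* gpx0-5, active-low level IRQ */
	};
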
index cebeaab3abecd4177566e45d614ef6da6d8405ab..a731fbe28ebc01bc816f589e46dbceb54b7bad36 100644 (file)
@@ -11,6 +11,7 @@
 
 /dts-v1/;
 #include "exynos5410.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
 / {
        model = "Samsung SMDK5410 board based on EXYNOS5410";
        compatible = "samsung,smdk5410", "samsung,exynos5410", "samsung,exynos5";
        disable-wp;
 };
 
+&pinctrl_0 {
+       srom_ctl: srom-ctl {
+               samsung,pins = "gpy0-3", "gpy0-4", "gpy0-5",
+                              "gpy1-0", "gpy1-1", "gpy1-2", "gpy1-3";
+               samsung,pin-function = <2>;
+               samsung,pin-drv = <0>;
+       };
+
+       srom_ebi: srom-ebi {
+               samsung,pins = "gpy3-0", "gpy3-1", "gpy3-2", "gpy3-3",
+                              "gpy3-4", "gpy3-5", "gpy3-6", "gpy3-7",
+                              "gpy5-0", "gpy5-1", "gpy5-2", "gpy5-3",
+                              "gpy5-4", "gpy5-5", "gpy5-6", "gpy5-7",
+                              "gpy6-0", "gpy6-1", "gpy6-2", "gpy6-3",
+                              "gpy6-4", "gpy6-5", "gpy6-6", "gpy6-7";
+               samsung,pin-function = <2>;
+               samsung,pin-pud = <3>;
+               samsung,pin-drv = <0>;
+       };
+};
+
+&sromc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&srom_ctl>, <&srom_ebi>;
+
+       ethernet@3,0 {
+               compatible = "smsc,lan9115";
+               reg = <3 0 0x10000>;
+               phy-mode = "mii";
+               interrupt-parent = <&gpx0>;
+               interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+               reg-io-width = <2>;
+               smsc,irq-push-pull;
+               smsc,force-internal-phy;
+
+               samsung,srom-page-mode = <1>;
+               samsung,srom-timing = <9 12 1 9 1 1>;
+       };
+};
+
 &uart0 {
        status = "okay";
 };
index fad0779b1b6e86d887a926a4afdee929ca33a1d7..f3490f5673449440b353ef9644d5fc944c545c4c 100644 (file)
        interrupt-parent = <&gic>;
 
        aliases {
+               pinctrl0 = &pinctrl_0;
+               pinctrl1 = &pinctrl_1;
+               pinctrl2 = &pinctrl_2;
+               pinctrl3 = &pinctrl_3;
                serial0 = &uart0;
                serial1 = &uart1;
                serial2 = &uart2;
                        reg = <0x10000000 0x100>;
                };
 
+               sromc: sromc@12250000 {
+                       compatible = "samsung,exynos-srom";
+                       reg = <0x12250000 0x14>;
+                       #address-cells = <2>;
+                       #size-cells = <1>;
+                       ranges = <0 0 0x04000000 0x20000
+                                 1 0 0x05000000 0x20000
+                                 2 0 0x06000000 0x20000
+                                 3 0 0x07000000 0x20000>;
+               };
+
                pmu_system_controller: system-controller@10040000 {
                        compatible = "samsung,exynos5410-pmu", "syscon";
                        reg = <0x10040000 0x5000>;
                        status = "disabled";
                };
 
+               pinctrl_0: pinctrl@13400000 {
+                       compatible = "samsung,exynos5410-pinctrl";
+                       reg = <0x13400000 0x1000>;
+                       interrupts = <0 45 0>;
+
+                       wakeup-interrupt-controller {
+                               compatible = "samsung,exynos4210-wakeup-eint";
+                               interrupt-parent = <&gic>;
+                               interrupts = <0 32 0>;
+                       };
+               };
+
+               pinctrl_1: pinctrl@14000000 {
+                       compatible = "samsung,exynos5410-pinctrl";
+                       reg = <0x14000000 0x1000>;
+                       interrupts = <0 46 0>;
+               };
+
+               pinctrl_2: pinctrl@10d10000 {
+                       compatible = "samsung,exynos5410-pinctrl";
+                       reg = <0x10d10000 0x1000>;
+                       interrupts = <0 50 0>;
+               };
+
+               pinctrl_3: pinctrl@03860000 {
+                       compatible = "samsung,exynos5410-pinctrl";
+                       reg = <0x03860000 0x1000>;
+                       interrupts = <0 47 0>;
+               };
+
                uart0: serial@12C00000 {
                        compatible = "samsung,exynos4210-uart";
                        reg = <0x12C00000 0x100>;
                };
        };
 };
+
+#include "exynos5410-pinctrl.dtsi"
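
[Editor's note, not part of this commit] The sromc node above models the SROM controller as a small bus: child addresses are <bank offset> and each ranges entry maps one 128 KiB bank window into the CPU address space. A worked example using the LAN9115 node from exynos5410-smdk5410.dts earlier in this diff:

	ethernet@3,0 {
		/* <bank offset size>: bank 3, offset 0, 64 KiB */
		reg = <3 0 0x10000>;
	};

	/* The ranges entry <3 0 0x07000000 0x20000> places bank 3 at
	 * 0x07000000, so the chip is visible at 0x07000000..0x0700ffff.
	 * The samsung,srom-timing cells program that bank's bus timings;
	 * their exact layout is defined by the samsung,exynos-srom binding
	 * added elsewhere in this merge and is not reproduced here. */
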
index 4ecef6981d5c4c7dd1332324e405fcd0d34ce4f0..a103ce8c398586f857a1c90c9696d40b357a5056 100644 (file)
@@ -11,6 +11,7 @@
 
 /dts-v1/;
 #include "exynos5420.dtsi"
+#include "exynos5420-cpus.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/input/input.h>
                        label = "SW-TACT1";
                        gpios = <&gpx2 7 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_WAKEUP>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
 
+&cpu0 {
+       cpu-supply = <&buck2_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck6_reg>;
+};
+
 &usbdrd_dwc3_1 {
        dr_mode = "host";
 };
diff --git a/arch/arm/boot/dts/exynos5420-cpus.dtsi b/arch/arm/boot/dts/exynos5420-cpus.dtsi
new file mode 100644 (file)
index 0000000..261d251
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * SAMSUNG EXYNOS5420 SoC cpu device tree source
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * This file provides the desired CPU ordering for Exynos5420 and Exynos5800
+ * boards: CPU[0123] are the Cortex-A15 cores.
+ *
+ * The Exynos5420, 5422 and 5800 actually share the same CPU configuration,
+ * but particular boards choose a different booting order.
+ *
+ * Exynos5420 and Exynos5800 always boot from the Cortex-A15. On Exynos5422
+ * the booting cluster (big or LITTLE) is chosen by the IROM code based on
+ * the gpg2-1 GPIO. By default, all Exynos5422-based boards boot from the
+ * LITTLE cluster (Cortex-A7).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x0>;
+                       clocks = <&clock CLK_ARM_CLK>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x1>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu2: cpu@2 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x2>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu3: cpu@3 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x3>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu4: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x100>;
+                       clocks = <&clock CLK_KFC_CLK>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
+
+               cpu5: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x101>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
+
+               cpu6: cpu@102 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x102>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
+
+               cpu7: cpu@103 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x103>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
+       };
+};
index 35cfb07dc4bb76073d0458983ad0fe95a8c430a9..3981ddb25036af95090f28d0afac7eaf59271451 100644 (file)
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/maxim,max77802.h>
 #include <dt-bindings/regulator/maxim,max77802.h>
 #include "exynos5420.dtsi"
+#include "exynos5420-cpus.dtsi"
 
 / {
        model = "Google Peach Pit Rev 6+";
@@ -64,7 +65,7 @@
                        label = "Power";
                        gpios = <&gpx1 2 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                lid-switch {
@@ -73,7 +74,7 @@
                        linux,input-type = <5>; /* EV_SW */
                        linux,code = <0>; /* SW_LID */
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
        vdd-supply = <&ldo9_reg>;
 };
 
+&cpu0 {
+       cpu-supply = <&buck2_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck6_reg>;
+};
+
 &dp {
        status = "okay";
        pinctrl-names = "default";
index ac35aefd320ff0acc0998f11b96c3375dc2454c5..0785fedf441e8001aa33ea198060477c5c644836 100644 (file)
@@ -11,6 +11,7 @@
 
 /dts-v1/;
 #include "exynos5420.dtsi"
+#include "exynos5420-cpus.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
 
 };
 
+&cpu0 {
+       cpu-supply = <&buck2_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck6_reg>;
+};
+
 &dp {
        pinctrl-names = "default";
        pinctrl-0 = <&dp_hpd>;
index 48a0a55314f5d184b03d36f1fb52374ef8cf794a..bb559d0cd956223a7a12bd3152d9ee31a0e46512 100644 (file)
                usbdrdphy1 = &usbdrd_phy1;
        };
 
-       cpus {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               cpu0: cpu@0 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a15";
-                       reg = <0x0>;
-                       clock-frequency = <1800000000>;
-                       cci-control-port = <&cci_control1>;
+       cluster_a15_opp_table: opp_table0 {
+               compatible = "operating-points-v2";
+               opp-shared;
+               opp@1800000000 {
+                       opp-hz = /bits/ 64 <1800000000>;
+                       opp-microvolt = <1250000>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu1: cpu@1 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a15";
-                       reg = <0x1>;
-                       clock-frequency = <1800000000>;
-                       cci-control-port = <&cci_control1>;
+               opp@1700000000 {
+                       opp-hz = /bits/ 64 <1700000000>;
+                       opp-microvolt = <1212500>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu2: cpu@2 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a15";
-                       reg = <0x2>;
-                       clock-frequency = <1800000000>;
-                       cci-control-port = <&cci_control1>;
+               opp@1600000000 {
+                       opp-hz = /bits/ 64 <1600000000>;
+                       opp-microvolt = <1175000>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu3: cpu@3 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a15";
-                       reg = <0x3>;
-                       clock-frequency = <1800000000>;
-                       cci-control-port = <&cci_control1>;
+               opp@1500000000 {
+                       opp-hz = /bits/ 64 <1500000000>;
+                       opp-microvolt = <1137500>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu4: cpu@100 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a7";
-                       reg = <0x100>;
-                       clock-frequency = <1000000000>;
-                       cci-control-port = <&cci_control0>;
+               opp@1400000000 {
+                       opp-hz = /bits/ 64 <1400000000>;
+                       opp-microvolt = <1112500>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu5: cpu@101 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a7";
-                       reg = <0x101>;
-                       clock-frequency = <1000000000>;
-                       cci-control-port = <&cci_control0>;
+               opp@1300000000 {
+                       opp-hz = /bits/ 64 <1300000000>;
+                       opp-microvolt = <1062500>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu6: cpu@102 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a7";
-                       reg = <0x102>;
-                       clock-frequency = <1000000000>;
-                       cci-control-port = <&cci_control0>;
+               opp@1200000000 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1037500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1100000000 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <1012500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1000000000 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = < 987500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@900000000 {
+                       opp-hz = /bits/ 64 <900000000>;
+                       opp-microvolt = < 962500>;
+                       clock-latency-ns = <140000>;
                };
+               opp@800000000 {
+                       opp-hz = /bits/ 64 <800000000>;
+                       opp-microvolt = < 937500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@700000000 {
+                       opp-hz = /bits/ 64 <700000000>;
+                       opp-microvolt = < 912500>;
+                       clock-latency-ns = <140000>;
+               };
+       };
 
-               cpu7: cpu@103 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a7";
-                       reg = <0x103>;
-                       clock-frequency = <1000000000>;
-                       cci-control-port = <&cci_control0>;
+       cluster_a7_opp_table: opp_table1 {
+               compatible = "operating-points-v2";
+               opp-shared;
+               opp@1300000000 {
+                       opp-hz = /bits/ 64 <1300000000>;
+                       opp-microvolt = <1275000>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1200000000 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1212500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1100000000 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <1162500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1000000000 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <1112500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@900000000 {
+                       opp-hz = /bits/ 64 <900000000>;
+                       opp-microvolt = <1062500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@800000000 {
+                       opp-hz = /bits/ 64 <800000000>;
+                       opp-microvolt = <1025000>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@700000000 {
+                       opp-hz = /bits/ 64 <700000000>;
+                       opp-microvolt = <975000>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@600000000 {
+                       opp-hz = /bits/ 64 <600000000>;
+                       opp-microvolt = <937500>;
+                       clock-latency-ns = <140000>;
                };
        };
 
+       /*
+        * The 'cpus' node is not present here but instead it is provided
+        * by exynos5420-cpus.dtsi or exynos5422-cpus.dtsi.
+        */
+
        cci: cci@10d20000 {
                compatible = "arm,cci-400";
                #address-cells = <1>;
                compatible = "samsung,exynos4210-pd";
                reg = <0x10044000 0x20>;
                #power-domain-cells = <0>;
-               clocks = <&clock CLK_GSCL0>, <&clock CLK_GSCL1>;
-               clock-names = "asb0", "asb1";
+               clocks = <&clock CLK_FIN_PLL>,
+                        <&clock CLK_MOUT_USER_ACLK300_GSCL>,
+                        <&clock CLK_GSCL0>, <&clock CLK_GSCL1>;
+               clock-names = "oscclk", "clk0", "asb0", "asb1";
        };
 
        isp_pd: power-domain@10044020 {
 
        sss: sss@10830000 {
                compatible = "samsung,exynos4210-secss";
-               reg = <0x10830000 0x10000>;
+               reg = <0x10830000 0x300>;
                interrupts = <0 112 0>;
                clocks = <&clock CLK_SSS>;
                clock-names = "secss";
index b7f60c855459126bf63bb3ffe120ab6f87f37dfe..9b46b9fbac4e774b1542cde204f949bcc791b730 100644 (file)
@@ -4,78 +4,98 @@
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * The only difference between EXYNOS5422 and EXYNOS5800 is cpu ordering. The
- * EXYNOS5422 is booting from Cortex-A7 core while the EXYNOS5800 is booting
- * from Cortex-A15 core.
+ * This file provides the desired CPU ordering for Exynos5422: CPU[0123] are the A7 cores.
  *
- * EXYNOS5422 based board files can include this file to provide cpu ordering
- * which could boot a cortex-a7 from cpu0.
+ * The Exynos5420, 5422 and 5800 actually share the same CPU configuration,
+ * but particular boards choose a different booting order.
+ *
+ * Exynos5420 and Exynos5800 always boot from the Cortex-A15. On Exynos5422
+ * the booting cluster (big or LITTLE) is chosen by the IROM code based on
+ * the gpg2-1 GPIO. By default, all Exynos5422-based boards boot from the
+ * LITTLE cluster (Cortex-A7).
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
-&cpu0 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a7";
-       reg = <0x100>;
-       clock-frequency = <1000000000>;
-       cci-control-port = <&cci_control0>;
-};
+/ {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
 
-&cpu1 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a7";
-       reg = <0x101>;
-       clock-frequency = <1000000000>;
-       cci-control-port = <&cci_control0>;
-};
+               cpu0: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x100>;
+                       clocks = <&clock CLK_KFC_CLK>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
 
-&cpu2 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a7";
-       reg = <0x102>;
-       clock-frequency = <1000000000>;
-       cci-control-port = <&cci_control0>;
-};
+               cpu1: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x101>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
 
-&cpu3 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a7";
-       reg = <0x103>;
-       clock-frequency = <1000000000>;
-       cci-control-port = <&cci_control0>;
-};
+               cpu2: cpu@102 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x102>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
 
-&cpu4 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a15";
-       reg = <0x0>;
-       clock-frequency = <1800000000>;
-       cci-control-port = <&cci_control1>;
-};
+               cpu3: cpu@103 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x103>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
 
-&cpu5 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a15";
-       reg = <0x1>;
-       clock-frequency = <1800000000>;
-       cci-control-port = <&cci_control1>;
-};
+               cpu4: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       clocks = <&clock CLK_ARM_CLK>;
+                       reg = <0x0>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
 
-&cpu6 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a15";
-       reg = <0x2>;
-       clock-frequency = <1800000000>;
-       cci-control-port = <&cci_control1>;
-};
+               cpu5: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x1>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu6: cpu@2 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x2>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
 
-&cpu7 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a15";
-       reg = <0x3>;
-       clock-frequency = <1800000000>;
-       cci-control-port = <&cci_control1>;
+               cpu7: cpu@3 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x3>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+       };
 };
index 9134217446b8e2c321ecea78571b0cc3cdaed683..1bd507bfa750155579259b14195e08dab8adbb00 100644 (file)
                        <19200000>;
 };
 
+&cpu0 {
+       cpu-supply = <&buck6_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck2_reg>;
+};
+
 &hdmi {
        status = "okay";
        hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
index 064176f201e74da75c5f2b581602e12a7ab73b6b..6e9edc1610c4990777a516fbde1d8ef9a0a4cdab 100644 (file)
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/maxim,max77802.h>
 #include <dt-bindings/regulator/maxim,max77802.h>
 #include "exynos5800.dtsi"
+#include "exynos5420-cpus.dtsi"
 
 / {
        model = "Google Peach Pi Rev 10+";
@@ -63,7 +64,7 @@
                        label = "Power";
                        gpios = <&gpx1 2 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                lid-switch {
@@ -72,7 +73,7 @@
                        linux,input-type = <5>; /* EV_SW */
                        linux,code = <0>; /* SW_LID */
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
        };
        vdd-supply = <&ldo9_reg>;
 };
 
+&cpu0 {
+       cpu-supply = <&buck2_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck6_reg>;
+};
+
 &dp {
        status = "okay";
        pinctrl-names = "default";
index c0bb3563cac14cc5dd5e8c8666c3902a3068cb98..8213016803e5d4b1465e7c6742926c6734de10b8 100644 (file)
        compatible = "samsung,exynos5800-clock";
 };
 
+&cluster_a15_opp_table {
+       opp@1700000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1600000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1500000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@1400000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@1300000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@1200000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@1100000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@1000000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@900000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@800000000 {
+               opp-microvolt = <900000>;
+       };
+       opp@700000000 {
+               opp-microvolt = <900000>;
+       };
+       opp@600000000 {
+               opp-hz = /bits/ 64 <600000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@500000000 {
+               opp-hz = /bits/ 64 <500000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@400000000 {
+               opp-hz = /bits/ 64 <400000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@300000000 {
+               opp-hz = /bits/ 64 <300000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@200000000 {
+               opp-hz = /bits/ 64 <200000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+};
+
+&cluster_a7_opp_table {
+       opp@1300000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1200000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1100000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1000000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@900000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@800000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@700000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@600000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@500000000 {
+               opp-hz = /bits/ 64 <500000000>;
+               opp-microvolt = <1000000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@400000000 {
+               opp-hz = /bits/ 64 <400000000>;
+               opp-microvolt = <1000000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@300000000 {
+               opp-hz = /bits/ 64 <300000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@200000000 {
+               opp-hz = /bits/ 64 <200000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+};
+
 &mfc {
        compatible = "samsung,mfc-v8";
 };
index ed1d0b4578ef99402f22941b818451ac366518cf..cda6907a27b9bf18da633088849398634ef07218 100644 (file)
@@ -30,7 +30,7 @@
                        label = "BP1";
                        gpios = <&gpio3 18 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 7ac4f1af16ac856d243674c630b9207da71d8335..1eaa131e2d1809fb38525dc4d1105bc10c9cfef8 100644 (file)
                        label = "User button";
                        gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
                        linux,code = <0x100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
index 927b391d2058d588eeddc93f7c5acd4e29cafe5f..88594747f454fede06968b152c6a7da76bb3e6b7 100644 (file)
@@ -36,7 +36,7 @@
                        label = "SW3";
                        gpios = <&gpio1 21 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
@@ -49,7 +49,7 @@
                        label = "SW4";
                        gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 4ea89344a5fff51115160fad9e34b16a4a4377eb..fd20e99c777e9764c9c1e298029ea6db0b652ad6 100644 (file)
                        compatible = "fixed-clock";
                        reg = <0>;
                        #clock-cells = <0>;
-                       clock-frequency = <27000000>;
+                       clock-frequency = <26000000>;
                };
        };
 
                        0x02020049 /* row 2, col 2, KEY_KP9 */
                >;
                gpio-activelow;
-               linux,wakeup;
+               wakeup-source;
                debounce-delay-ms = <100>;
                col-scan-delay-us = <5000>;
                linux,no-autorepeat;
index 75b036700d314cefa0040e88b92bc54e2f595a22..4727bbb804e1266a423203118dd22770a853d97f 100644 (file)
@@ -30,7 +30,7 @@
                        label = "BP1";
                        gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,input-type = <1>;
                };
        };
index 649befeb2cf96ef4b968f809a98d5d718227b002..018d24eb9965f8060050555bab1e1b8663bf1f37 100644 (file)
                        label = "Power Button";
                        gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 321662f53e3309b835cfdc607681d8b0cc76aeb7..16fc69c69ab2e60a4cbd847aa98825fe8577e318 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
        cap-sdio-irq;
-       enable-sdio-wakeup;
+       wakeup-source;
        keep-power-in-suspend;
        max-frequency = <50000000>;
        no-1-8-v;
index 34599c547459a2d00756cb42bc05cca3f2fa2798..d270df3e58917aab23f21eb23ef04e667158bf4c 100644 (file)
@@ -41,7 +41,7 @@
                        label = "BP1";
                        gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
                        linux,code = <256>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,input-type = <1>;
                };
        };
index 3bc18835fb4bbbe0e388922689a70395bb532cd6..4486bc47d14009d96e039d33664a72204f8376c1 100644 (file)
                        label = "Home";
                        gpios = <&gpio5 10 0>;
                        linux,code = <102>; /* KEY_HOME */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                back {
                        label = "Back";
                        gpios = <&gpio5 11 0>;
                        linux,code = <158>; /* KEY_BACK */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                program {
                        label = "Program";
                        gpios = <&gpio5 12 0>;
                        linux,code = <362>; /* KEY_PROGRAM */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                volume-up {
index 53fd75c8ffcfd34c19f43d407bb2f8e6a4573495..368ccd0a053ee6dee4eef00f106e6cf7ede5630c 100644 (file)
                        label = "Volume Up";
                        gpios = <&gpio2 14 0>;
                        linux,code = <115>; /* KEY_VOLUMEUP */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                volume-down {
                        label = "Volume Down";
                        gpios = <&gpio2 15 0>;
                        linux,code = <114>; /* KEY_VOLUMEDOWN */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 13e842b0c7857b1050c73319842113d6bf08dbb3..0ecb43d88522318701304fad35e3c214cfcb276e 100644 (file)
                interrupts = <26 0>;
                gpios = <&gpio3 26 GPIO_ACTIVE_LOW>;
                ti,x-plate-ohms = <660>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index 64804719f0f442de2225613f54518f68293fc190..3cf682a681f40a986e8eb3142f3621ec5d6736b0 100644 (file)
                interrupt-parent = <&gpio3>;
                interrupts = <23 0>;
                wakeup-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
                interrupt-parent = <&gpio3>;
                interrupts = <22 0>;
                wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
        status = "okay";
 
        lvds0: lvds-channel@0 {
-               fsl,data-mapping = "jeida";
-               fsl,data-width = <24>;
+               fsl,data-mapping = "spwg";
+               fsl,data-width = <18>;
                status = "okay";
 
                display-timings {
-                       native-mode = <&lvds_timing0>;
-                       lvds_timing0: hsd100pxn1 {
+                       native-mode = <&lvds0_timing0>;
+
+                       lvds0_timing0: hsd100pxn1 {
                                clock-frequency = <65000000>;
                                hactive = <1024>;
                                vactive = <768>;
                                hsync-active = <0>;
                                vsync-active = <0>;
                                de-active = <1>;
-                               pixelclk-active = <0>;
+                               pixelclk-active = <1>;
+                       };
+
+                       lvds0_timing1: nl12880bc20 {
+                               clock-frequency = <71000000>;
+                               hactive = <1280>;
+                               vactive = <800>;
+                               hback-porch = <50>;
+                               hsync-len = <60>;
+                               hfront-porch = <50>;
+                               vback-porch = <5>;
+                               vsync-len = <13>;
+                               vfront-porch = <5>;
+                               hsync-active = <0>;
+                               vsync-active = <0>;
+                               de-active = <1>;
+                               pixelclk-active = <1>;
                        };
                };
        };
 
        lvds1: lvds-channel@1 {
-               fsl,data-mapping = "jeida";
-               fsl,data-width = <24>;
+               fsl,data-mapping = "spwg";
+               fsl,data-width = <18>;
                status = "okay";
 
                display-timings {
-                       native-mode = <&lvds_timing1>;
-                       lvds_timing1: hsd100pxn1 {
+                       native-mode = <&lvds1_timing0>;
+
+                       lvds1_timing0: hsd100pxn1 {
                                clock-frequency = <65000000>;
                                hactive = <1024>;
                                vactive = <768>;
                                hsync-active = <0>;
                                vsync-active = <0>;
                                de-active = <1>;
-                               pixelclk-active = <0>;
+                               pixelclk-active = <1>;
                        };
                };
        };
index d3e50b22064f28777bbd2a3b60f512d844ff9418..bd3dfefa5778acbcc1124279f02517fc8aecaa3e 100644 (file)
@@ -37,7 +37,7 @@
                        compatible = "fixed-clock";
                        reg = <0>;
                        #clock-cells = <0>;
-                       clock-frequency = <27000000>;
+                       clock-frequency = <26000000>;
                };
        };
 
@@ -50,7 +50,7 @@
                        label = "Power Button";
                        gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
                        linux,code = <116>; /* KEY_POWER */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index c275eecc9472006de381c2a450025badea821f2c..d35a5cdc3229638445b4aacf4414da1bf90b8210 100644 (file)
@@ -77,7 +77,7 @@
                interrupt-parent = <&gpio3>;
                interrupts = <22 0>;
                wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index 00bd63e63d0cdd47f4ffa012f572f10323a2d14f..b715deb4ea4666e755ddece8ce198f38187fec15 100644 (file)
@@ -44,7 +44,7 @@
                        label = "recovery";
                        gpios = <&gpio3 16 1>;
                        linux,code = <0x198>; /* KEY_RESTART */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
diff --git a/arch/arm/boot/dts/imx6q-icore-rqs.dts b/arch/arm/boot/dts/imx6q-icore-rqs.dts
new file mode 100644 (file)
index 0000000..0053188
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015 Amarula Solutions B.V.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-icore-rqs.dtsi"
+
+/ {
+       model = "Engicam i.CoreM6 Quad SOM";
+       compatible = "engicam,imx6-icore-rqs", "fsl,imx6q";
+
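+       /* SGTL5000 codec on I2C3 below, linked to SSI1 through AUDMUX external port 4 */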
+       sound {
+               compatible = "fsl,imx-audio-sgtl5000";
+               model = "imx-audio-sgtl5000";
+               ssi-controller = <&ssi1>;
+               audio-codec = <&codec>;
+               audio-routing =
+                       "MIC_IN", "Mic Jack",
+                       "Mic Jack", "Mic Bias",
+                       "Headphone Jack", "HP_OUT";
+               mux-int-port = <1>;
+               mux-ext-port = <4>;
+       };
+};
+
+&i2c3 {
+       codec: sgtl5000@0a {
+               compatible = "fsl,sgtl5000";
+               reg = <0x0a>;
+               clocks = <&clks IMX6QDL_CLK_CKO>;
+               VDDA-supply = <&reg_2p5v>;
+               VDDIO-supply = <&reg_3p3v>;
+               VDDD-supply = <&reg_1p8v>;
+       };
+};
+
+&sata {
+       status = "okay";
+};
index 88aa1e4c792d2c2c5d56d8a819cac4640bb46369..2792da93db1fc7f86d869ac3259f3b19d4e14156 100644 (file)
@@ -77,7 +77,7 @@
                interrupt-parent = <&gpio3>;
                interrupts = <22 0>;
                wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index e26ebeb5b45c055a17919cbbc7751fa4588800f8..a8f3500ee522b3999a31d3c1dff2946e1481ba5d 100644 (file)
@@ -94,7 +94,7 @@
                        label = "User button";
                        gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 5cd16f2178b80fd955e8be48b776fb601afe606b..9d7ab6cdc9a60b3a0b4a76c853851d8b87700737 100644 (file)
 
                pinctrl_pwm3: pwm3grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_DAT1__PWM3_OUT           0x1b0b1
                        >;
                };
 
                pinctrl_pwm4: pwm4grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT2__PWM4_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_CMD__PWM4_OUT            0x1b0b1
                        >;
                };
 
index 9fa8a10c7cc8892266a87be766974b03fcfff326..8dd74e98ffd69b35f1dd8349e55231db25592148 100644 (file)
 
                pinctrl_pwm3: pwm3grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_DAT1__PWM3_OUT           0x1b0b1
                        >;
                };
 
index e8375e173873edc5dc39ef011321cbbac9378f00..ec3fe7444e154c4d39f28a5a4a6f8afef05eeb1f 100644 (file)
 
                pinctrl_pwm3: pwm3grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_DAT1__PWM3_OUT           0x1b0b1
                        >;
                };
 
index 66983dc5cbdae8868b2f20ade01b3df1be44575d..367cc49eea0d69cd63c3d3a86121f260b91f05c2 100644 (file)
 };
 
 &pwm4 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_pwm4>;
+       pinctrl-names = "default", "state_dio";
+       pinctrl-0 = <&pinctrl_pwm4_backlight>;
+       pinctrl-1 = <&pinctrl_pwm4_dio>;
        status = "okay";
 };
 
                        >;
                };
 
-               pinctrl_pwm4: pwm4grp {
+               pinctrl_pwm4_backlight: pwm4grpbacklight {
                        fsl,pins = <
+                               /* LVDS_PWM J6.5 */
                                MX6QDL_PAD_SD1_CMD__PWM4_OUT            0x1b0b1
                        >;
                };
 
+               pinctrl_pwm4_dio: pwm4grpdio {
+                       fsl,pins = <
+                               /* DIO3 J16.4 */
+                               MX6QDL_PAD_SD4_DAT2__PWM4_OUT           0x1b0b1
+                       >;
+               };
+
                pinctrl_uart1: uart1grp {
                        fsl,pins = <
                                MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA      0x1b0b1
index cca39f194017c262d6c850172f76c584a73f099f..f27f184558fb4daa43ad525afd06750eea5fd946 100644 (file)
 
                pinctrl_pwm3: pwm3grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_DAT1__PWM3_OUT           0x1b0b1
                        >;
                };
 
index 6dd0b764e036d1c51cdde3015c1a2cc56178a999..d6c2358ffad4490af869a099e1884bc3122415ac 100644 (file)
@@ -48,7 +48,7 @@
 
        ir_recv: ir-receiver {
                compatible = "gpio-ir-receiver";
-               gpios = <&gpio3 5 1>;
+               gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_hummingboard_gpio3_5>;
        };
@@ -67,7 +67,7 @@
                reg_usbh1_vbus: usb-h1-vbus {
                        compatible = "regulator-fixed";
                        enable-active-high;
-                       gpio = <&gpio1 0 0>;
+                       gpio = <&gpio1 0 GPIO_ACTIVE_HIGH>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_hummingboard_usbh1_vbus>;
                        regulator-name = "usb_h1_vbus";
@@ -78,7 +78,7 @@
                reg_usbotg_vbus: usb-otg-vbus {
                        compatible = "regulator-fixed";
                        enable-active-high;
-                       gpio = <&gpio3 22 0>;
+                       gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_hummingboard_usbotg_vbus>;
                        regulator-name = "usb_otg_vbus";
 &pcie {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_hummingboard_pcie_reset>;
-       reset-gpio = <&gpio3 4 0>;
+       reset-gpio = <&gpio3 4 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
new file mode 100644 (file)
index 0000000..f8d945a
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 2015 Amarula Solutions B.V.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/imx6qdl-clock.h>
+
+/ {
+       memory {
+               reg = <0x10000000 0x80000000>;
+       };
+
+       reg_1p8v: regulator-1p8v {
+               compatible = "regulator-fixed";
+               regulator-name = "1P8V";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_2p5v: regulator-2p5v {
+               compatible = "regulator-fixed";
+               regulator-name = "2P5V";
+               regulator-min-microvolt = <2500000>;
+               regulator-max-microvolt = <2500000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_3p3v: regulator-3p3v {
+               compatible = "regulator-fixed";
+               regulator-name = "3P3V";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_sd3_vmmc: regulator-sd3-vmmc {
+               compatible = "regulator-fixed";
+               regulator-name = "P3V3_SD3_SWITCHED";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio1 4 GPIO_ACTIVE_LOW>;
+               enable-active-high;
+       };
+
+       reg_sd4_vmmc: regulator-sd4-vmmc {
+               compatible = "regulator-fixed";
+               regulator-name = "P3V3_SD4_SWITCHED";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_usb_h1_vbus: regulator-usb-h1-vbus {
+               compatible = "regulator-fixed";
+               regulator-name = "usb_h1_vbus";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_usb_otg_vbus: regulator-usb-otg-vbus {
+               compatible = "regulator-fixed";
+               regulator-name = "usb_otg_vbus";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       usb_hub: usb-hub {
+               compatible = "smsc,usb3503a";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_usbhub>;
+               reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+               clocks = <&clks IMX6QDL_CLK_LVDS2_GATE>;
+               clock-names = "refclk";
+       };
+};
+
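+/* Parent the LVDS2 clock mux to the oscillator; its gated output drives the USB hub refclk above */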
+&clks {
+       assigned-clocks = <&clks IMX6QDL_CLK_LVDS2_SEL>;
+       assigned-clock-parents = <&clks IMX6QDL_CLK_OSC>;
+};
+
+&audmux {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_audmux>;
+       status = "okay";
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_enet>;
+       phy-handle = <&eth_phy>;
+       phy-mode = "rgmii";
+       status = "okay";
+
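+       /* Per-pad RGMII skew tuning (picoseconds) for the Ethernet PHY */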
+       mdio {
+               eth_phy: ethernet-phy {
+                       rxc-skew-ps = <1140>;
+                       txc-skew-ps = <1140>;
+                       txen-skew-ps = <600>;
+                       rxdv-skew-ps = <240>;
+                       rxd0-skew-ps = <420>;
+                       rxd1-skew-ps = <600>;
+                       rxd2-skew-ps = <420>;
+                       rxd3-skew-ps = <240>;
+                       txd0-skew-ps = <60>;
+                       txd1-skew-ps = <60>;
+                       txd2-skew-ps = <60>;
+                       txd3-skew-ps = <240>;
+               };
+       };
+};
+
+&i2c1 {
+       clock-frequency = <100000>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c1>;
+       status = "okay";
+};
+
+&i2c2 {
+       clock-frequency = <100000>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c2>;
+       status = "okay";
+};
+
+&i2c3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c3>;
+       status = "okay";
+};
+
+&pcie {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_pcie>;
+       reset-gpio = <&gpio3 29 GPIO_ACTIVE_LOW>;
+       status = "okay";
+};
+
+&ssi1 {
+       fsl,mode = "i2s-slave";
+       status = "okay";
+};
+
+&uart4 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart4>;
+       status = "okay";
+};
+
+&usbh1 {
+       vbus-supply = <&reg_usb_h1_vbus>;
+       disable-over-current;
+       clocks = <&clks IMX6QDL_CLK_USBOH3>;
+       status = "okay";
+};
+
+&usbotg {
+       vbus-supply = <&reg_usb_otg_vbus>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usbotg>;
+       disable-over-current;
+       status = "okay";
+};
+
+&usdhc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc1>;
+       no-1-8-v;
+       status = "okay";
+};
+
+&usdhc3 {
+       pinctrl-names = "default", "state_100mhz", "state_200mhz";
+       pinctrl-0 = <&pinctrl_usdhc3>;
+       pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+       pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+       vmmc-supply = <&reg_sd3_vmmc>;
+       cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
+       no-1-8-v;
+       status = "okay";
+};
+
+&usdhc4 {
+       pinctrl-names = "default", "state_100mhz", "state_200mhz";
+       pinctrl-0 = <&pinctrl_usdhc4>;
+       pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
+       pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
+       vmmc-supply = <&reg_sd4_vmmc>;
+       bus-width = <8>;
+       no-1-8-v;
+       non-removable;
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl_audmux: audmux {
+               fsl,pins = <
+                       MX6QDL_PAD_DISP0_DAT20__AUD4_TXC  0x130b0
+                       MX6QDL_PAD_DISP0_DAT21__AUD4_TXD  0x110b0
+                       MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0
+                       MX6QDL_PAD_DISP0_DAT23__AUD4_RXD  0x130b0
+               >;
+       };
+
+       pinctrl_enet: enetgrp {
+               fsl,pins = <
+                       MX6QDL_PAD_ENET_MDIO__ENET_MDIO       0x1b0b0
+                       MX6QDL_PAD_ENET_MDC__ENET_MDC         0x1b0b0
+                       MX6QDL_PAD_RGMII_TXC__RGMII_TXC       0x1b0b0
+                       MX6QDL_PAD_RGMII_TD0__RGMII_TD0       0x1b0b0
+                       MX6QDL_PAD_RGMII_TD1__RGMII_TD1       0x1b0b0
+                       MX6QDL_PAD_RGMII_TD2__RGMII_TD2       0x1b0b0
+                       MX6QDL_PAD_RGMII_TD3__RGMII_TD3       0x1b0b0
+                       MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+                       MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK  0x1b0b0
+                       MX6QDL_PAD_RGMII_RXC__RGMII_RXC       0x1b0b0
+                       MX6QDL_PAD_RGMII_RD0__RGMII_RD0       0x1b0b0
+                       MX6QDL_PAD_RGMII_RD1__RGMII_RD1       0x1b0b0
+                       MX6QDL_PAD_RGMII_RD2__RGMII_RD2       0x1b0b0
+                       MX6QDL_PAD_RGMII_RD3__RGMII_RD3       0x1b0b0
+                       MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+                       MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN     0x1b0b0
+               >;
+       };
+
+       pinctrl_i2c1: i2c1grp {
+               fsl,pins = <
+                       MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+                       MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+               >;
+       };
+
+       pinctrl_i2c2: i2c2grp {
+               fsl,pins = <
+                       MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+                       MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+               >;
+       };
+
+       pinctrl_i2c3: i2c3grp {
+               fsl,pins = <
+                       MX6QDL_PAD_GPIO_5__I2C3_SCL  0x4001b8b1
+                       MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+                       MX6QDL_PAD_GPIO_0__CCM_CLKO1    0x130b0
+               >;
+       };
+
+       pinctrl_pcie: pciegrp {
+               fsl,pins = <
+                       MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1f059  /* PCIe Reset */
+               >;
+       };
+
+       pinctrl_uart4: uart4grp {
+               fsl,pins = <
+                       MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+                       MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+               >;
+       };
+
+       pinctrl_usbhub: usbhubgrp {
+               fsl,pins = <
+                       MX6QDL_PAD_GPIO_6__GPIO1_IO06  0x1f059  /* HUB USB Reset */
+               >;
+       };
+
+       pinctrl_usbotg: usbotggrp {
+               fsl,pins = <
+                       MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+               >;
+       };
+
+       pinctrl_usdhc1: usdhc1grp {
+               fsl,pins = <
+                       MX6QDL_PAD_SD1_CMD__SD1_CMD    0x17071
+                       MX6QDL_PAD_SD1_CLK__SD1_CLK    0x10071
+                       MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17071
+                       MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17071
+                       MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17071
+                       MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17071
+               >;
+       };
+
+       pinctrl_usdhc3: usdhc3grp {
+               fsl,pins = <
+                       MX6QDL_PAD_SD3_CMD__SD3_CMD    0x17070
+                       MX6QDL_PAD_SD3_CLK__SD3_CLK    0x10070
+                       MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17070
+                       MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17070
+                       MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17070
+                       MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17070
+                       MX6QDL_PAD_GPIO_1__GPIO1_IO01  0x1f059  /* CD */
+                       MX6QDL_PAD_GPIO_4__GPIO1_IO04  0x1f059  /* PWR */
+               >;
+       };
+
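+       /* SD3 pad settings for the "state_100mhz" and "state_200mhz" pinctrl states used by &usdhc3 above */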
+       pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+               fsl,pins = <
+                       MX6QDL_PAD_SD3_CMD__SD3_CMD    0x170B1
+                       MX6QDL_PAD_SD3_CLK__SD3_CLK    0x100B1
+                       MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170B1
+                       MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170B1
+                       MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170B1
+                       MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170B1
+               >;
+       };
+
+       pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+               fsl,pins = <
+                       MX6QDL_PAD_SD3_CMD__SD3_CMD    0x170F9
+                       MX6QDL_PAD_SD3_CLK__SD3_CLK    0x100F9
+                       MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170F9
+                       MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170F9
+                       MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170F9
+                       MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170F9
+               >;
+       };
+
+       pinctrl_usdhc4: usdhc4grp {
+               fsl,pins = <
+                       MX6QDL_PAD_SD4_CMD__SD4_CMD    0x17070
+                       MX6QDL_PAD_SD4_CLK__SD4_CLK    0x10070
+                       MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17070
+                       MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17070
+                       MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17070
+                       MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17070
+                       MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17070
+                       MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17070
+                       MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17070
+                       MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17070
+               >;
+       };
+
+       pinctrl_usdhc4_100mhz: usdhc4grp_100mhz {
+               fsl,pins = <
+                       MX6QDL_PAD_SD4_CMD__SD4_CMD    0x170B1
+                       MX6QDL_PAD_SD4_CLK__SD4_CLK    0x100B1
+                       MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x170B1
+                       MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x170B1
+                       MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x170B1
+                       MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x170B1
+                       MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x170B1
+                       MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x170B1
+                       MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x170B1
+                       MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x170B1
+               >;
+       };
+
+       pinctrl_usdhc4_200mhz: usdhc4grp_200mhz {
+               fsl,pins = <
+                       MX6QDL_PAD_SD4_CMD__SD4_CMD    0x170F9
+                       MX6QDL_PAD_SD4_CLK__SD4_CLK    0x100F9
+                       MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x170F9
+                       MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x170F9
+                       MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x170F9
+                       MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x170F9
+                       MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x170F9
+                       MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x170F9
+                       MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x170F9
+                       MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x170F9
+               >;
+       };
+};
index 6d4069cc9419eae3361fece8cd52a1e29b25b853..86460e46d0559b770a16bcecfad8e2522395f226 100644 (file)
        bus-width = <4>;
        mmc-pwrseq = <&usdhc1_pwrseq>;
        keep-power-in-suspend;
+       no-1-8-v;
        non-removable;
        vmmc-supply = <&reg_brcm>;
        status = "okay";
index a35d54fd9cd320eea5d5890c174161588f47e823..dc74aa395ff5473d4cbdb36f28bfef16a1adf916 100644 (file)
                        label = "Power Button";
                        gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                menu {
index caeed56b74a391ae996b877dea9b0a325a14ff42..c6c590d1e94096e2f95e2e539d09acc10b2bce5a 100644 (file)
                        label = "Power Button";
                        gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                menu {
index 1a69a3420ac8d14a8977a7f8d862decb546f87bd..0f1aca450fe6abf931e6eb207b3f6f490e57cfc4 100644 (file)
                        label = "Power Button";
                        gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                menu {
index a6d445c17779cf5c39c26ff41170bb38eda31624..26b7a20a0abbe796f6c92f8e53ec0caebf0e730e 100644 (file)
                power {
                        label = "Power Button";
                        gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,code = <KEY_POWER>;
                };
 
                volume-up {
                        label = "Volume Up";
                        gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,code = <KEY_VOLUMEUP>;
                };
 
                volume-down {
                        label = "Volume Down";
                        gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,code = <KEY_VOLUMEDOWN>;
                };
        };
index 13cb7ccfea44dd653e8100669d415b88699c7831..efd06b576f1da445b991a279a0af48a687a084dc 100644 (file)
@@ -41,7 +41,7 @@
                        compatible = "fixed-clock";
                        reg = <0>;
                        #clock-cells = <0>;
-                       clock-frequency = <27000000>;
+                       clock-frequency = <26000000>;
                };
        };
 
@@ -52,7 +52,7 @@
                        label = "Power Button";
                        gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
 &fec {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet>;
+       clocks = <&clks IMX6QDL_CLK_ENET>,
+                <&clks IMX6QDL_CLK_ENET>,
+                <&clks IMX6QDL_CLK_ENET_REF>,
+                <&clks IMX6QDL_CLK_ENET_REF>;
+       clock-names = "ipg", "ahb", "ptp", "enet_out";
        phy-mode = "rmii";
        phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_HIGH>;
        phy-supply = <&reg_3v3_etn>;
                interrupts = <15 IRQ_TYPE_EDGE_FALLING>;
                reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;
                wake-gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 
        touchscreen: tsc2007@48 {
                interrupts = <26 0>;
                gpios = <&gpio3 26 GPIO_ACTIVE_LOW>;
                ti,x-plate-ohms = <660>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index 1211da894ee9993e630aa0744e6e2478d326e5be..d3e54e40a017264400e6a787453f421e8aac2f21 100644 (file)
                        gpio = <&gpio7 12 0>;
                };
        };
+
+       sound {
+               compatible = "fsl,imx6q-udoo-ac97",
+                            "fsl,imx-audio-ac97";
+               model = "fsl,imx6q-udoo-ac97";
+               audio-cpu = <&ssi1>;
+               audio-routing =
+                       "RX", "Mic Jack",
+                       "Headphone Jack", "TX";
+               mux-int-port = <1>;
+               mux-ext-port = <6>;
+       };
 };
 
 &fec {
                                MX6QDL_PAD_SD3_DAT3__SD3_DATA3          0x17059
                        >;
                };
+
+               pinctrl_ac97_running: ac97running {
+                       fsl,pins = <
+                               MX6QDL_PAD_DI0_PIN2__AUD6_TXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN3__AUD6_TXFS          0x1b0b0
+                               MX6QDL_PAD_DI0_PIN4__AUD6_RXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN15__AUD6_TXC          0x1b0b0
+                               MX6QDL_PAD_EIM_EB2__GPIO2_IO30          0x1b0b0
+                       >;
+               };
+
+               pinctrl_ac97_warm_reset: ac97warmreset {
+                       fsl,pins = <
+                               MX6QDL_PAD_DI0_PIN2__AUD6_TXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN3__GPIO4_IO19         0x1b0b0
+                               MX6QDL_PAD_DI0_PIN4__AUD6_RXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN15__AUD6_TXC          0x1b0b0
+                               MX6QDL_PAD_EIM_EB2__GPIO2_IO30          0x1b0b0
+                       >;
+               };
+
+               pinctrl_ac97_reset: ac97reset {
+                       fsl,pins = <
+                               MX6QDL_PAD_DI0_PIN2__GPIO4_IO18         0x1b0b0
+                               MX6QDL_PAD_DI0_PIN3__GPIO4_IO19         0x1b0b0
+                               MX6QDL_PAD_DI0_PIN4__AUD6_RXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN15__AUD6_TXC          0x1b0b0
+                               MX6QDL_PAD_EIM_EB2__GPIO2_IO30          0x1b0b0
+                       >;
+               };
        };
 };
 
        non-removable;
        status = "okay";
 };
+
+&audmux {
+       status = "okay";
+};
+
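+/* SSI1 in AC'97 slave mode; the reset/warm-reset pinctrl states remap the AC'97 lines to the GPIOs listed in ac97-gpios */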
+&ssi1 {
+       cell-index = <0>;
+       fsl,mode = "ac97-slave";
+       pinctrl-names = "ac97-running", "ac97-reset", "ac97-warm-reset";
+       pinctrl-0 = <&pinctrl_ac97_running>;
+       pinctrl-1 = <&pinctrl_ac97_reset>;
+       pinctrl-2 = <&pinctrl_ac97_warm_reset>;
+       ac97-gpios = <&gpio4 19 0 &gpio4 18 0 &gpio2 30 0>;
+       status = "okay";
+};
index 4f6ae921656f16dfd3814d7a70972be798e0fce9..f74d3db4846dacacd54b37e9cb1d35d707689368 100644 (file)
                                #size-cells = <1>;
                                reg = <0x2100000 0x10000>;
                                ranges = <0 0x2100000 0x10000>;
-                               interrupt-parent = <&intc>;
                                clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
                                         <&clks IMX6QDL_CLK_CAAM_ACLK>,
                                         <&clks IMX6QDL_CLK_CAAM_IPG>,
index 10c69963100fab7e972069a117a625175c21a72c..058bcdceb81aa1874ebbd1826d18c8a8b2743d4b 100644 (file)
        bus-width = <4>;
        non-removable;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        mmc-pwrseq = <&usdhc3_pwrseq>;
        status = "okay";
 };
index 115f3fd78971868ad7025fbb0f46c17c4252e574..96ea936eeeb0a1dce450696b38893ed3b341ed48 100644 (file)
@@ -52,7 +52,7 @@
        cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        vmmc-supply = <&vcc_sd3>;
        status = "okay";
 };
index 94ac4005d9cd3904a9d0148d8f3e0214835fb067..f1d37306e8bf4865c40a8923d76710654e0a9b0c 100644 (file)
        status = "okay";
 };
 
+&i2c3 {
+       clock-frequency = <100000>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c3>;
+       status = "okay";
+};
+
 &i2c4 {
         clock-frequency = <100000>;
         pinctrl-names = "default";
        non-removable;
        no-1-8-v;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        status = "okay";
 };
 
        cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        vmmc-supply = <&vcc_sd3>;
        status = "okay";
 };
                        >;
                };
 
+               pinctrl_i2c3: i2c3grp {
+                       fsl,pins = <
+                               MX6SX_PAD_KEY_ROW4__I2C3_SDA            0x4001b8b1
+                               MX6SX_PAD_KEY_COL4__I2C3_SCL            0x4001b8b1
+                       >;
+               };
+
                pinctrl_i2c4: i2c4grp {
                        fsl,pins = <
                                MX6SX_PAD_CSI_DATA07__I2C4_SDA          0x4001b8b1
index 6aaa5ec3d846eae6019e4414d4c925a5ebc2adc5..e06fe978ae3b57b389afa7adf2a094b14dd5aa05 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
        cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        vmmc-supply = <&reg_sd1_vmmc>;
        status = "okay";
 };
        pinctrl-0 = <&pinctrl_usdhc2>;
        no-1-8-v;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        status = "okay";
 };
 
index d63c597c07835595521ae00862343134f71967cf..f8a868552707124b3e1a86d074c62718927c3805 100644 (file)
@@ -22,7 +22,7 @@
        pinctrl-0 = <&pinctrl_usdhc1>;
        cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
-       enable-sdio-wakeup;
+       wakeup-source;
        status = "okay";
 };
 
index b2c453662905e313626a79a93c1543d1bfb301ef..b267f79e30590bba28c50ab9daaaf887d4bb1d76 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc1>;
        cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
-       enable-sdio-wakeup;
+       wakeup-source;
        keep-power-in-suspend;
        status = "okay";
 };
index 25ad3097874016ab3f1281e6a46d7824c7a91fc8..b5a50e0e7ff1980523e6567934135b7aad7a73d9 100644 (file)
                clock-output-names = "osc";
        };
 
+       timer {
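+               /* Architected timer PPIs: secure, non-secure, virtual, hypervisor */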
+               compatible = "arm,armv7-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupt-parent = <&intc>;
+       };
+
        etr@30086000 {
                compatible = "arm,coresight-tmc", "arm,primecell";
                reg = <0x30086000 0x1000>;
diff --git a/arch/arm/boot/dts/k2g-evm.dts b/arch/arm/boot/dts/k2g-evm.dts
new file mode 100644 (file)
index 0000000..99663b3
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Device Tree Source for K2G EVM
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/dts-v1/;
+
+#include "k2g.dtsi"
+
+/ {
+       compatible =  "ti,k2g-evm", "ti,k2g", "ti,keystone";
+       model = "Texas Instruments K2G General Purpose EVM";
+
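+       /* 2 GiB of RAM mapped above 4 GiB (address 0x8_00000000) */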
+       memory {
+               device_type = "memory";
+               reg = <0x00000008 0x00000000 0x00000000 0x80000000>;
+       };
+
+};
+
+&uart0 {
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/k2g.dtsi b/arch/arm/boot/dts/k2g.dtsi
new file mode 100644 (file)
index 0000000..7ff2796
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Device Tree Source for K2G SOC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "skeleton.dtsi"
+
+/ {
+       compatible = "ti,k2g","ti,keystone";
+       model = "Texas Instruments K2G SoC";
+       #address-cells = <2>;
+       #size-cells = <2>;
+       interrupt-parent = <&gic>;
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a15";
+                       device_type = "cpu";
+                       reg = <0>;
+               };
+       };
+
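+       /* GIC register regions: distributor, CPU interface, virtual interface control, virtual CPU interface */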
+       gic: interrupt-controller@02561000 {
+               compatible = "arm,cortex-a15-gic";
+               #interrupt-cells = <3>;
+               interrupt-controller;
+               reg = <0x0 0x02561000 0x0 0x1000>,
+                     <0x0 0x02562000 0x0 0x2000>,
+                     <0x0 0x02564000 0x0 0x1000>,
+                     <0x0 0x02566000 0x0 0x2000>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+                               IRQ_TYPE_LEVEL_HIGH)>;
+       };
+
+       timer {
+               compatible = "arm,armv7-timer";
+               interrupts =
+                       <GIC_PPI 13
+                               (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 14
+                               (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 11
+                               (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 10
+                               (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+       };
+
+       pmu {
+               compatible = "arm,cortex-a15-pmu";
+               interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
+       };
+
+       soc {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "ti,keystone","simple-bus";
+               ranges = <0x0 0x0 0x0 0xc0000000>;
+               dma-ranges = <0x80000000 0x8 0x00000000 0x80000000>;
+
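+               /* 16550-compatible UART, disabled here and enabled per board (see k2g-evm.dts) */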
+               uart0: serial@02530c00 {
+                       compatible = "ns16550a";
+                       current-speed = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       reg = <0x02530c00 0x100>;
+                       interrupts = <GIC_SPI 164 IRQ_TYPE_EDGE_RISING>;
+                       clock-frequency = <200000000>;
+                       status = "disabled";
+               };
+       };
+};
index bf4143c6cb8f10991d4abbaae126966f9f9c753c..b84af3da8c84d596b2b950489541c4efd9216783 100644 (file)
@@ -14,7 +14,7 @@
 #include "kirkwood-synology.dtsi"
 
 / {
-       model = "Synology DS111";
+       model = "Synology DS112";
        compatible = "synology,ds111", "marvell,kirkwood";
 
        memory {
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-6282.dtsi b/arch/arm/boot/dts/kirkwood-linkstation-6282.dtsi
new file mode 100644 (file)
index 0000000..6548e68
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * Device Tree common file for kirkwood-6282 based Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
+#include "kirkwood-linkstation.dtsi"
+
+/ {
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd0: pmx-power-hdd0 {
+                               marvell,pins = "mpp8";
+                               marvell,function = "gpio";
+                       };
+                       pmx_usb_vbus: pmx-usb-vbus {
+                               marvell,pins = "mpp12";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_high: pmx-fan-high {
+                               marvell,pins = "mpp16";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_low: pmx-fan-low {
+                               marvell,pins = "mpp17";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_alarm: pmx-led-alarm {
+                               marvell,pins = "mpp36";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_red: pmx-led-function-red {
+                               marvell,pins = "mpp37";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_info: pmx-led-info {
+                               marvell,pins = "mpp38";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_blue: pmx-led-function-blue {
+                               marvell,pins = "mpp39";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_power: pmx-led-power {
+                               marvell,pins = "mpp40";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_lock: pmx-fan-lock {
+                               marvell,pins = "mpp43";
+                               marvell,function = "gpio";
+                       };
+                       pmx_button_function: pmx-button-function {
+                               marvell,pins = "mpp45";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_switch: pmx-power-switch {
+                               marvell,pins = "mpp46";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_auto_switch: pmx-power-auto-switch {
+                               marvell,pins = "mpp47";
+                               marvell,function = "gpio";
+                       };
+               };
+       };
+
+       gpio_keys {
+               function-button {
+                       gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
+               };
+
+               power-on-switch {
+                       gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+               };
+
+               power-auto-switch {
+                       gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+               };
+       };
+
+       gpio_leds {
+               red-alarm-led {
+                       label = "linkstation:red:alarm";
+                       gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+               };
+
+               red-function-led {
+                       label = "linkstation:red:function";
+                       gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+               };
+
+               amber-info-led {
+                       label = "linkstation:amber:info";
+                       gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+               };
+
+               blue-function-led {
+                       label = "linkstation:blue:function";
+                       gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+               };
+
+               blue-power-led {
+                       label = "linkstation:blue:power";
+                       gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+                       default-state = "keep";
+               };
+       };
+
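+       /* GPIO-driven fan: speed-map pairs are <RPM gpio-state>; the alarm input reports fan lock */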
+       gpio_fan {
+               compatible = "gpio-fan";
+               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+               pinctrl-names = "default";
+
+               gpios = <&gpio0 17 GPIO_ACTIVE_LOW
+                        &gpio0 16 GPIO_ACTIVE_LOW>;
+
+               gpio-fan,speed-map = <0 3
+                               1500 2
+                               3250 1
+                               5000 0>;
+
+               alarm-gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>;
+       };
+
+       regulators {
+               usb_power: regulator@1 {
+                       gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+               };
+
+               hdd_power0: regulator@2 {
+                       gpio = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
+
+&mdio {
+       status = "okay";
+
+       ethphy0: ethernet-phy@0 {
+               device_type = "ethernet-phy";
+               reg = <0>;
+       };
+};
+
+&eth0 {
+       status = "okay";
+
+       ethernet0-port@0 {
+               phy-handle = <&ethphy0>;
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-duo-6281.dtsi b/arch/arm/boot/dts/kirkwood-linkstation-duo-6281.dtsi
new file mode 100644 (file)
index 0000000..cf2e69f
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Device Tree common file for kirkwood-6281 based 2-Bay Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
+#include "kirkwood-linkstation.dtsi"
+
+/ {
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd0: pmx-power-hdd0 {
+                               marvell,pins = "mpp28";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_hdd1: pmx-power-hdd1 {
+                               marvell,pins = "mpp29";
+                               marvell,function = "gpio";
+                       };
+                       pmx_usb_vbus: pmx-usb-vbus {
+                               marvell,pins = "mpp37";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_alarm: pmx-led-alarm {
+                               marvell,pins = "mpp49";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_red: pmx-led-function-red {
+                               marvell,pins = "mpp34";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_blue: pmx-led-function-blue {
+                               marvell,pins = "mpp36";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_info: pmx-led-info {
+                               marvell,pins = "mpp38";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_power: pmx-led-power {
+                               marvell,pins = "mpp39";
+                               marvell,function = "gpio";
+                       };
+                       pmx_button_function: pmx-button-function {
+                               marvell,pins = "mpp41";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_switch: pmx-power-switch {
+                               marvell,pins = "mpp42";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_auto_switch: pmx-power-auto-switch {
+                               marvell,pins = "mpp43";
+                               marvell,function = "gpio";
+                       };
+               };
+
+               sata@80000 {
+                       nr-ports = <2>;
+               };
+       };
+
+       gpio_keys {
+               function-button {
+                       gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+               };
+
+               power-on-switch {
+                       gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
+               };
+
+               power-auto-switch {
+                       gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+               };
+       };
+
+       gpio_leds {
+               red-alarm-led {
+                       label = "linkstation:red:alarm";
+                       gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+               };
+
+               red-function-led {
+                       label = "linkstation:red:function";
+                       gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+               };
+
+               amber-info-led {
+                       label = "linkstation:amber:info";
+                       gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+               };
+
+               blue-function-led {
+                       label = "linkstation:blue:function";
+                       gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+               };
+
+               blue-power-led {
+                       label = "linkstation:blue:power";
+                       gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+                       default-state = "keep";
+               };
+       };
+
+       regulators {
+               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
+
+               usb_power: regulator@1 {
+                       gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+               };
+
+               hdd_power0: regulator@2 {
+                       gpio = <&gpio0 28 GPIO_ACTIVE_HIGH>;
+               };
+
+               hdd_power1: regulator@3 {
+                       compatible = "regulator-fixed";
+                       reg = <3>;
+                       regulator-name = "HDD1 Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
+
+&mdio {
+       status = "okay";
+
+       ethphy1: ethernet-phy@8 {
+               device_type = "ethernet-phy";
+               reg = <8>;
+       };
+};
+
+&eth1 {
+       status = "okay";
+
+       ethernet1-port@0 {
+               phy-handle = <&ethphy1>;
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lsqvl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lsqvl.dts
new file mode 100644 (file)
index 0000000..6dc0df2
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-QVL
+ *
+ * Copyright (C) 2016, Mario Lange <mario_lange@gmx.net>
+ *
+ * Based on kirkwood-linkstation-lswvl.dts,
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-6282.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-QVL";
+       compatible = "buffalo,lsqvl", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+       memory { /* 256 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x10000000>;
+       };
+
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd1: pmx-power-hdd1 {
+                               marvell,pins = "mpp9";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr0: pmx-led-hdderr0 {
+                               marvell,pins = "mpp34";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr1: pmx-led-hdderr1 {
+                               marvell,pins = "mpp35";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr2: pmx-led-hdderr2 {
+                               marvell,pins = "mpp24";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr3: pmx-led-hdderr3 {
+                               marvell,pins = "mpp25";
+                               marvell,function = "gpio";
+                       };
+               };
+
+               sata@80000 {
+                       nr-ports = <2>;
+               };
+       };
+
+       gpio_leds {
+               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+                            &pmx_led_info &pmx_led_power
+                            &pmx_led_function_blue
+                            &pmx_led_hdderr0
+                            &pmx_led_hdderr1
+                            &pmx_led_hdderr2
+                            &pmx_led_hdderr3>;
+
+               red-hdderr0-led {
+                       label = "linkstation:red:hdderr0";
+                       gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+               };
+
+               red-hdderr1-led {
+                       label = "linkstation:red:hdderr1";
+                       gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+               };
+
+               red-hdderr2-led {
+                       label = "linkstation:red:hdderr2";
+                       gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
+               };
+
+               red-hdderr3-led {
+                       label = "linkstation:red:hdderr3";
+                       gpios = <&gpio0 25 GPIO_ACTIVE_LOW>;
+               };
+       };
+
+       regulators {
+               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
+
+               hdd_power1: regulator@3 {
+                       compatible = "regulator-fixed";
+                       reg = <3>;
+                       regulator-name = "HDD1 Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       gpio = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lsvl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lsvl.dts
new file mode 100644 (file)
index 0000000..edcba5c
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-VL
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-6282.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-VL";
+       compatible = "buffalo,lsvl", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+       memory { /* 256 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x10000000>;
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lswsxl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lswsxl.dts
new file mode 100644 (file)
index 0000000..4b64501
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WSXL
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-duo-6281.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-WSXL";
+       compatible = "buffalo,lswsxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+       memory { /* 128 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x8000000>;
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lswvl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lswvl.dts
new file mode 100644 (file)
index 0000000..954ec1d
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WVL
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-6282.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-WVL";
+       compatible = "buffalo,lswvl","marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+       memory { /* 256 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x10000000>;
+       };
+
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd1: pmx-power-hdd1 {
+                               marvell,pins = "mpp9";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr0: pmx-led-hdderr0 {
+                               marvell,pins = "mpp34";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr1: pmx-led-hdderr1 {
+                               marvell,pins = "mpp35";
+                               marvell,function = "gpio";
+                       };
+               };
+
+               sata@80000 {
+                       nr-ports = <2>;
+               };
+       };
+
+       gpio_leds {
+               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+                            &pmx_led_info &pmx_led_power
+                            &pmx_led_function_blue
+                            &pmx_led_hdderr0
+                            &pmx_led_hdderr1>;
+
+               red-hdderr0-led {
+                       label = "linkstation:red:hdderr0";
+                       gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+               };
+
+               red-hdderr1-led {
+                       label = "linkstation:red:hdderr1";
+                       gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+               };
+       };
+
+       regulators {
+               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
+
+               hdd_power1: regulator@3 {
+                       compatible = "regulator-fixed";
+                       reg = <3>;
+                       regulator-name = "HDD1 Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       gpio = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lswxl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lswxl.dts
new file mode 100644 (file)
index 0000000..ecd5c12
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WXL
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-duo-6281.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-WXL";
+       compatible = "buffalo,lswxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+       memory { /* 128 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x8000000>;
+       };
+
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_led_hdderr0: pmx-led-hdderr0 {
+                               marvell,pins = "mpp8";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr1: pmx-led-hdderr1 {
+                               marvell,pins = "mpp46";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_lock: pmx-fan-lock {
+                               marvell,pins = "mpp40";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_high: pmx-fan-high {
+                               marvell,pins = "mpp47";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_low: pmx-fan-low {
+                               marvell,pins = "mpp48";
+                               marvell,function = "gpio";
+                       };
+               };
+       };
+
+       gpio_leds {
+               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+                            &pmx_led_info &pmx_led_power
+                            &pmx_led_function_blue
+                            &pmx_led_hdderr0
+                            &pmx_led_hdderr1>;
+
+               red-hdderr0-led {
+                       label = "linkstation:red:hdderr0";
+                       gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+               };
+
+               red-hdderr1-led {
+                       label = "linkstation:red:hdderr1";
+                       gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
+               };
+       };
+
+       gpio_fan {
+               compatible = "gpio-fan";
+               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+               pinctrl-names = "default";
+
+               gpios = <&gpio1 16 GPIO_ACTIVE_LOW
+                        &gpio1 15 GPIO_ACTIVE_LOW>;
+
+               gpio-fan,speed-map = <0 3
+                               1500 2
+                               3250 1
+                               5000 0>;
+
+               alarm-gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation.dtsi b/arch/arm/boot/dts/kirkwood-linkstation.dtsi
new file mode 100644 (file)
index 0000000..69061b6
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Device Tree common file for kirkwood based Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+       chosen {
+               bootargs = "console=ttyS0,115200n8 earlyprintk";
+               stdout-path = &uart0;
+       };
+
+       mbus {
+               pcie-controller {
+                       status = "okay";
+                       pcie@1,0 {
+                               status = "okay";
+                       };
+               };
+       };
+
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd0: pmx-power-hdd0 {
+                               marvell,function = "gpio";
+                       };
+                       pmx_usb_vbus: pmx-usb-vbus {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_alarm: pmx-led-alarm {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_red: pmx-led-function-red {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_blue: pmx-led-function-blue {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_info: pmx-led-info {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_power: pmx-led-power {
+                               marvell,function = "gpio";
+                       };
+                       pmx_button_function: pmx-button-function {
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_switch: pmx-power-switch {
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_auto_switch: pmx-power-auto-switch {
+                               marvell,function = "gpio";
+                       };
+               };
+
+               serial@12000 {
+                       status = "okay";
+               };
+
+               sata@80000 {
+                       status = "okay";
+                       nr-ports = <1>;
+               };
+
+               spi@10600 {
+                       status = "okay";
+
+                       m25p40@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               compatible = "st,m25p40", "jedec,spi-nor";
+                               reg = <0>;
+                               spi-max-frequency = <25000000>;
+                               mode = <0>;
+
+                               partition@0 {
+                                       reg = <0x0 0x60000>;
+                                       label = "uboot";
+                                       read-only;
+                               };
+
+                               partition@60000 {
+                                       reg = <0x60000 0x10000>;
+                                       label = "dtb";
+                                       read-only;
+                               };
+
+                               partition@70000 {
+                                       reg = <0x70000 0x10000>;
+                                       label = "uboot_env";
+                               };
+                       };
+               };
+       };
+
+       gpio_keys {
+               compatible = "gpio-keys";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-0 = <&pmx_button_function &pmx_power_switch
+                            &pmx_power_auto_switch>;
+               pinctrl-names = "default";
+
+               function-button {
+                       label = "Function Button";
+                       linux,code = <KEY_OPTION>;
+               };
+
+               power-on-switch {
+                       label = "Power-on Switch";
+                       linux,code = <KEY_RESERVED>;
+                       linux,input-type = <5>;
+               };
+
+               power-auto-switch {
+                       label = "Power-auto Switch";
+                       linux,code = <KEY_ESC>;
+                       linux,input-type = <5>;
+               };
+       };
+
+       gpio_leds {
+               compatible = "gpio-leds";
+               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+                            &pmx_led_info &pmx_led_power
+                            &pmx_led_function_blue>;
+               pinctrl-names = "default";
+       };
+
+       restart_poweroff {
+               compatible = "restart-poweroff";
+       };
+
+       regulators {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-0 = <&pmx_power_hdd0 &pmx_usb_vbus>;
+               pinctrl-names = "default";
+
+               usb_power: regulator@1 {
+                       compatible = "regulator-fixed";
+                       reg = <1>;
+                       regulator-name = "USB Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+               };
+
+               hdd_power0: regulator@2 {
+                       compatible = "regulator-fixed";
+                       reg = <2>;
+                       regulator-name = "HDD0 Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-lswvl.dts b/arch/arm/boot/dts/kirkwood-lswvl.dts
deleted file mode 100644 (file)
index 09eed3c..0000000
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Device Tree file for Buffalo Linkstation LS-WVL/VL
- *
- * Copyright (C) 2015, rogershimizu@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/dts-v1/;
-
-#include "kirkwood.dtsi"
-#include "kirkwood-6282.dtsi"
-
-/ {
-       model = "Buffalo Linkstation LS-WVL/VL";
-       compatible = "buffalo,lswvl", "buffalo,lsvl", "marvell,kirkwood-88f6282", "marvell,kirkwood";
-
-       memory { /* 256 MB */
-               device_type = "memory";
-               reg = <0x00000000 0x10000000>;
-       };
-
-       chosen {
-               bootargs = "console=ttyS0,115200n8 earlyprintk";
-               stdout-path = &uart0;
-       };
-
-       mbus {
-               pcie-controller {
-                       status = "okay";
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
-       };
-
-       ocp@f1000000 {
-               pinctrl: pin-controller@10000 {
-                       pmx_power_hdd0: pmx-power-hdd0 {
-                               marvell,pins = "mpp8";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_hdd1: pmx-power-hdd1 {
-                               marvell,pins = "mpp9";
-                               marvell,function = "gpio";
-                       };
-                       pmx_usb_vbus: pmx-usb-vbus {
-                               marvell,pins = "mpp12";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_high: pmx-fan-high {
-                               marvell,pins = "mpp16";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_low: pmx-fan-low {
-                               marvell,pins = "mpp17";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_hdderr0: pmx-led-hdderr0 {
-                               marvell,pins = "mpp34";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_hdderr1: pmx-led-hdderr1 {
-                               marvell,pins = "mpp35";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_alarm: pmx-led-alarm {
-                               marvell,pins = "mpp36";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_function_red: pmx-led-function-red {
-                               marvell,pins = "mpp37";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_info: pmx-led-info {
-                               marvell,pins = "mpp38";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_function_blue: pmx-led-function-blue {
-                               marvell,pins = "mpp39";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_power: pmx-led-power {
-                               marvell,pins = "mpp40";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_lock: pmx-fan-lock {
-                               marvell,pins = "mpp43";
-                               marvell,function = "gpio";
-                       };
-                       pmx_button_function: pmx-button-function {
-                               marvell,pins = "mpp45";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_switch: pmx-power-switch {
-                               marvell,pins = "mpp46";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_auto_switch: pmx-power-auto-switch {
-                               marvell,pins = "mpp47";
-                               marvell,function = "gpio";
-                       };
-               };
-
-               serial@12000 {
-                       status = "okay";
-               };
-
-               sata@80000 {
-                       status = "okay";
-                       nr-ports = <2>;
-               };
-
-               spi@10600 {
-                       status = "okay";
-
-                       m25p40@0 {
-                               #address-cells = <1>;
-                               #size-cells = <1>;
-                               compatible = "st,m25p40", "jedec,spi-nor";
-                               reg = <0>;
-                               spi-max-frequency = <25000000>;
-                               mode = <0>;
-
-                               partition@0 {
-                                       reg = <0x0 0x60000>;
-                                       label = "uboot";
-                                       read-only;
-                               };
-
-                               partition@60000 {
-                                       reg = <0x60000 0x10000>;
-                                       label = "dtb";
-                                       read-only;
-                               };
-
-                               partition@70000 {
-                                       reg = <0x70000 0x10000>;
-                                       label = "uboot_env";
-                               };
-                       };
-               };
-       };
-
-       gpio_keys {
-               compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_button_function &pmx_power_switch
-                            &pmx_power_auto_switch>;
-               pinctrl-names = "default";
-
-               button@1 {
-                       label = "Function Button";
-                       linux,code = <KEY_OPTION>;
-                       gpios = <&gpio0 45 GPIO_ACTIVE_LOW>;
-               };
-
-               button@2 {
-                       label = "Power-on Switch";
-                       linux,code = <KEY_RESERVED>;
-                       linux,input-type = <5>;
-                       gpios = <&gpio0 46 GPIO_ACTIVE_LOW>;
-               };
-
-               button@3 {
-                       label = "Power-auto Switch";
-                       linux,code = <KEY_ESC>;
-                       linux,input-type = <5>;
-                       gpios = <&gpio0 47 GPIO_ACTIVE_LOW>;
-               };
-       };
-
-       gpio_leds {
-               compatible = "gpio-leds";
-               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
-                            &pmx_led_info &pmx_led_power
-                            &pmx_led_function_blue
-                            &pmx_led_hdderr0
-                            &pmx_led_hdderr1>;
-               pinctrl-names = "default";
-
-               led@1 {
-                       label = "lswvl:red:alarm";
-                       gpios = <&gpio0 36 GPIO_ACTIVE_LOW>;
-               };
-
-               led@2 {
-                       label = "lswvl:red:func";
-                       gpios = <&gpio0 37 GPIO_ACTIVE_LOW>;
-               };
-
-               led@3 {
-                       label = "lswvl:amber:info";
-                       gpios = <&gpio0 38 GPIO_ACTIVE_LOW>;
-               };
-
-               led@4 {
-                       label = "lswvl:blue:func";
-                       gpios = <&gpio0 39 GPIO_ACTIVE_LOW>;
-               };
-
-               led@5 {
-                       label = "lswvl:blue:power";
-                       gpios = <&gpio0 40 GPIO_ACTIVE_LOW>;
-                       default-state = "keep";
-               };
-
-               led@6 {
-                       label = "lswvl:red:hdderr0";
-                       gpios = <&gpio0 34 GPIO_ACTIVE_LOW>;
-               };
-
-               led@7 {
-                       label = "lswvl:red:hdderr1";
-                       gpios = <&gpio0 35 GPIO_ACTIVE_LOW>;
-               };
-       };
-
-       gpio_fan {
-               compatible = "gpio-fan";
-               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
-               pinctrl-names = "default";
-
-               gpios = <&gpio0 17 GPIO_ACTIVE_LOW
-                        &gpio0 16 GPIO_ACTIVE_LOW>;
-
-               gpio-fan,speed-map = <0 3
-                               1500 2
-                               3250 1
-                               5000 0>;
-
-               alarm-gpios = <&gpio0 43 GPIO_ACTIVE_HIGH>;
-       };
-
-       restart_poweroff {
-               compatible = "restart-poweroff";
-       };
-
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
-               pinctrl-names = "default";
-
-               usb_power: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "USB Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
-               };
-               hdd_power0: regulator@2 {
-                       compatible = "regulator-fixed";
-                       reg = <2>;
-                       regulator-name = "HDD0 Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 8 GPIO_ACTIVE_HIGH>;
-               };
-               hdd_power1: regulator@3 {
-                       compatible = "regulator-fixed";
-                       reg = <3>;
-                       regulator-name = "HDD1 Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 9 GPIO_ACTIVE_HIGH>;
-               };
-       };
-};
-
-&mdio {
-       status = "okay";
-
-       ethphy0: ethernet-phy@0 {
-               device_type = "ethernet-phy";
-               reg = <0>;
-       };
-};
-
-&eth0 {
-       status = "okay";
-
-       ethernet0-port@0 {
-               phy-handle = <&ethphy0>;
-       };
-};
diff --git a/arch/arm/boot/dts/kirkwood-lswxl.dts b/arch/arm/boot/dts/kirkwood-lswxl.dts
deleted file mode 100644 (file)
index f5db16a..0000000
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Device Tree file for Buffalo Linkstation LS-WXL/WSXL
- *
- * Copyright (C) 2015, rogershimizu@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/dts-v1/;
-
-#include "kirkwood.dtsi"
-#include "kirkwood-6281.dtsi"
-
-/ {
-       model = "Buffalo Linkstation LS-WXL/WSXL";
-       compatible = "buffalo,lswxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
-
-       memory { /* 128 MB */
-               device_type = "memory";
-               reg = <0x00000000 0x8000000>;
-       };
-
-       chosen {
-               bootargs = "console=ttyS0,115200n8 earlyprintk";
-               stdout-path = &uart0;
-       };
-
-       mbus {
-               pcie-controller {
-                       status = "okay";
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
-       };
-
-       ocp@f1000000 {
-               pinctrl: pin-controller@10000 {
-                       pmx_power_hdd0: pmx-power-hdd0 {
-                               marvell,pins = "mpp28";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_hdd1: pmx-power-hdd1 {
-                               marvell,pins = "mpp29";
-                               marvell,function = "gpio";
-                       };
-                       pmx_usb_vbus: pmx-usb-vbus {
-                               marvell,pins = "mpp37";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_high: pmx-fan-high {
-                               marvell,pins = "mpp47";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_low: pmx-fan-low {
-                               marvell,pins = "mpp48";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_hdderr0: pmx-led-hdderr0 {
-                               marvell,pins = "mpp8";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_hdderr1: pmx-led-hdderr1 {
-                               marvell,pins = "mpp46";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_alarm: pmx-led-alarm {
-                               marvell,pins = "mpp49";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_function_red: pmx-led-function-red {
-                               marvell,pins = "mpp34";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_function_blue: pmx-led-function-blue {
-                               marvell,pins = "mpp36";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_info: pmx-led-info {
-                               marvell,pins = "mpp38";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_power: pmx-led-power {
-                               marvell,pins = "mpp39";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_lock: pmx-fan-lock {
-                               marvell,pins = "mpp40";
-                               marvell,function = "gpio";
-                       };
-                       pmx_button_function: pmx-button-function {
-                               marvell,pins = "mpp41";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_switch: pmx-power-switch {
-                               marvell,pins = "mpp42";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_auto_switch: pmx-power-auto-switch {
-                               marvell,pins = "mpp43";
-                               marvell,function = "gpio";
-                       };
-               };
-
-               serial@12000 {
-                       status = "okay";
-               };
-
-               sata@80000 {
-                       status = "okay";
-                       nr-ports = <2>;
-               };
-
-               spi@10600 {
-                       status = "okay";
-
-                       m25p40@0 {
-                               #address-cells = <1>;
-                               #size-cells = <1>;
-                               compatible = "st,m25p40", "jedec,spi-nor";
-                               reg = <0>;
-                               spi-max-frequency = <25000000>;
-                               mode = <0>;
-
-                               partition@0 {
-                                       reg = <0x0 0x60000>;
-                                       label = "uboot";
-                                       read-only;
-                               };
-
-                               partition@60000 {
-                                       reg = <0x60000 0x10000>;
-                                       label = "dtb";
-                                       read-only;
-                               };
-
-                               partition@70000 {
-                                       reg = <0x70000 0x10000>;
-                                       label = "uboot_env";
-                               };
-                       };
-               };
-       };
-
-       gpio_keys {
-               compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_button_function &pmx_power_switch
-                            &pmx_power_auto_switch>;
-               pinctrl-names = "default";
-
-               button@1 {
-                       label = "Function Button";
-                       linux,code = <KEY_OPTION>;
-                       gpios = <&gpio1 41 GPIO_ACTIVE_LOW>;
-               };
-
-               button@2 {
-                       label = "Power-on Switch";
-                       linux,code = <KEY_RESERVED>;
-                       linux,input-type = <5>;
-                       gpios = <&gpio1 42 GPIO_ACTIVE_LOW>;
-               };
-
-               button@3 {
-                       label = "Power-auto Switch";
-                       linux,code = <KEY_ESC>;
-                       linux,input-type = <5>;
-                       gpios = <&gpio1 43 GPIO_ACTIVE_LOW>;
-               };
-       };
-
-       gpio_leds {
-               compatible = "gpio-leds";
-               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
-                            &pmx_led_info &pmx_led_power
-                            &pmx_led_function_blue
-                            &pmx_led_hdderr0
-                            &pmx_led_hdderr1>;
-               pinctrl-names = "default";
-
-               led@1 {
-                       label = "lswxl:blue:func";
-                       gpios = <&gpio1 36 GPIO_ACTIVE_LOW>;
-               };
-
-               led@2 {
-                       label = "lswxl:red:alarm";
-                       gpios = <&gpio1 49 GPIO_ACTIVE_LOW>;
-               };
-
-               led@3 {
-                       label = "lswxl:amber:info";
-                       gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
-               };
-
-               led@4 {
-                       label = "lswxl:blue:power";
-                       gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
-               };
-
-               led@5 {
-                       label = "lswxl:red:func";
-                       gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
-                       default-state = "keep";
-               };
-
-               led@6 {
-                       label = "lswxl:red:hdderr0";
-                       gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
-               };
-
-               led@7 {
-                       label = "lswxl:red:hdderr1";
-                       gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
-               };
-       };
-
-       gpio_fan {
-               compatible = "gpio-fan";
-               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
-               pinctrl-names = "default";
-
-               gpios = <&gpio0 47 GPIO_ACTIVE_LOW
-                        &gpio0 48 GPIO_ACTIVE_LOW>;
-
-               gpio-fan,speed-map = <0 3
-                               1500 2
-                               3250 1
-                               5000 0>;
-
-               alarm-gpios = <&gpio1 49 GPIO_ACTIVE_HIGH>;
-       };
-
-       restart_poweroff {
-               compatible = "restart-poweroff";
-       };
-
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
-               pinctrl-names = "default";
-
-               usb_power: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "USB Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 37 GPIO_ACTIVE_HIGH>;
-               };
-               hdd_power0: regulator@2 {
-                       compatible = "regulator-fixed";
-                       reg = <2>;
-                       regulator-name = "HDD0 Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 28 GPIO_ACTIVE_HIGH>;
-               };
-               hdd_power1: regulator@3 {
-                       compatible = "regulator-fixed";
-                       reg = <3>;
-                       regulator-name = "HDD1 Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
-               };
-       };
-};
-
-&mdio {
-       status = "okay";
-
-       ethphy1: ethernet-phy@8 {
-               device_type = "ethernet-phy";
-               reg = <8>;
-       };
-};
-
-&eth1 {
-       status = "okay";
-
-       ethernet1-port@0 {
-               phy-handle = <&ethphy1>;
-       };
-};
index 887b9c1fee43869a2b09336eb283116d3623e2af..96ff59d68f445f45a5182a12d6d42a3883ede881 100644 (file)
@@ -20,6 +20,9 @@
        compatible = "marvell,openrd-client", "marvell,openrd", "marvell,kirkwood-88f6281", "marvell,kirkwood";
 
        ocp@f1000000 {
+               audio-controller@a0000 {
+                       status = "okay";
+               };
                i2c@11000 {
                        status = "okay";
                        clock-frequency = <400000>;
@@ -27,6 +30,7 @@
                        cs42l51: cs42l51@4a {
                                compatible = "cirrus,cs42l51";
                                reg = <0x4a>;
+                               #sound-dai-cells = <0>;
                        };
                };
        };
@@ -37,7 +41,7 @@
                simple-audio-card,mclk-fs = <256>;
 
                simple-audio-card,cpu {
-                       sound-dai = <&audio0>;
+                       sound-dai = <&audio0 0>;
                };
 
                simple-audio-card,codec {
index d3330dadf7edd5da9ae9d35f10019a3b44555942..24f1d30970a0650b535bcd67395c0cea8ace7935 100644 (file)
@@ -40,7 +40,7 @@
                        pinctrl-0 = <&pmx_select28 &pmx_sdio_cd &pmx_select34>;
                        pinctrl-names = "default";
 
-                       pmx_select28: pmx-select-uart-sd {
+                       pmx_select28: pmx-select-rs232-rs485 {
                                marvell,pins = "mpp28";
                                marvell,function = "gpio";
                        };
@@ -48,7 +48,7 @@
                                marvell,pins = "mpp29";
                                marvell,function = "gpio";
                        };
-                       pmx_select34: pmx-select-rs232-rs484 {
+                       pmx_select34: pmx-select-uart-sd {
                                marvell,pins = "mpp34";
                                marvell,function = "gpio";
                        };
                        status = "okay";
                        cd-gpios = <&gpio0 29 9>;
                };
+               gpio@10100 {
+                       p28 {
+                               gpio-hog;
+                               gpios = <28 GPIO_ACTIVE_HIGH>;
+                               /*
+                                * SelRS232or485 selects between RS-232 or RS-485
+                                * mode for the second UART.
+                                *
+                                * Low: RS-232
+                                * High: RS-485
+                                *
+                                * To use the second UART, you also need to change
+                                * the SelUARTorSD.
+                                */
+                               output-low;
+                               line-name = "SelRS232or485";
+                       };
+               };
+               gpio@10140 {
+                       p2 {
+                               gpio-hog;
+                               gpios = <2 GPIO_ACTIVE_HIGH>;
+                               /*
+                                * SelUARTorSD selects between the second UART
+                                * (serial@12100) and SD (mvsdio@90000).
+                                *
+                                * Low: UART
+                                * High: SD
+                                *
+                                * When changing this line make sure the newly
+                                * selected device node is enabled and the
+                                * previously selected device node is disabled.
+                                */
+                               output-high; /* Select SD by default */
+                               line-name = "SelUARTorSD";
+                       };
+               };
        };
 };
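
The SelUARTorSD hog above makes SD the power-on default for the shared pins. As the in-line comment notes, routing them to the second UART instead means flipping the hog and swapping which device node is enabled. A minimal sketch of that alternative default (illustrative only, not taken from the patch itself; the serial@12100/mvsdio@90000 pairing and node names come from the comment above):

	/*
	 * Sketch: make the second UART the power-on default instead of SD.
	 * serial@12100 would then need status = "okay" and the SD controller
	 * status = "disabled", per the comment in the hunk above.
	 */
	gpio@10140 {
		p2 {
			gpio-hog;
			gpios = <2 GPIO_ACTIVE_HIGH>;
			output-low;	/* Low selects the UART */
			line-name = "SelUARTorSD";
		};
	};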
 
index 1db6f2c506cce3209e64418493039046c79fd0ec..8082d64266a37c33c8fa2e7e722d3683519c3d0a 100644 (file)
        chip-delay = <40>;
        status = "okay";
        partitions {
+               compatible = "fixed-partitions";
                #address-cells = <1>;
                #size-cells = <1>;
 
index 7b5a4a18f49cb6981064c213805652df0911cb90..7445a15e259d05c3a51666694e2858bf3b6fb06c 100644 (file)
 
                audio0: audio-controller@a0000 {
                        compatible = "marvell,kirkwood-audio";
-                       #sound-dai-cells = <0>;
+                       #sound-dai-cells = <1>;
                        reg = <0xa0000 0x2210>;
                        interrupts = <24>;
                        clocks = <&gate_clk 9>;
index 7fed0bd4f3deea46281881ead81b2ebd29c5fa4b..00805322367e7eb73bcc4ba5bda124eee5372467 100644 (file)
        clock-frequency = <400000>;
 };
 
-&i2c2 {
-       clock-frequency = <400000>;
-};
-
-&i2c3 {
-       clock-frequency = <400000>;
-};
-
 /*
  * Only found on the wireless SOM. For the SOM without wireless, the pins for
  * MMC3 can be routed with jumpers to the second MMC slot on the devkit and
                interrupt-parent = <&gpio5>;
                interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
                ref-clock-frequency = <26000000>;
+               tcxo-clock-frequency = <26000000>;
        };
 };
 
diff --git a/arch/arm/boot/dts/mvebu-linkstation-fan.dtsi b/arch/arm/boot/dts/mvebu-linkstation-fan.dtsi
new file mode 100644 (file)
index 0000000..e211a3c
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Device Tree common file for gpio-fan on Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+       gpio_fan {
+               compatible = "gpio-fan";
+               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+               pinctrl-names = "default";
+
+               gpio-fan,speed-map =
+                       <0              3
+                       1500    2
+                       3250    1
+                       5000    0>;
+       };
+};
+
+&pinctrl {
+       pmx_fan_low: pmx-fan-low {
+               marvell,function = "gpio";
+       };
+
+       pmx_fan_high: pmx-fan-high {
+               marvell,function = "gpio";
+       };
+
+       pmx_fan_lock: pmx-fan-lock {
+               marvell,function = "gpio";
+       };
+};
diff --git a/arch/arm/boot/dts/mvebu-linkstation-gpio-simple.dtsi b/arch/arm/boot/dts/mvebu-linkstation-gpio-simple.dtsi
new file mode 100644 (file)
index 0000000..68d75e7
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Device Tree common file for gpio-{keys,leds} on Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/input/input.h>
+
+/ {
+       gpio_keys {
+               compatible = "gpio-keys";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-0 = <&pmx_power_switch>;
+               pinctrl-names = "default";
+
+               power-on-switch {
+                       label = "Power-on Switch";
+                       linux,code = <KEY_RESERVED>;
+                       linux,input-type = <5>;
+               };
+
+               power-auto-switch {
+                       label = "Power-auto Switch";
+                       linux,code = <KEY_ESC>;
+                       linux,input-type = <5>;
+               };
+       };
+
+       gpio_leds {
+               compatible = "gpio-leds";
+               pinctrl-0 = <&pmx_led_power &pmx_led_alarm &pmx_led_info>;
+               pinctrl-names = "default";
+
+               blue-power-led {
+                       label = "linkstation:blue:power";
+                       default-state = "keep";
+               };
+
+               red-alarm-led {
+                       label = "linkstation:red:alarm";
+               };
+
+               amber-info-led {
+                       label = "linkstation:amber:info";
+               };
+       };
+};
+
+&pinctrl {
+       pmx_power_switch: pmx-power-switch {
+               marvell,function = "gpio";
+       };
+
+       pmx_led_power: pmx-leds {
+               marvell,function = "gpio";
+       };
+
+       pmx_led_alarm: pmx-leds {
+               marvell,function = "gpio";
+       };
+
+       pmx_led_info: pmx-leds {
+               marvell,function = "gpio";
+       };
+};
index 74d8f7eb556399e489eaebc7e754a4515a96f57f..20603f3f9bd07e273523398bca74256a3fd67150 100644 (file)
                amstaos,cover-comp-gain = <16>;
        };
 
+       adp1653: led-controller@30 {
+               compatible = "adi,adp1653";
+               reg = <0x30>;
+               enable-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; /* 88 */
+
+               flash {
+                       flash-timeout-us = <500000>;
+                       flash-max-microamp = <320000>;
+                       led-max-microamp = <50000>;
+               };
+               indicator {
+                       led-max-microamp = <17500>;
+               };
+       };
+
        lp5523: lp5523@32 {
                compatible = "national,lp5523";
                reg = <0x32>;
index a2c2b8d8dd2c70a2ab5a8007b0cc2e24faeaafd0..ab1174bb409e3c9631a32d66d7fe7d6a2ab7319a 100644 (file)
                startup-delay-us = <150>;
                enable-active-high;
        };
+
+       vwlan_fixed: fixedregulator@2 {
+               compatible = "regulator-fixed";
+               regulator-name = "VWLAN";
+               gpio = <&gpio2 3 GPIO_ACTIVE_HIGH>; /* gpio 35 */
+               enable-active-high;
+               regulator-boot-off;
+       };
 };
 
 &omap3_pmx_core {
                        OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3 */
                >;
        };
+
+       wlan_pins: pinmux_wlan_pins {
+               pinctrl-single,pins = <
+                       OMAP3_CORE1_IOPAD(0x207c, PIN_OUTPUT | MUX_MODE4) /* gpio 35 - wlan enable */
+                       OMAP3_CORE1_IOPAD(0x208a, PIN_INPUT | MUX_MODE4) /* gpio 42 - wlan irq */
+               >;
+       };
 };
 
 &i2c1 {
index 0885b34d5d7da252953c3aaef287a1e20b9dda63..e5967262f28bdffc240255affed510d47129f444 100644 (file)
        compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
 };
 
+&omap3_pmx_core {
+       spi4_pins: pinmux_spi4_pins {
+               pinctrl-single,pins = <
+                       OMAP3_CORE1_IOPAD(0x218c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mcspi4_clk */
+                       OMAP3_CORE1_IOPAD(0x2190, PIN_OUTPUT | MUX_MODE1) /* mcspi4_simo */
+                       OMAP3_CORE1_IOPAD(0x2192, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mcspi4_somi */
+                       OMAP3_CORE1_IOPAD(0x2196, PIN_OUTPUT | MUX_MODE1) /* mcspi4_cs0 */
+               >;
+       };
+};
+
 &i2c2 {
        smia_1: camera@10 {
                compatible = "nokia,smia";
                };
        };
 };
+
+&mcspi4 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&spi4_pins>;
+
+       wlcore: wlcore@0 {
+               compatible = "ti,wl1271";
+               pinctrl-names = "default";
+               pinctrl-0 = <&wlan_pins>;
+               reg = <0>;
+               spi-max-frequency = <48000000>;
+               clock-xtal;
+               ref-clock-frequency = <38400000>;
+               interrupts-extended = <&gpio2 10 IRQ_TYPE_LEVEL_HIGH>; /* gpio 42 */
+               vwlan-supply = <&vwlan_fixed>;
+       };
+};
index 4f6b2d5b1902e96114f12b1f8bd7d15ba0b9819e..387dc31822fe9c8b2729d28da389d410ffb55fe7 100644 (file)
                                #size-cells = <0>;
                        };
                };
+
+               bandgap {
+                       reg = <0x48002524 0x4>;
+                       compatible = "ti,omap34xx-bandgap";
+                       #thermal-sensor-cells = <0>;
+               };
        };
 };
 
index 86253de5a97a99ddb12ea5f41c72f3c59279fced..f19c87bd6bf35b249543495810df3be423b5cd55 100644 (file)
                                #size-cells = <0>;
                        };
                };
+
+               bandgap {
+                       reg = <0x48002524 0x4>;
+                       compatible = "ti,omap36xx-bandgap";
+                       #thermal-sensor-cells = <0>;
+               };
        };
 };
 
index 888412c63f97492ef445368c3208434d748912f2..902657d6713b073df82d33158d3d1cd0fac5ef16 100644 (file)
        };
 };
 
+&gpio8 {
+       /* TI trees use GPIO instead of msecure, see also muxing */
+       p234 {
+               gpio-hog;
+               gpios = <10 GPIO_ACTIVE_HIGH>;
+               output-high;
+               line-name = "gpio8_234/msecure";
+       };
+};
+
 &omap5_pmx_core {
        pinctrl-names = "default";
        pinctrl-0 = <
                >;
        };
 
+       /* TI trees use GPIO mode; msecure mode does not work reliably? */
+       palmas_msecure_pins: palmas_msecure_pins {
+               pinctrl-single,pins = <
+                       OMAP5_IOPAD(0x180, PIN_OUTPUT | MUX_MODE6) /* gpio8_234 */
+               >;
+       };
+
        usbhost_pins: pinmux_usbhost_pins {
                pinctrl-single,pins = <
                        OMAP5_IOPAD(0x0c4, PIN_INPUT | MUX_MODE0) /* usbb2_hsic_strobe */
                        &usbhost_wkup_pins
        >;
 
+       palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+               pinctrl-single,pins = <
+                       OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+               >;
+       };
+
        usbhost_wkup_pins: pinmux_usbhost_wkup_pins {
                pinctrl-single,pins = <
                        OMAP5_IOPAD(0x05a, PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
                interrupt-controller;
                #interrupt-cells = <2>;
                ti,system-power-controller;
+               pinctrl-names = "default";
+               pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>;
 
                extcon_usb3: palmas_usb {
                        compatible = "ti,palmas-usb-vid";
                        #clock-cells = <0>;
                };
 
+               rtc {
+                       compatible = "ti,palmas-rtc";
+                       interrupt-parent = <&palmas>;
+                       interrupts = <8 IRQ_TYPE_NONE>;
+                       ti,backup-battery-chargeable;
+                       ti,backup-battery-charge-high-current;
+               };
+
                palmas_pmic {
                        compatible = "ti,palmas-pmic";
                        interrupt-parent = <&palmas>;
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts
new file mode 100644 (file)
index 0000000..1cf644b
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-GL
+ *       (also known as Buffalo Linkstation Pro/Live)
+ *
+ * Copyright (C) 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * Based on the board file arch/arm/mach-orion5x/kurobox_pro-setup.c
+ * Copyright (C) Ronen Shitrit <rshitrit@marvell.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "orion5x-linkstation.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+       model = "Buffalo Linkstation Pro/Live";
+       compatible = "buffalo,lsgl", "marvell,orion5x-88f5182", "marvell,orion5x";
+
+       memory { /* 128 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x8000000>;
+       };
+};
+
+&pinctrl {
+       pmx_power_hdd: pmx-power-hdd {
+               marvell,pins = "mpp1";
+               marvell,function = "gpio";
+       };
+
+       pmx_power_usb: pmx-power-usb {
+               marvell,pins = "mpp9";
+               marvell,function = "gpio";
+       };
+};
+
+&hdd_power {
+       gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+};
+
+&usb_power {
+       gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+};
+
+&ehci1 {
+       status = "okay";
+};
index 3daec912b4bf118edba55ef54fdf633413e696e5..0eead400f42770e46f44250ef1629cdb295a1422 100644 (file)
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Buffalo Linkstation LS-WTGL
  *
- * Copyright (C) 2015, Roger Shimizu <rogershimizu@gmail.com>
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
 
 /dts-v1/;
 
+#include "orion5x-linkstation.dtsi"
+#include "mvebu-linkstation-gpio-simple.dtsi"
+#include "mvebu-linkstation-fan.dtsi"
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include "orion5x-mv88f5182.dtsi"
 
 / {
        model = "Buffalo Linkstation LS-WTGL";
                reg = <0x00000000 0x4000000>;
        };
 
-       chosen {
-               bootargs = "console=ttyS0,115200n8 earlyprintk";
-               linux,stdout-path = &uart0;
-       };
-
-       soc {
-               ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>,
-                        <MBUS_ID(0x09, 0x00) 0 0xf2200000 0x800>,
-                        <MBUS_ID(0x01, 0x0f) 0 0xf4000000 0x40000>;
-
-               internal-regs {
-                       pinctrl: pinctrl@10000 {
-                               pinctrl-0 = <&pmx_usb_power &pmx_power_hdd
-                                       &pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
-                               pinctrl-names = "default";
-
-                               pmx_led_power: pmx-leds {
-                                       marvell,pins = "mpp0";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_led_alarm: pmx-leds {
-                                       marvell,pins = "mpp2";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_led_info: pmx-leds {
-                                       marvell,pins = "mpp3";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_power_hdd: pmx-power-hdd {
-                                       marvell,pins = "mpp1";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_usb_power: pmx-usb-power {
-                                       marvell,pins = "mpp9";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_sata0: pmx-sata0 {
-                                       marvell,pins = "mpp12";
-                                       marvell,function = "sata0";
-                               };
-
-                               pmx_sata1: pmx-sata1 {
-                                       marvell,pins = "mpp13";
-                                       marvell,function = "sata1";
-                               };
-
-                               pmx_fan_high: pmx-fan-high {
-                                       marvell,pins = "mpp14";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_fan_low: pmx-fan-low {
-                                       marvell,pins = "mpp17";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_fan_lock: pmx-fan-lock {
-                                       marvell,pins = "mpp6";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_power_switch: pmx-power-switch {
-                                       marvell,pins = "mpp8", "mpp10";
-                                       marvell,function = "gpio";
-                               };
-                       };
-               };
-       };
-
        gpio_keys {
-               compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_power_switch>;
-               pinctrl-names = "default";
-
-               button@1 {
-                       label = "Power-on Switch";
-                       linux,code = <KEY_RESERVED>;
-                       linux,input-type = <5>;
+               power-on-switch {
                        gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
                };
 
-               button@2 {
-                       label = "Power-auto Switch";
-                       linux,code = <KEY_ESC>;
-                       linux,input-type = <5>;
+               power-auto-switch {
                        gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
                };
        };
 
        gpio_leds {
-               compatible = "gpio-leds";
-               pinctrl-0 = <&pmx_led_power &pmx_led_alarm
-                            &pmx_led_info>;
-               pinctrl-names = "default";
-
-               led@1 {
-                       label = "lswtgl:blue:power";
+               blue-power-led {
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
                };
 
-               led@2 {
-                       label = "lswtgl:red:alarm";
+               red-alarm-led {
                        gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
                };
 
-               led@3 {
-                       label = "lswtgl:amber:info";
+               amber-info-led {
                        gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
                };
        };
 
        gpio_fan {
-               compatible = "gpio-fan";
-               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
-               pinctrl-names = "default";
-
                gpios = <&gpio0 14 GPIO_ACTIVE_LOW
                         &gpio0 17 GPIO_ACTIVE_LOW>;
 
-               gpio-fan,speed-map = <0 3
-                               1500 2
-                               3250 1
-                               5000 0>;
-
-               alarm-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+               alarm-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
        };
+};
 
-       restart_poweroff {
-               compatible = "restart-poweroff";
+&pinctrl {
+       pmx_led_power: pmx-leds {
+               marvell,pins = "mpp0";
+               marvell,function = "gpio";
        };
 
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_power_hdd &pmx_usb_power>;
-               pinctrl-names = "default";
+       pmx_power_hdd: pmx-power-hdd {
+               marvell,pins = "mpp1";
+               marvell,function = "gpio";
+       };
 
-               usb_power: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "USB Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
-               };
+       pmx_led_alarm: pmx-leds {
+               marvell,pins = "mpp2";
+               marvell,function = "gpio";
+       };
 
-               hdd_power: regulator@2 {
-                       compatible = "regulator-fixed";
-                       reg = <2>;
-                       regulator-name = "HDD Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
-               };
+       pmx_led_info: pmx-leds {
+               marvell,pins = "mpp3";
+               marvell,function = "gpio";
        };
-};
 
-&mdio {
-       status = "okay";
+       pmx_fan_lock: pmx-fan-lock {
+               marvell,pins = "mpp6";
+               marvell,function = "gpio";
+       };
 
-       ethphy: ethernet-phy {
-               reg = <8>;
+       pmx_power_switch: pmx-power-switch {
+               marvell,pins = "mpp8", "mpp10";
+               marvell,function = "gpio";
        };
-};
 
-&eth {
-       status = "okay";
+       pmx_power_usb: pmx-power-usb {
+               marvell,pins = "mpp9";
+               marvell,function = "gpio";
+       };
 
-       ethernet-port@0 {
-               phy-handle = <&ethphy>;
+       pmx_fan_high: pmx-fan-high {
+               marvell,pins = "mpp14";
+               marvell,function = "gpio";
        };
-};
 
-&ehci0 {
-       status = "okay";
+       pmx_fan_low: pmx-fan-low {
+               marvell,pins = "mpp17";
+               marvell,function = "gpio";
+       };
 };
 
-&i2c {
-       status = "okay";
-
-       rtc {
-               compatible = "ricoh,rs5c372a";
-               reg = <0x32>;
-       };
+&hdd_power {
+       gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
 };
 
-&wdt {
-       status = "disabled";
+&usb_power {
+       gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
 };
 
 &sata {
-       pinctrl-0 = <&pmx_sata0 &pmx_sata1>;
-       pinctrl-names = "default";
-       status = "okay";
        nr-ports = <2>;
 };
-
-&uart0 {
-       status = "okay";
-};
diff --git a/arch/arm/boot/dts/orion5x-linkstation.dtsi b/arch/arm/boot/dts/orion5x-linkstation.dtsi
new file mode 100644 (file)
index 0000000..ed456ab
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Device Tree common file for orion5x based Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "orion5x-mv88f5182.dtsi"
+
+/ {
+       chosen {
+               bootargs = "console=ttyS0,115200n8 earlyprintk";
+               linux,stdout-path = &uart0;
+       };
+
+       soc {
+               ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>,
+                                <MBUS_ID(0x09, 0x00) 0 0xf2200000 0x800>,
+                                <MBUS_ID(0x01, 0x0f) 0 0xf4000000 0x40000>;
+       };
+
+       restart_poweroff {
+               compatible = "restart-poweroff";
+       };
+
+       regulators {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-0 = <&pmx_power_usb &pmx_power_hdd>;
+               pinctrl-names = "default";
+
+               usb_power: regulator@1 {
+                       compatible = "regulator-fixed";
+                       reg = <1>;
+                       regulator-name = "USB Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+               };
+
+               hdd_power: regulator@2 {
+                       compatible = "regulator-fixed";
+                       reg = <2>;
+                       regulator-name = "HDD Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+               };
+       };
+};
+
+&pinctrl {
+       pmx_power_hdd: pmx-power-hdd {
+               marvell,function = "gpio";
+       };
+
+       pmx_power_usb: pmx-power-usb {
+               marvell,function = "gpio";
+       };
+};
+
+&devbus_bootcs {
+       status = "okay";
+       devbus,keep-config;
+
+       flash@0 {
+               compatible = "jedec-flash";
+               reg = <0 0x40000>;
+               bank-width = <1>;
+
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       header@0 {
+                               reg = <0 0x30000>;
+                               read-only;
+                       };
+
+                       uboot@30000 {
+                               reg = <0x30000 0xF000>;
+                               read-only;
+                       };
+
+                       uboot_env@3F000 {
+                               reg = <0x3F000 0x1000>;
+                       };
+               };
+       };
+};
+
+&mdio {
+       status = "okay";
+
+       ethphy: ethernet-phy {
+               reg = <8>;
+       };
+};
+
+&eth {
+       status = "okay";
+
+       ethernet-port@0 {
+               phy-handle = <&ethphy>;
+       };
+};
+
+&ehci0 {
+       status = "okay";
+};
+
+&i2c {
+       status = "okay";
+
+       rtc {
+               compatible = "ricoh,rs5c372a";
+               reg = <0x32>;
+       };
+};
+
+&wdt {
+       status = "disabled";
+};
+
+&sata {
+       status = "okay";
+       nr-ports = <1>;
+};
+
+&uart0 {
+       status = "okay";
+};
+
+&uart1 {
+       status = "okay";
+};
index ed521e85e208e72bd7e7afd96ac8acb1a5ab77a3..394c43bf0ae7a6db4ea3a5e190801f34269f844a 100644 (file)
                                          <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>;
                        interrupt-names = "ack", "err", "wakeup";
 
+                       rpmcc: clock-controller {
+                               compatible      = "qcom,rpmcc-apq8064", "qcom,rpmcc";
+                               #clock-cells = <1>;
+                       };
+
                        regulators {
                                compatible = "qcom,rpm-pm8921-regulators";
 
index 08214cbae16da84c0f191661528d40bc132e09b3..a33a09f6821edb883da87e0c37d837bd9a468b8d 100644 (file)
                interrupts = <1 7 0xf04>;
        };
 
+       clocks {
+               xo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+               };
+
+               sleep_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+               };
+       };
+
        timer {
                compatible = "arm,armv7-timer";
                interrupts = <1 2 0xf08>,
index fa698635eea0d1f859d57c766c679438a416b55d..2601a907947b97f4e58c8e07d1524afa2aad02d1 100644 (file)
        };
 
        clocks {
+               cxo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+               };
+
+               pxo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <27000000>;
+               };
+
                sleep_clk: sleep_clk {
                        compatible = "fixed-clock";
                        clock-frequency = <32768>;
index e5f7f33aa4677739f9bc1e6d57d969db9b856cf9..a4b184db21d08a5c7f38f4edd20912425023f025 100644 (file)
                interrupts = <1 9 0x304>;
        };
 
+       clocks {
+               cxo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+               };
+
+               pxo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <27000000>;
+               };
+
+               sleep_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+               };
+       };
+
        soc: soc {
                #address-cells = <1>;
                #size-cells = <1>;
index dfdafdcb8aae99711507bc4abfbbd7374ba39205..c7bd2032a95dbd863b20d54b34df59c85ee9e843 100644 (file)
                #size-cells = <1>;
                ranges;
 
+               mpss@08000000 {
+                       reg = <0x08000000 0x5100000>;
+                       no-map;
+               };
+
+               mba@00d100000 {
+                       reg = <0x0d100000 0x100000>;
+                       no-map;
+               };
+
+               reserved@0d200000 {
+                       reg = <0x0d200000 0xa00000>;
+                       no-map;
+               };
+
+               adsp@0dc00000 {
+                       reg = <0x0dc00000 0x1900000>;
+                       no-map;
+               };
+
+               venus@0f500000 {
+                       reg = <0x0f500000 0x500000>;
+                       no-map;
+               };
+
                smem_region: smem@fa00000 {
                        reg = <0xfa00000 0x200000>;
                        no-map;
                };
+
+               tz@0fc00000 {
+                       reg = <0x0fc00000 0x160000>;
+                       no-map;
+               };
+
+               efs@0fd600000 {
+                       reg = <0x0fd60000 0x1a0000>;
+                       no-map;
+               };
+
+               unused@0ff00000 {
+                       reg = <0x0ff00000 0x10100000>;
+                       no-map;
+               };
+       };
+
+       firmware {
+               compatible = "simple-bus";
+
+               scm {
+                       compatible = "qcom,scm";
+                       clocks = <&gcc GCC_CE1_CLK> , <&gcc GCC_CE1_AXI_CLK>,
+                                <&gcc GCC_CE1_AHB_CLK>;
+                       clock-names = "core", "bus", "iface";
+               };
        };
 
        cpus {
                interrupts = <1 7 0xf04>;
        };
 
+       clocks {
+               xo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+               };
+
+               sleep_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+               };
+       };
+
        timer {
                compatible = "arm,armv7-timer";
                interrupts = <1 2 0xf08>,
                hwlocks = <&tcsr_mutex 3>;
        };
 
+       smp2p-wcnss {
+               compatible = "qcom,smp2p";
+               qcom,smem = <451>, <431>;
+
+               interrupt-parent = <&intc>;
+               interrupts = <0 143 IRQ_TYPE_EDGE_RISING>;
+
+               qcom,ipc = <&apcs 8 18>;
+
+               qcom,local-pid = <0>;
+               qcom,remote-pid = <4>;
+
+               wcnss_smp2p_out: master-kernel {
+                       qcom,entry-name = "master-kernel";
+
+                       #qcom,state-cells = <1>;
+               };
+
+               wcnss_smp2p_in: slave-kernel {
+                       qcom,entry-name = "slave-kernel";
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+       };
+
+       smsm {
+               compatible = "qcom,smsm";
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               qcom,ipc-1 = <&apcs 8 13>;
+               qcom,ipc-2 = <&apcs 8 9>;
+               qcom,ipc-3 = <&apcs 8 19>;
+
+               apps_smsm: apps@0 {
+                       reg = <0>;
+
+                       #qcom,state-cells = <1>;
+               };
+
+               modem_smsm: modem@1 {
+                       reg = <1>;
+                       interrupts = <0 26 IRQ_TYPE_EDGE_RISING>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               adsp_smsm: adsp@2 {
+                       reg = <2>;
+                       interrupts = <0 157 IRQ_TYPE_EDGE_RISING>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               wcnss_smsm: wcnss@7 {
+                       reg = <7>;
+                       interrupts = <0 144 IRQ_TYPE_EDGE_RISING>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+       };
+
        soc: soc {
                #address-cells = <1>;
                #size-cells = <1>;
index 4657d7fb5bceede5ea747359257fc68ee9b434ad..89e46ebef1bca7ce3dd02399a2a264f0a903a687 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <dt-bindings/clock/r7s72100-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        scif0: serial@e8007000 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8007000 64>;
-               interrupts = <0 190 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 191 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 192 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 189 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif1: serial@e8007800 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8007800 64>;
-               interrupts = <0 194 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 195 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 196 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 193 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif2: serial@e8008000 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8008000 64>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 200 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 197 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif3: serial@e8008800 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8008800 64>;
-               interrupts = <0 202 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 203 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 204 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 201 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif4: serial@e8009000 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8009000 64>;
-               interrupts = <0 206 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 207 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 208 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 205 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif5: serial@e8009800 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8009800 64>;
-               interrupts = <0 210 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 211 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 212 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 209 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif6: serial@e800a000 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe800a000 64>;
-               interrupts = <0 214 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 215 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 216 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 213 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF6>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif7: serial@e800a800 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe800a800 64>;
-               interrupts = <0 218 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 219 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 220 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 217 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF7>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        spi0: spi@e800c800 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800c800 0x24>;
-               interrupts = <0 238 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 239 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 240 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI0>;
                power-domains = <&cpg_clocks>;
        spi1: spi@e800d000 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800d000 0x24>;
-               interrupts = <0 241 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 242 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 243 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI1>;
                power-domains = <&cpg_clocks>;
        spi2: spi@e800d800 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800d800 0x24>;
-               interrupts = <0 244 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 245 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 246 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI2>;
                power-domains = <&cpg_clocks>;
        spi3: spi@e800e000 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800e000 0x24>;
-               interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 248 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 249 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI3>;
                power-domains = <&cpg_clocks>;
        spi4: spi@e800e800 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800e800 0x24>;
-               interrupts = <0 250 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 251 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 252 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI4>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
                reg = <0xfcfee000 0x44>;
-               interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 158 IRQ_TYPE_EDGE_RISING>,
-                            <0 159 IRQ_TYPE_EDGE_RISING>,
-                            <0 160 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 161 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 162 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 163 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 164 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 159 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R7S72100_CLK_I2C0>;
                clock-frequency = <100000>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
                reg = <0xfcfee400 0x44>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 166 IRQ_TYPE_EDGE_RISING>,
-                            <0 167 IRQ_TYPE_EDGE_RISING>,
-                            <0 168 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 169 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 170 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 171 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 172 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 167 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R7S72100_CLK_I2C1>;
                clock-frequency = <100000>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
                reg = <0xfcfee800 0x44>;
-               interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 174 IRQ_TYPE_EDGE_RISING>,
-                            <0 175 IRQ_TYPE_EDGE_RISING>,
-                            <0 176 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 177 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 178 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 179 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 180 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 174 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 175 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R7S72100_CLK_I2C2>;
                clock-frequency = <100000>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
                reg = <0xfcfeec00 0x44>;
-               interrupts = <0 181 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 182 IRQ_TYPE_EDGE_RISING>,
-                            <0 183 IRQ_TYPE_EDGE_RISING>,
-                            <0 184 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 185 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 186 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 187 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 188 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 182 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 183 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R7S72100_CLK_I2C3>;
                clock-frequency = <100000>;
                power-domains = <&cpg_clocks>;
        mtu2: timer@fcff0000 {
                compatible = "renesas,mtu2-r7s72100", "renesas,mtu2";
                reg = <0xfcff0000 0x400>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "tgi0a";
                clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
                clock-names = "fck";
index cb4f7b2798fe23be13facdffaef64430cf168ab1..138414a7d17037814aac69a3327d7107ccb24eb5 100644 (file)
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        dbsc1: memory-controller@e6790000 {
                dma0: dma-controller@e6700020 {
                        compatible = "renesas,shdma-r8a73a4";
                        reg = <0 0xe6700020 0 0x89e0>;
-                       interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                                       0 200 IRQ_TYPE_LEVEL_HIGH
-                                       0 201 IRQ_TYPE_LEVEL_HIGH
-                                       0 202 IRQ_TYPE_LEVEL_HIGH
-                                       0 203 IRQ_TYPE_LEVEL_HIGH
-                                       0 204 IRQ_TYPE_LEVEL_HIGH
-                                       0 205 IRQ_TYPE_LEVEL_HIGH
-                                       0 206 IRQ_TYPE_LEVEL_HIGH
-                                       0 207 IRQ_TYPE_LEVEL_HIGH
-                                       0 208 IRQ_TYPE_LEVEL_HIGH
-                                       0 209 IRQ_TYPE_LEVEL_HIGH
-                                       0 210 IRQ_TYPE_LEVEL_HIGH
-                                       0 211 IRQ_TYPE_LEVEL_HIGH
-                                       0 212 IRQ_TYPE_LEVEL_HIGH
-                                       0 213 IRQ_TYPE_LEVEL_HIGH
-                                       0 214 IRQ_TYPE_LEVEL_HIGH
-                                       0 215 IRQ_TYPE_LEVEL_HIGH
-                                       0 216 IRQ_TYPE_LEVEL_HIGH
-                                       0 217 IRQ_TYPE_LEVEL_HIGH
-                                       0 218 IRQ_TYPE_LEVEL_HIGH
-                                       0 219 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "error",
                                        "ch0", "ch1", "ch2", "ch3",
                                        "ch4", "ch5", "ch6", "ch7",
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe60b0000 0 0x428>;
-               interrupts = <0 179 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IIC5>;
                power-domains = <&pd_a3sp>;
 
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-r8a73a4", "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&pd_c5>;
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 4 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 5 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 6 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 7 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 8 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 9 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 10 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 11 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 12 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 13 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 14 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 15 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 16 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 17 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 18 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 19 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 20 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 21 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 22 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 23 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 24 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 25 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 26 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 27 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 28 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 29 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 30 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IRQC>;
                power-domains = <&pd_c4>;
        };
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0200 0 0x200>;
-               interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 33 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 34 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 35 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 36 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 37 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 38 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 39 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 40 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 41 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 42 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 43 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 44 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 45 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 46 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 47 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 48 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 49 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 50 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 51 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 52 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 53 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 54 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 55 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 56 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 57 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IRQC>;
                power-domains = <&pd_c4>;
        };
                compatible = "renesas,thermal-r8a73a4", "renesas,rcar-thermal";
                reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>,
                         <0 0xe61f0200 0 0x38>, <0 0xe61f0300 0 0x38>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A73A4_CLK_THERMAL>;
                power-domains = <&pd_c5>;
        };
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6500000 0 0x428>;
-               interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC0>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6510000 0 0x428>;
-               interrupts = <0 175 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC1>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6520000 0 0x428>;
-               interrupts = <0 176 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC2>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6530000 0 0x428>;
-               interrupts = <0 177 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IIC3>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6540000 0 0x428>;
-               interrupts = <0 178 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IIC4>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6550000 0 0x428>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC6>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6560000 0 0x428>;
-               interrupts = <0 185 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC7>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6570000 0 0x428>;
-               interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A73A4_CLK_IIC8>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
        scifb0: serial@e6c20000 {
                compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
                reg = <0 0xe6c20000 0 0x100>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb1: serial@e6c30000 {
                compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
                reg = <0 0xe6c30000 0 0x100>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa0: serial@e6c40000 {
                compatible = "renesas,scifa-r8a73a4", "renesas,scifa";
                reg = <0 0xe6c40000 0 0x100>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa1: serial@e6c50000 {
                compatible = "renesas,scifa-r8a73a4", "renesas,scifa";
                reg = <0 0xe6c50000 0 0x100>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb2: serial@e6ce0000 {
                compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
                reg = <0 0xe6ce0000 0 0x100>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb3: serial@e6cf0000 {
                compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
                reg = <0 0xe6cf0000 0 0x100>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFB3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_c4>;
                status = "disabled";
        };
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee100000 0 0x100>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_SDHI0>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi1: sd@ee120000 {
                compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee120000 0 0x100>;
-               interrupts = <0 166 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_SDHI1>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi2: sd@ee140000 {
                compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee140000 0 0x100>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_SDHI2>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        mmcif0: mmc@ee200000 {
                compatible = "renesas,sh-mmcif";
                reg = <0 0xee200000 0 0x80>;
-               interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_MMCIF0>;
                power-domains = <&pd_a3sp>;
                reg-io-width = <4>;
        mmcif1: mmc@ee220000 {
                compatible = "renesas,sh-mmcif";
                reg = <0 0xee220000 0 0x80>;
-               interrupts = <0 170 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_MMCIF1>;
                power-domains = <&pd_a3sp>;
                reg-io-width = <4>;
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        bsc: bus@fec10000 {
index 6ef954766eef740f7ef439103fb4e90e46030444..995fbda74b7a057e57577f3a622f6f058fe3667c 100644 (file)
@@ -11,6 +11,7 @@
 /include/ "skeleton.dtsi"
 
 #include <dt-bindings/clock/r8a7740-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
@@ -41,7 +42,7 @@
        L2: cache-controller {
                compatible = "arm,pl310-cache";
                reg = <0xf0100000 0x1000>;
-               interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
                power-domains = <&pd_a3sm>;
                arm,data-latency = <3 3 3>;
                arm,tag-latency = <2 2 2>;
@@ -58,7 +59,7 @@
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        ptm {
@@ -69,7 +70,7 @@
        cmt1: timer@e6138000 {
                compatible = "renesas,cmt-48-r8a7740", "renesas,cmt-48";
                reg = <0xe6138000 0x170>;
-               interrupts = <0 58 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&pd_c5>;
                        <0xe6900020 1>,
                        <0xe6900040 1>,
                        <0xe6900060 1>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
                power-domains = <&pd_a4s>;
        };
                        <0xe6900024 1>,
                        <0xe6900044 1>,
                        <0xe6900064 1>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
                power-domains = <&pd_a4s>;
        };
                        <0xe6900028 1>,
                        <0xe6900048 1>,
                        <0xe6900068 1>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
                power-domains = <&pd_a4s>;
        };
                        <0xe690002c 1>,
                        <0xe690004c 1>,
                        <0xe690006c 1>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
                power-domains = <&pd_a4s>;
        };
                compatible = "renesas,gether-r8a7740";
                reg = <0xe9a00000 0x800>,
                      <0xe9a01800 0x800>;
-               interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_GETHER>;
                power-domains = <&pd_a4s>;
                phy-mode = "mii";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7740", "renesas,rmobile-iic";
                reg = <0xfff20000 0x425>;
-               interrupts = <0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7740_CLK_IIC0>;
                power-domains = <&pd_a4r>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7740", "renesas,rmobile-iic";
                reg = <0xe6c20000 0x425>;
-               interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH
-                             0 71 IRQ_TYPE_LEVEL_HIGH
-                             0 72 IRQ_TYPE_LEVEL_HIGH
-                             0 73 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_IIC1>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
        scifa0: serial@e6c40000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c40000 0x100>;
-               interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa1: serial@e6c50000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c50000 0x100>;
-               interrupts = <0 101 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa2: serial@e6c60000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c60000 0x100>;
-               interrupts = <0 102 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa3: serial@e6c70000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c70000 0x100>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa4: serial@e6c80000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c80000 0x100>;
-               interrupts = <0 104 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa5: serial@e6cb0000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6cb0000 0x100>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa6: serial@e6cc0000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6cc0000 0x100>;
-               interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA6>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa7: serial@e6cd0000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6cd0000 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA7>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb: serial@e6c30000 {
                compatible = "renesas,scifb-r8a7740", "renesas,scifb";
                reg = <0xe6c30000 0x100>;
-               interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFB>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        mmcif0: mmc@e6bd0000 {
                compatible = "renesas,mmcif-r8a7740", "renesas,sh-mmcif";
                reg = <0xe6bd0000 0x100>;
-               interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH
-                             0 57 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_MMC>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
        sdhi0: sd@e6850000 {
                compatible = "renesas,sdhi-r8a7740";
                reg = <0xe6850000 0x100>;
-               interrupts = <0 117 IRQ_TYPE_LEVEL_HIGH
-                             0 118 IRQ_TYPE_LEVEL_HIGH
-                             0 119 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_SDHI0>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi1: sd@e6860000 {
                compatible = "renesas,sdhi-r8a7740";
                reg = <0xe6860000 0x100>;
-               interrupts = <0 121 IRQ_TYPE_LEVEL_HIGH
-                             0 122 IRQ_TYPE_LEVEL_HIGH
-                             0 123 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_SDHI1>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi2: sd@e6870000 {
                compatible = "renesas,sdhi-r8a7740";
                reg = <0xe6870000 0x100>;
-               interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH
-                             0 126 IRQ_TYPE_LEVEL_HIGH
-                             0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7740_CLK_SDHI2>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
                #sound-dai-cells = <1>;
                compatible = "renesas,fsi2-r8a7740", "renesas,sh_fsi2";
                reg = <0xfe1f0000 0x400>;
-               interrupts = <0 9 0x4>;
+               interrupts = <GIC_SPI 9 0x4>;
                clocks = <&mstp3_clks R8A7740_CLK_FSI>;
                power-domains = <&pd_a4mp>;
                status = "disabled";
        tmu0: timer@fff80000 {
                compatible = "renesas,tmu-r8a7740", "renesas,tmu";
                reg = <0xfff80000 0x2c>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 200 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7740_CLK_TMU0>;
                clock-names = "fck";
                power-domains = <&pd_a4r>;
        tmu1: timer@fff90000 {
                compatible = "renesas,tmu-r8a7740", "renesas,tmu";
                reg = <0xfff90000 0x2c>;
-               interrupts = <0 170 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 171 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 172 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7740_CLK_TMU1>;
                clock-names = "fck";
                power-domains = <&pd_a4r>;
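Alongside the interrupt macro conversion, the SCIFA/SCIFB hunks rename the functional clock from the legacy "sci_ick" to "fck". A minimal sketch of the resulting hookup, following the scifa0 node shown above:

	scifa0: serial@e6c40000 {
		compatible = "renesas,scifa-r8a7740", "renesas,scifa";
		clocks = <&mstp2_clks R8A7740_CLK_SCIFA0>;
		clock-names = "fck";	/* was "sci_ick" */
	};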
index a52b359e2ae24a300e9cc5f5de946d3005ad2afe..21e3b9dda2dabf5e8d44de253b7cfaad5d11b50a 100644 (file)
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        scif0_pins: serial0 {
                renesas,groups = "scif0_data_a", "scif0_ctrl";
                renesas,function = "scif0";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        mmc_pins: mmc {
                renesas,groups = "mmc_data8", "mmc_ctrl";
                renesas,function = "mmc";
 
        status = "okay";
 };
+
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
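On the board side, enabling the external SCIF clock takes three pieces that all appear in these hunks: a scif_clk pin group in &pfc, the scif_clk fixed-clock that the SoC .dtsi declares disabled with clock-frequency = <0> (see the r8a7778 hunk further down), and a board-level override supplying the real crystal frequency. A minimal board-side sketch; the 14745600 Hz value is board-specific and shown only as an example:

	&pfc {
		scif_clk_pins: scif_clk {
			renesas,groups = "scif_clk";
			renesas,function = "scif_clk";
		};
	};

	&scif_clk {
		clock-frequency = <14745600>;	/* external crystal on this board */
		status = "okay";
	};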
index 791aafd310a5d78bf0ac92f379b7a77ef2d9a03f..f83a348fc07a49e37f6bef9cbdd33cbc76f6d192 100644 (file)
@@ -17,6 +17,7 @@
 /include/ "skeleton.dtsi"
 
 #include <dt-bindings/clock/r8a7778-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
@@ -51,7 +52,7 @@
        ether: ethernet@fde00000 {
                compatible = "renesas,ether-r8a7778";
                reg = <0xfde00000 0x400>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7778_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
                        <0xfe780024 4>,
                        <0xfe780044 4>,
                        <0xfe780064 4>;
-               interrupts =   <0 27 IRQ_TYPE_LEVEL_HIGH
-                               0 28 IRQ_TYPE_LEVEL_HIGH
-                               0 29 IRQ_TYPE_LEVEL_HIGH
-                               0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =   <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
+                               GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
+                               GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
+                               GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                sense-bitfield-width = <2>;
        };
 
        gpio0: gpio@ffc40000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc40000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
        gpio1: gpio@ffc41000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc41000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 32>;
        gpio2: gpio@ffc42000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc42000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@ffc43000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc43000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@ffc44000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc44000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 27>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7778";
                reg = <0xffc70000 0x1000>;
-               interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7778";
                reg = <0xffc71000 0x1000>;
-               interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7778";
                reg = <0xffc72000 0x1000>;
-               interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7778";
                reg = <0xffc73000 0x1000>;
-               interrupts = <0 77 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        tmu0: timer@ffd80000 {
                compatible = "renesas,tmu-r8a7778", "renesas,tmu";
                reg = <0xffd80000 0x30>;
-               interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 33 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 34 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_TMU0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        tmu1: timer@ffd81000 {
                compatible = "renesas,tmu-r8a7778", "renesas,tmu";
                reg = <0xffd81000 0x30>;
-               interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 37 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 38 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_TMU1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        tmu2: timer@ffd82000 {
                compatible = "renesas,tmu-r8a7778", "renesas,tmu";
                reg = <0xffd82000 0x30>;
-               interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 41 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 42 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_TMU2>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
                };
 
                rcar_sound,ssi {
-                       ssi3: ssi@3 { interrupts = <0 0x85 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi4: ssi@4 { interrupts = <0 0x85 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi5: ssi@5 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi6: ssi@6 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi7: ssi@7 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi8: ssi@8 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi9: ssi@9 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi3: ssi@3 { interrupts = <GIC_SPI 0x85 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi4: ssi@4 { interrupts = <GIC_SPI 0x85 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi5: ssi@5 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi6: ssi@6 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi7: ssi@7 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi8: ssi@8 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi9: ssi@9 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
                };
        };
 
        scif0: serial@ffe40000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe40000 0x100>;
-               interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF0>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif1: serial@ffe41000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe41000 0x100>;
-               interrupts = <0 71 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF1>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif2: serial@ffe42000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe42000 0x100>;
-               interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF2>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif3: serial@ffe43000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe43000 0x100>;
-               interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF3>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif4: serial@ffe44000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe44000 0x100>;
-               interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF4>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif5: serial@ffe45000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe45000 0x100>;
-               interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF5>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        mmcif: mmc@ffe4e000 {
                compatible = "renesas,sh-mmcif";
                reg = <0xffe4e000 0x100>;
-               interrupts = <0 61 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7778_CLK_MMC>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi0: sd@ffe4c000 {
                compatible = "renesas,sdhi-r8a7778";
                reg = <0xffe4c000 0x100>;
-               interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7778_CLK_SDHI0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi1: sd@ffe4d000 {
                compatible = "renesas,sdhi-r8a7778";
                reg = <0xffe4d000 0x100>;
-               interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7778_CLK_SDHI1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi2: sd@ffe4f000 {
                compatible = "renesas,sdhi-r8a7778";
                reg = <0xffe4f000 0x100>;
-               interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7778_CLK_SDHI2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        hspi0: spi@fffc7000 {
                compatible = "renesas,hspi-r8a7778", "renesas,hspi";
                reg = <0xfffc7000 0x18>;
-               interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        hspi1: spi@fffc8000 {
                compatible = "renesas,hspi-r8a7778", "renesas,hspi";
                reg = <0xfffc8000 0x18>;
-               interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        hspi2: spi@fffc6000 {
                compatible = "renesas,hspi-r8a7778", "renesas,hspi";
                reg = <0xfffc6000 0x18>;
-               interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
                        clock-output-names = "extal";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
+
                /* Special CPG clocks */
                cpg_clocks: cpg_clocks@ffc80000 {
                        compatible = "renesas,r8a7778-cpg-clocks";
index fe396c8d58db798637a5fabaa74fe1f069c8089f..e111d35d02aebe19a8c9ae3bce5e10fc9686665b 100644 (file)
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        du_pins: du {
                du0 {
                        renesas,groups = "du0_rgb888", "du0_sync_1", "du0_clk_out_0";
                };
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk_b";
+               renesas,function = "scif_clk";
+       };
+
        ethernet_pins: ethernet {
                intc {
                        renesas,groups = "intc_irq1_b";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &sdhi0 {
        pinctrl-0 = <&sdhi0_pins>;
        pinctrl-names = "default";
index 6afa909865b52b71c970087e90dc77860ea177e6..a0cc08e6295b03968b026e2f4b46e49b7c9d6f16 100644 (file)
@@ -74,7 +74,7 @@
        gpio0: gpio@ffc40000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc40000 0x2c>;
-               interrupts = <0 141 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
@@ -85,7 +85,7 @@
        gpio1: gpio@ffc41000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc41000 0x2c>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 32>;
@@ -96,7 +96,7 @@
        gpio2: gpio@ffc42000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc42000 0x2c>;
-               interrupts = <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@ffc43000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc43000 0x2c>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@ffc44000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc44000 0x2c>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@ffc45000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc45000 0x2c>;
-               interrupts = <0 146 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 32>;
        gpio6: gpio@ffc46000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc46000 0x2c>;
-               interrupts = <0 147 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 192 9>;
                        <0xfe780044 4>,
                        <0xfe780064 4>,
                        <0xfe780000 4>;
-               interrupts = <0 27 IRQ_TYPE_LEVEL_HIGH
-                             0 28 IRQ_TYPE_LEVEL_HIGH
-                             0 29 IRQ_TYPE_LEVEL_HIGH
-                             0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                sense-bitfield-width = <2>;
        };
 
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7779";
                reg = <0xffc70000 0x1000>;
-               interrupts = <0 79 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7779";
                reg = <0xffc71000 0x1000>;
-               interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7779";
                reg = <0xffc72000 0x1000>;
-               interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7779";
                reg = <0xffc73000 0x1000>;
-               interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif0: serial@ffe40000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe40000 0x100>;
-               interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF0>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif1: serial@ffe41000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe41000 0x100>;
-               interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF1>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif2: serial@ffe42000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe42000 0x100>;
-               interrupts = <0 90 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF2>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif3: serial@ffe43000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe43000 0x100>;
-               interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF3>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif4: serial@ffe44000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe44000 0x100>;
-               interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF4>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif5: serial@ffe45000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe45000 0x100>;
-               interrupts = <0 93 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF5>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        tmu0: timer@ffd80000 {
                compatible = "renesas,tmu-r8a7779", "renesas,tmu";
                reg = <0xffd80000 0x30>;
-               interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 33 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 34 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        tmu1: timer@ffd81000 {
                compatible = "renesas,tmu-r8a7779", "renesas,tmu";
                reg = <0xffd81000 0x30>;
-               interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 37 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 38 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_TMU1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        tmu2: timer@ffd82000 {
                compatible = "renesas,tmu-r8a7779", "renesas,tmu";
                reg = <0xffd82000 0x30>;
-               interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 41 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 42 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_TMU2>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        sata: sata@fc600000 {
                compatible = "renesas,sata-r8a7779", "renesas,rcar-sata";
                reg = <0xfc600000 0x2000>;
-               interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7779_CLK_SATA>;
                power-domains = <&cpg_clocks>;
        };
        sdhi0: sd@ffe4c000 {
                compatible = "renesas,sdhi-r8a7779";
                reg = <0xffe4c000 0x100>;
-               interrupts = <0 104 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7779_CLK_SDHI0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi1: sd@ffe4d000 {
                compatible = "renesas,sdhi-r8a7779";
                reg = <0xffe4d000 0x100>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7779_CLK_SDHI1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi2: sd@ffe4e000 {
                compatible = "renesas,sdhi-r8a7779";
                reg = <0xffe4e000 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7779_CLK_SDHI2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi3: sd@ffe4f000 {
                compatible = "renesas,sdhi-r8a7779";
                reg = <0xffe4f000 0x100>;
-               interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7779_CLK_SDHI3>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        hspi0: spi@fffc7000 {
                compatible = "renesas,hspi-r8a7779", "renesas,hspi";
                reg = <0xfffc7000 0x18>;
-               interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
        hspi1: spi@fffc8000 {
                compatible = "renesas,hspi-r8a7779", "renesas,hspi";
                reg = <0xfffc8000 0x18>;
-               interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
        hspi2: spi@fffc6000 {
                compatible = "renesas,hspi-r8a7779", "renesas,hspi";
                reg = <0xfffc6000 0x18>;
-               interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
        du: display@fff80000 {
                compatible = "renesas,du-r8a7779";
                reg = <0 0xfff80000 0 0x40000>;
-               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7779_CLK_DU>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                        clock-output-names = "extal";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
+
                /* Special CPG clocks */
                cpg_clocks: clocks@ffc80000 {
                        compatible = "renesas,r8a7779-cpg-clocks";
index 052dcee4790dd298d2c84b07b4574e2cb462b5bc..cdc0414f5f0716dde7bc454cb656173dc418f012 100644 (file)
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        du_pins: du {
                renesas,groups = "du_rgb666", "du_sync_1", "du_clk_out_0";
                renesas,function = "du";
                renesas,function = "scif0";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &msiof1 {
        pinctrl-0 = <&msiof1_pins>;
        pinctrl-names = "default";
index 7dfd393bfc7e7a5b52826139c9d4ad16a4de3841..c9583fa6cae7139f82387230765c08f6fbb5e70e 100644 (file)
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6050000 0 0x50>;
-               interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
        gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6051000 0 0x50>;
-               interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 30>;
        gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6052000 0 0x50>;
-               interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 30>;
        gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6053000 0 0x50>;
-               interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6054000 0 0x50>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6055000 0 0x50>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 32>;
        thermal@e61f0000 {
                compatible = "renesas,thermal-r8a7790", "renesas,rcar-thermal";
                reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
                power-domains = <&cpg_clocks>;
        };
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        cmt0: timer@ffca0000 {
                compatible = "renesas,cmt-48-r8a7790", "renesas,cmt-48-gen2";
                reg = <0 0xffca0000 0 0x1004>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_CMT0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-r8a7790", "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 122 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 123 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 124 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 125 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 126 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7790_CLK_IRQC>;
                power-domains = <&cpg_clocks>;
        };
        dmac0: dma-controller@e6700000 {
                compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
                reg = <0 0xe6700000 0 0x20000>;
-               interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
-                             0 200 IRQ_TYPE_LEVEL_HIGH
-                             0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH
-                             0 205 IRQ_TYPE_LEVEL_HIGH
-                             0 206 IRQ_TYPE_LEVEL_HIGH
-                             0 207 IRQ_TYPE_LEVEL_HIGH
-                             0 208 IRQ_TYPE_LEVEL_HIGH
-                             0 209 IRQ_TYPE_LEVEL_HIGH
-                             0 210 IRQ_TYPE_LEVEL_HIGH
-                             0 211 IRQ_TYPE_LEVEL_HIGH
-                             0 212 IRQ_TYPE_LEVEL_HIGH
-                             0 213 IRQ_TYPE_LEVEL_HIGH
-                             0 214 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        dmac1: dma-controller@e6720000 {
                compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
                reg = <0 0xe6720000 0 0x20000>;
-               interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                             0 216 IRQ_TYPE_LEVEL_HIGH
-                             0 217 IRQ_TYPE_LEVEL_HIGH
-                             0 218 IRQ_TYPE_LEVEL_HIGH
-                             0 219 IRQ_TYPE_LEVEL_HIGH
-                             0 308 IRQ_TYPE_LEVEL_HIGH
-                             0 309 IRQ_TYPE_LEVEL_HIGH
-                             0 310 IRQ_TYPE_LEVEL_HIGH
-                             0 311 IRQ_TYPE_LEVEL_HIGH
-                             0 312 IRQ_TYPE_LEVEL_HIGH
-                             0 313 IRQ_TYPE_LEVEL_HIGH
-                             0 314 IRQ_TYPE_LEVEL_HIGH
-                             0 315 IRQ_TYPE_LEVEL_HIGH
-                             0 316 IRQ_TYPE_LEVEL_HIGH
-                             0 317 IRQ_TYPE_LEVEL_HIGH
-                             0 318 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        audma0: dma-controller@ec700000 {
                compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
                reg = <0 0xec700000 0 0x10000>;
-               interrupts =    <0 346 IRQ_TYPE_LEVEL_HIGH
-                                0 320 IRQ_TYPE_LEVEL_HIGH
-                                0 321 IRQ_TYPE_LEVEL_HIGH
-                                0 322 IRQ_TYPE_LEVEL_HIGH
-                                0 323 IRQ_TYPE_LEVEL_HIGH
-                                0 324 IRQ_TYPE_LEVEL_HIGH
-                                0 325 IRQ_TYPE_LEVEL_HIGH
-                                0 326 IRQ_TYPE_LEVEL_HIGH
-                                0 327 IRQ_TYPE_LEVEL_HIGH
-                                0 328 IRQ_TYPE_LEVEL_HIGH
-                                0 329 IRQ_TYPE_LEVEL_HIGH
-                                0 330 IRQ_TYPE_LEVEL_HIGH
-                                0 331 IRQ_TYPE_LEVEL_HIGH
-                                0 332 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =    <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        audma1: dma-controller@ec720000 {
                compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
                reg = <0 0xec720000 0 0x10000>;
-               interrupts =    <0 347 IRQ_TYPE_LEVEL_HIGH
-                                0 333 IRQ_TYPE_LEVEL_HIGH
-                                0 334 IRQ_TYPE_LEVEL_HIGH
-                                0 335 IRQ_TYPE_LEVEL_HIGH
-                                0 336 IRQ_TYPE_LEVEL_HIGH
-                                0 337 IRQ_TYPE_LEVEL_HIGH
-                                0 338 IRQ_TYPE_LEVEL_HIGH
-                                0 339 IRQ_TYPE_LEVEL_HIGH
-                                0 340 IRQ_TYPE_LEVEL_HIGH
-                                0 341 IRQ_TYPE_LEVEL_HIGH
-                                0 342 IRQ_TYPE_LEVEL_HIGH
-                                0 343 IRQ_TYPE_LEVEL_HIGH
-                                0 344 IRQ_TYPE_LEVEL_HIGH
-                                0 345 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =    <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        usb_dmac0: dma-controller@e65a0000 {
                compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
                reg = <0 0xe65a0000 0 0x100>;
-               interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
-                             0 109 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "ch0", "ch1";
                clocks = <&mstp3_clks R8A7790_CLK_USBDMAC0>;
                power-domains = <&cpg_clocks>;
        usb_dmac1: dma-controller@e65b0000 {
                compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
                reg = <0 0xe65b0000 0 0x100>;
-               interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH
-                             0 110 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "ch0", "ch1";
                clocks = <&mstp3_clks R8A7790_CLK_USBDMAC1>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7790";
                reg = <0 0xe6508000 0 0x40>;
-               interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <110>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7790";
                reg = <0 0xe6518000 0 0x40>;
-               interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7790";
                reg = <0 0xe6530000 0 0x40>;
-               interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7790";
                reg = <0 0xe6540000 0 0x40>;
-               interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <110>;
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
                reg = <0 0xe6500000 0 0x425>;
-               interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_IIC0>;
                dmas = <&dmac0 0x61>, <&dmac0 0x62>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
                reg = <0 0xe6510000 0 0x425>;
-               interrupts = <0 175 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_IIC1>;
                dmas = <&dmac0 0x65>, <&dmac0 0x66>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
                reg = <0 0xe6520000 0 0x425>;
-               interrupts = <0 176 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_IIC2>;
                dmas = <&dmac0 0x69>, <&dmac0 0x6a>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
                reg = <0 0xe60b0000 0 0x425>;
-               interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_IICDVFS>;
                dmas = <&dmac0 0x77>, <&dmac0 0x78>;
                dma-names = "tx", "rx";
        mmcif0: mmc@ee200000 {
                compatible = "renesas,mmcif-r8a7790", "renesas,sh-mmcif";
                reg = <0 0xee200000 0 0x80>;
-               interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_MMCIF0>;
                dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
                dma-names = "tx", "rx";
        mmcif1: mmc@ee220000 {
                compatible = "renesas,mmcif-r8a7790", "renesas,sh-mmcif";
                reg = <0 0xee220000 0 0x80>;
-               interrupts = <0 170 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_MMCIF1>;
                dmas = <&dmac0 0xe1>, <&dmac0 0xe2>;
                dma-names = "tx", "rx";
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee100000 0 0x328>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SDHI0>;
                dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
                dma-names = "tx", "rx";
        sdhi1: sd@ee120000 {
                compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee120000 0 0x328>;
-               interrupts = <0 166 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SDHI1>;
                dmas = <&dmac1 0xc9>, <&dmac1 0xca>;
                dma-names = "tx", "rx";
        sdhi2: sd@ee140000 {
                compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee140000 0 0x100>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SDHI2>;
                dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
                dma-names = "tx", "rx";
        sdhi3: sd@ee160000 {
                compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee160000 0 0x100>;
-               interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SDHI3>;
                dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
                dma-names = "tx", "rx";
        };
 
        scifa0: serial@e6c40000 {
-               compatible = "renesas,scifa-r8a7790", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7790",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c40000 0 64>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x21>, <&dmac0 0x22>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa1: serial@e6c50000 {
-               compatible = "renesas,scifa-r8a7790", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7790",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c50000 0 64>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x25>, <&dmac0 0x26>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa2: serial@e6c60000 {
-               compatible = "renesas,scifa-r8a7790", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7790",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c60000 0 64>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x27>, <&dmac0 0x28>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb0: serial@e6c20000 {
-               compatible = "renesas,scifb-r8a7790", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7790",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c20000 0 64>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb1: serial@e6c30000 {
-               compatible = "renesas,scifb-r8a7790", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7790",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c30000 0 64>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb2: serial@e6ce0000 {
-               compatible = "renesas,scifb-r8a7790", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7790",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6ce0000 0 64>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif0: serial@e6e60000 {
-               compatible = "renesas,scif-r8a7790", "renesas,scif";
+               compatible = "renesas,scif-r8a7790", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e60000 0 64>;
-               interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7790_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_SCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif1: serial@e6e68000 {
-               compatible = "renesas,scif-r8a7790", "renesas,scif";
+               compatible = "renesas,scif-r8a7790", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e68000 0 64>;
-               interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7790_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_SCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif0: serial@e62c0000 {
-               compatible = "renesas,hscif-r8a7790", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7790",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c0000 0 96>;
-               interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7790_CLK_HSCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_HSCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif1: serial@e62c8000 {
-               compatible = "renesas,hscif-r8a7790", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7790",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c8000 0 96>;
-               interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7790_CLK_HSCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_HSCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        ether: ethernet@ee700000 {
                compatible = "renesas,ether-r8a7790";
                reg = <0 0xee700000 0 0x400>;
-               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
        avb: ethernet@e6800000 {
                compatible = "renesas,etheravb-r8a7790";
                reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
-               interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_ETHERAVB>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        sata0: sata@ee300000 {
                compatible = "renesas,sata-r8a7790";
                reg = <0 0xee300000 0 0x2000>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_SATA0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sata1: sata@ee500000 {
                compatible = "renesas,sata-r8a7790";
                reg = <0 0xee500000 0 0x2000>;
-               interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_SATA1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        hsusb: usb@e6590000 {
-               compatible = "renesas,usbhs-r8a7790";
+               compatible = "renesas,usbhs-r8a7790", "renesas,rcar-gen2-usbhs";
                reg = <0 0xe6590000 0 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7790_CLK_HSUSB>;
                dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
                       <&usb_dmac1 0>, <&usb_dmac1 1>;
        vin0: video@e6ef0000 {
                compatible = "renesas,vin-r8a7790";
                reg = <0 0xe6ef0000 0 0x1000>;
-               interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin1: video@e6ef1000 {
                compatible = "renesas,vin-r8a7790";
                reg = <0 0xe6ef1000 0 0x1000>;
-               interrupts = <0 189 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_VIN1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin2: video@e6ef2000 {
                compatible = "renesas,vin-r8a7790";
                reg = <0 0xe6ef2000 0 0x1000>;
-               interrupts = <0 190 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_VIN2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin3: video@e6ef3000 {
                compatible = "renesas,vin-r8a7790";
                reg = <0 0xe6ef3000 0 0x1000>;
-               interrupts = <0 191 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_VIN3>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vsp1@fe920000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe920000 0 0x8000>;
-               interrupts = <0 266 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_VSP1_R>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe928000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe928000 0 0x8000>;
-               interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_VSP1_S>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe930000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe930000 0 0x8000>;
-               interrupts = <0 246 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU0>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe938000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe938000 0 0x8000>;
-               interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU1>;
                power-domains = <&cpg_clocks>;
 
                      <0 0xfeb90000 0 0x1c>,
                      <0 0xfeb94000 0 0x1c>;
                reg-names = "du", "lvds.0", "lvds.1";
-               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 268 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 269 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7790_CLK_DU0>,
                         <&mstp7_clks R8A7790_CLK_DU1>,
                         <&mstp7_clks R8A7790_CLK_DU2>,
        can0: can@e6e80000 {
                compatible = "renesas,can-r8a7790";
                reg = <0 0xe6e80000 0 0x1000>;
-               interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_RCAN0>,
                         <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
                clock-names = "clkp1", "clkp2", "can_clk";
        can1: can@e6e88000 {
                compatible = "renesas,can-r8a7790";
                reg = <0 0xe6e88000 0 0x1000>;
-               interrupts = <0 187 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_RCAN1>,
                         <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
                clock-names = "clkp1", "clkp2", "can_clk";
        jpu: jpeg-codec@fe980000 {
                compatible = "renesas,jpu-r8a7790";
                reg = <0 0xfe980000 0 0x10300>;
-               interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_JPU>;
                power-domains = <&cpg_clocks>;
        };
                        clock-output-names = "audio_clk_c";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
+
                /* External USB clock - can be overridden by the board */
                usb_extal_clk: usb_extal_clk {
                        compatible = "fixed-clock";
        qspi: spi@e6b10000 {
                compatible = "renesas,qspi-r8a7790", "renesas,qspi";
                reg = <0 0xe6b10000 0 0x2c>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_QSPI_MOD>;
                dmas = <&dmac0 0x17>, <&dmac0 0x18>;
                dma-names = "tx", "rx";
        msiof0: spi@e6e20000 {
                compatible = "renesas,msiof-r8a7790";
                reg = <0 0xe6e20000 0 0x0064>;
-               interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7790_CLK_MSIOF0>;
                dmas = <&dmac0 0x51>, <&dmac0 0x52>;
                dma-names = "tx", "rx";
        msiof1: spi@e6e10000 {
                compatible = "renesas,msiof-r8a7790";
                reg = <0 0xe6e10000 0 0x0064>;
-               interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_MSIOF1>;
                dmas = <&dmac0 0x55>, <&dmac0 0x56>;
                dma-names = "tx", "rx";
        msiof2: spi@e6e00000 {
                compatible = "renesas,msiof-r8a7790";
                reg = <0 0xe6e00000 0 0x0064>;
-               interrupts = <0 158 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_MSIOF2>;
                dmas = <&dmac0 0x41>, <&dmac0 0x42>;
                dma-names = "tx", "rx";
        msiof3: spi@e6c90000 {
                compatible = "renesas,msiof-r8a7790";
                reg = <0 0xe6c90000 0 0x0064>;
-               interrupts = <0 159 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_MSIOF3>;
                dmas = <&dmac0 0x45>, <&dmac0 0x46>;
                dma-names = "tx", "rx";
        xhci: usb@ee000000 {
                compatible = "renesas,xhci-r8a7790";
                reg = <0 0xee000000 0 0xc00>;
-               interrupts = <0 101 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SSUSB>;
                power-domains = <&cpg_clocks>;
                phys = <&usb2 1>;
                device_type = "pci";
                reg = <0 0xee090000 0 0xc00>,
                      <0 0xee080000 0 0x1100>;
-               interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                device_type = "pci";
                reg = <0 0xee0b0000 0 0xc00>,
                      <0 0xee0a0000 0 0x1100>;
-               interrupts = <0 112 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee0a0000 0 0xee0a0000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 112 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 112 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 112 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        pci2: pci@ee0d0000 {
                power-domains = <&cpg_clocks>;
                reg = <0 0xee0d0000 0 0xc00>,
                      <0 0xee0c0000 0 0x1100>;
-               interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
 
                bus-range = <2 2>;
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                /* Map all possible DDR as inbound ranges */
                dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000
                              0x43000000 1 0x80000000 1 0x80000000 0 0x80000000>;
-               interrupts = <0 116 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 117 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 118 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 116 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_PCIEC>, <&pcie_bus_clk>;
                clock-names = "pcie", "pcie_bus";
                power-domains = <&cpg_clocks>;
 
                rcar_sound,src {
                        src0: src@0 {
-                               interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x85>, <&audma1 0x9a>;
                                dma-names = "rx", "tx";
                        };
                        src1: src@1 {
-                               interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x87>, <&audma1 0x9c>;
                                dma-names = "rx", "tx";
                        };
                        src2: src@2 {
-                               interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x89>, <&audma1 0x9e>;
                                dma-names = "rx", "tx";
                        };
                        src3: src@3 {
-                               interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8b>, <&audma1 0xa0>;
                                dma-names = "rx", "tx";
                        };
                        src4: src@4 {
-                               interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8d>, <&audma1 0xb0>;
                                dma-names = "rx", "tx";
                        };
                        src5: src@5 {
-                               interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8f>, <&audma1 0xb2>;
                                dma-names = "rx", "tx";
                        };
                        src6: src@6 {
-                               interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x91>, <&audma1 0xb4>;
                                dma-names = "rx", "tx";
                        };
                        src7: src@7 {
-                               interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x93>, <&audma1 0xb6>;
                                dma-names = "rx", "tx";
                        };
                        src8: src@8 {
-                               interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x95>, <&audma1 0xb8>;
                                dma-names = "rx", "tx";
                        };
                        src9: src@9 {
-                               interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x97>, <&audma1 0xba>;
                                dma-names = "rx", "tx";
                        };
 
                rcar_sound,ssi {
                        ssi0: ssi@0 {
-                               interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi1: ssi@1 {
-                                interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
+                                interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi2: ssi@2 {
-                               interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi3: ssi@3 {
-                               interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi4: ssi@4 {
-                               interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi5: ssi@5 {
-                               interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi6: ssi@6 {
-                               interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi7: ssi@7 {
-                               interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi8: ssi@8 {
-                               interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi9: ssi@9 {
-                               interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
        ipmmu_sy0: mmu@e6280000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xe6280000 0 0x1000>;
-               interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 224 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_sy1: mmu@e6290000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xe6290000 0 0x1000>;
-               interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_ds: mmu@e6740000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xe6740000 0 0x1000>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mp: mmu@ec680000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xec680000 0 0x1000>;
-               interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mx: mmu@fe951000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xfe951000 0 0x1000>;
-               interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 221 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_rt: mmu@ffc80000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xffc80000 0 0x1000>;
-               interrupts = <0 307 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
index 45256f3cc83560a80fa91b6313b46e0bed4f5a23..0ad71b81d3a25f59aeaa3e329de0ac9f55c0cdad 100644 (file)
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        i2c2_pins: i2c2 {
                renesas,groups = "i2c2";
                renesas,function = "i2c2";
                renesas,function = "scif1";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &sdhi0 {
        pinctrl-0 = <&sdhi0_pins>;
        pinctrl-names = "default";
index 6713b1ea732b0b1d2282368ce5b50e42a9ae6043..ed1f6f884e2b216885efb98dd9460d76df7dc768 100644 (file)
@@ -8,6 +8,17 @@
  * kind, whether express or implied.
  */
 
+/*
+ * SSI-AK4642
+ *
+ * SW3: 1: AK4642
+ *      3: ADV7511
+ *
+ * This command is required before playback/capture:
+ *
+ *     amixer set "LINEOUT Mixer DACL" on
+ */
+
 /dts-v1/;
 #include "r8a7791.dtsi"
 #include <dt-bindings/gpio/gpio.h>
                states = <3300000 1
                          1800000 0>;
        };
+
+       hdmi-out {
+               compatible = "hdmi-connector";
+               type = "a";
+
+               port {
+                       hdmi_con: endpoint {
+                               remote-endpoint = <&adv7511_out>;
+                       };
+               };
+       };
+
+       x3_clk: x3-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <148500000>;
+       };
+
+       x16_clk: x16-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <74250000>;
+       };
+
+       x14_clk: x14-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <11289600>;
+               clock-output-names = "audio_clock";
+       };
+
+       sound {
+               compatible = "simple-audio-card";
+
+               simple-audio-card,format = "left_j";
+               simple-audio-card,bitclock-master = <&soundcodec>;
+               simple-audio-card,frame-master = <&soundcodec>;
+
+               simple-audio-card,cpu {
+                       sound-dai = <&rcar_sound>;
+               };
+
+               soundcodec: simple-audio-card,codec {
+                       sound-dai = <&ak4642>;
+                       clocks = <&x14_clk>;
+               };
+       };
 };
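
The new sound node is a simple-audio-card link between the R-Car sound unit (CPU DAI) and the AK4642 codec, with the codec as bit- and frame-clock master and x14_clk as its system clock; 11 289 600 Hz is 256 x 44 100 Hz, i.e. a 256fs master clock for 44.1 kHz audio. The link resolves because both DAI providers advertise #sound-dai-cells = <0>, so the sound-dai phandles carry no extra cells. A reduced sketch of that pairing, using only labels that appear in the hunks of this diff:

	ak4642: codec@12 {
		#sound-dai-cells = <0>;		/* provider: no extra specifier cells */
	};

	sound {
		compatible = "simple-audio-card";
		simple-audio-card,codec {
			sound-dai = <&ak4642>;	/* consumer: bare phandle, matching <0> */
			clocks = <&x14_clk>;	/* codec system clock, 256 * 44.1 kHz */
		};
	};
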
 
 &extal_clk {
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        scif0_pins: serial0 {
                renesas,groups = "scif0_data_d";
                renesas,function = "scif0";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
                renesas,groups = "can0_data";
                renesas,function = "can0";
        };
+
+       du_pins: du {
+               renesas,groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
+               renesas,function = "du";
+       };
+
+       ssi_pins: sound {
+               renesas,groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+               renesas,function = "ssi";
+       };
+
+       audio_clk_pins: audio_clk {
+               renesas,groups = "audio_clk_a";
+               renesas,function = "audio_clk";
+       };
 };
 
 &scif0 {
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &ether {
        pinctrl-0 = <&ether_pins &phy1_pins>;
        pinctrl-names = "default";
        status = "okay";
        clock-frequency = <400000>;
 
+       ak4642: codec@12 {
+               compatible = "asahi-kasei,ak4642";
+               #sound-dai-cells = <0>;
+               reg = <0x12>;
+       };
+
        composite-in@20 {
                compatible = "adi,adv7180";
                reg = <0x20>;
                        };
                };
        };
+
+       hdmi@39 {
+               compatible = "adi,adv7511w";
+               reg = <0x39>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
+
+               adi,input-depth = <8>;
+               adi,input-colorspace = "rgb";
+               adi,input-clock = "1x";
+               adi,input-style = <1>;
+               adi,input-justification = "evenly";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7511_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               adv7511_out: endpoint {
+                                       remote-endpoint = <&hdmi_con>;
+                               };
+                       };
+               };
+       };
 };
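
The ADV7511W encoder is tied into the video path through the OF graph: each link is a pair of endpoints whose remote-endpoint properties reference each other, giving DU (RGB out) -> ADV7511 (port@0 in, port@1 out) -> HDMI connector. A reduced sketch of one such mutual link as it looks after the board overlay merges into the SoC endpoint (du_out_rgb is the DU RGB output endpoint defined in the SoC .dtsi):

	du_out_rgb: endpoint {
		remote-endpoint = <&adv7511_in>;
	};

	adv7511_in: endpoint {
		remote-endpoint = <&du_out_rgb>;	/* OF-graph links must be mutual */
	};
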
 
 &sata0 {
 
        status = "okay";
 };
+
+&du {
+       pinctrl-0 = <&du_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       clocks = <&mstp7_clks R8A7791_CLK_DU0>,
+                <&mstp7_clks R8A7791_CLK_DU1>,
+                <&mstp7_clks R8A7791_CLK_LVDS0>,
+                <&x3_clk>, <&x16_clk>;
+       clock-names = "du.0", "du.1", "lvds.0",
+                     "dclkin.0", "dclkin.1";
+
+       ports {
+               port@1 {
+                       endpoint {
+                               remote-endpoint = <&adv7511_in>;
+                       };
+               };
+       };
+};
+
+&rcar_sound {
+       pinctrl-0 = <&ssi_pins &audio_clk_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       /* Single DAI */
+       #sound-dai-cells = <0>;
+
+       rcar_sound,dai {
+               dai0 {
+                       playback = <&ssi0>;
+                       capture  = <&ssi1>;
+               };
+       };
+};
+
+&ssi1 {
+       shared-pin;
+};
index 2a369ddcb6fd8dff8ce997993710d2f2592cb3f3..14aa62539ff297693c9510124266d6cb2c5f88cf 100644 (file)
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6050000 0 0x50>;
-               interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
@@ -97,7 +97,7 @@
        gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6051000 0 0x50>;
-               interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 26>;
        gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6052000 0 0x50>;
-               interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6053000 0 0x50>;
-               interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6054000 0 0x50>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6055000 0 0x50>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 32>;
        gpio6: gpio@e6055400 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6055400 0 0x50>;
-               interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 192 32>;
        gpio7: gpio@e6055800 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6055800 0 0x50>;
-               interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 224 26>;
        thermal@e61f0000 {
                compatible = "renesas,thermal-r8a7791", "renesas,rcar-thermal";
                reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A7791_CLK_THERMAL>;
                power-domains = <&cpg_clocks>;
        };
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
        };
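
In the armv7-timer node the first cell becomes GIC_PPI, and the flags cell keeps GIC_CPU_MASK_SIMPLE(2), which sets the per-CPU targeting bits (bits [15:8] of the flags cell) for this two-CPU SoC. In essence, the relevant definitions in <dt-bindings/interrupt-controller/arm-gic.h> are:

	/* GIC_CPU_MASK_RAW(x)       = (x) << 8
	 * GIC_CPU_MASK_SIMPLE(num)  = GIC_CPU_MASK_RAW((1 << num) - 1)
	 * so GIC_CPU_MASK_SIMPLE(2) == 0x300, i.e. deliver the PPI to CPU0 and CPU1.
	 */
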
 
        cmt0: timer@ffca0000 {
                compatible = "renesas,cmt-48-r8a7791", "renesas,cmt-48-gen2";
                reg = <0 0xffca0000 0 0x1004>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_CMT0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-r8a7791", "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 122 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 123 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 124 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 125 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 126 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 12 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 13 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 14 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 15 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 16 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 17 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7791_CLK_IRQC>;
                power-domains = <&cpg_clocks>;
        };
        dmac0: dma-controller@e6700000 {
                compatible = "renesas,dmac-r8a7791", "renesas,rcar-dmac";
                reg = <0 0xe6700000 0 0x20000>;
-               interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
-                             0 200 IRQ_TYPE_LEVEL_HIGH
-                             0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH
-                             0 205 IRQ_TYPE_LEVEL_HIGH
-                             0 206 IRQ_TYPE_LEVEL_HIGH
-                             0 207 IRQ_TYPE_LEVEL_HIGH
-                             0 208 IRQ_TYPE_LEVEL_HIGH
-                             0 209 IRQ_TYPE_LEVEL_HIGH
-                             0 210 IRQ_TYPE_LEVEL_HIGH
-                             0 211 IRQ_TYPE_LEVEL_HIGH
-                             0 212 IRQ_TYPE_LEVEL_HIGH
-                             0 213 IRQ_TYPE_LEVEL_HIGH
-                             0 214 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        dmac1: dma-controller@e6720000 {
                compatible = "renesas,dmac-r8a7791", "renesas,rcar-dmac";
                reg = <0 0xe6720000 0 0x20000>;
-               interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                             0 216 IRQ_TYPE_LEVEL_HIGH
-                             0 217 IRQ_TYPE_LEVEL_HIGH
-                             0 218 IRQ_TYPE_LEVEL_HIGH
-                             0 219 IRQ_TYPE_LEVEL_HIGH
-                             0 308 IRQ_TYPE_LEVEL_HIGH
-                             0 309 IRQ_TYPE_LEVEL_HIGH
-                             0 310 IRQ_TYPE_LEVEL_HIGH
-                             0 311 IRQ_TYPE_LEVEL_HIGH
-                             0 312 IRQ_TYPE_LEVEL_HIGH
-                             0 313 IRQ_TYPE_LEVEL_HIGH
-                             0 314 IRQ_TYPE_LEVEL_HIGH
-                             0 315 IRQ_TYPE_LEVEL_HIGH
-                             0 316 IRQ_TYPE_LEVEL_HIGH
-                             0 317 IRQ_TYPE_LEVEL_HIGH
-                             0 318 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        audma0: dma-controller@ec700000 {
                compatible = "renesas,dmac-r8a7791", "renesas,rcar-dmac";
                reg = <0 0xec700000 0 0x10000>;
-               interrupts =    <0 346 IRQ_TYPE_LEVEL_HIGH
-                                0 320 IRQ_TYPE_LEVEL_HIGH
-                                0 321 IRQ_TYPE_LEVEL_HIGH
-                                0 322 IRQ_TYPE_LEVEL_HIGH
-                                0 323 IRQ_TYPE_LEVEL_HIGH
-                                0 324 IRQ_TYPE_LEVEL_HIGH
-                                0 325 IRQ_TYPE_LEVEL_HIGH
-                                0 326 IRQ_TYPE_LEVEL_HIGH
-                                0 327 IRQ_TYPE_LEVEL_HIGH
-                                0 328 IRQ_TYPE_LEVEL_HIGH
-                                0 329 IRQ_TYPE_LEVEL_HIGH
-                                0 330 IRQ_TYPE_LEVEL_HIGH
-                                0 331 IRQ_TYPE_LEVEL_HIGH
-                                0 332 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =    <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        audma1: dma-controller@ec720000 {
                compatible = "renesas,dmac-r8a7791", "renesas,rcar-dmac";
                reg = <0 0xec720000 0 0x10000>;
-               interrupts =    <0 347 IRQ_TYPE_LEVEL_HIGH
-                                0 333 IRQ_TYPE_LEVEL_HIGH
-                                0 334 IRQ_TYPE_LEVEL_HIGH
-                                0 335 IRQ_TYPE_LEVEL_HIGH
-                                0 336 IRQ_TYPE_LEVEL_HIGH
-                                0 337 IRQ_TYPE_LEVEL_HIGH
-                                0 338 IRQ_TYPE_LEVEL_HIGH
-                                0 339 IRQ_TYPE_LEVEL_HIGH
-                                0 340 IRQ_TYPE_LEVEL_HIGH
-                                0 341 IRQ_TYPE_LEVEL_HIGH
-                                0 342 IRQ_TYPE_LEVEL_HIGH
-                                0 343 IRQ_TYPE_LEVEL_HIGH
-                                0 344 IRQ_TYPE_LEVEL_HIGH
-                                0 345 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =    <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        usb_dmac0: dma-controller@e65a0000 {
                compatible = "renesas,r8a7791-usb-dmac", "renesas,usb-dmac";
                reg = <0 0xe65a0000 0 0x100>;
-               interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
-                             0 109 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "ch0", "ch1";
                clocks = <&mstp3_clks R8A7791_CLK_USBDMAC0>;
                power-domains = <&cpg_clocks>;
        usb_dmac1: dma-controller@e65b0000 {
                compatible = "renesas,r8a7791-usb-dmac", "renesas,usb-dmac";
                reg = <0 0xe65b0000 0 0x100>;
-               interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH
-                             0 110 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "ch0", "ch1";
                clocks = <&mstp3_clks R8A7791_CLK_USBDMAC1>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6508000 0 0x40>;
-               interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6518000 0 0x40>;
-               interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6530000 0 0x40>;
-               interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6540000 0 0x40>;
-               interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6520000 0 0x40>;
-               interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C4>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6528000 0 0x40>;
-               interrupts = <0 20 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C5>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <110>;
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7791", "renesas,rmobile-iic";
                reg = <0 0xe60b0000 0 0x425>;
-               interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_IICDVFS>;
                dmas = <&dmac0 0x77>, <&dmac0 0x78>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7791", "renesas,rmobile-iic";
                reg = <0 0xe6500000 0 0x425>;
-               interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_IIC0>;
                dmas = <&dmac0 0x61>, <&dmac0 0x62>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7791", "renesas,rmobile-iic";
                reg = <0 0xe6510000 0 0x425>;
-               interrupts = <0 175 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_IIC1>;
                dmas = <&dmac0 0x65>, <&dmac0 0x66>;
                dma-names = "tx", "rx";
        mmcif0: mmc@ee200000 {
                compatible = "renesas,mmcif-r8a7791", "renesas,sh-mmcif";
                reg = <0 0xee200000 0 0x80>;
-               interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_MMCIF0>;
                dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
                dma-names = "tx", "rx";
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-r8a7791";
                reg = <0 0xee100000 0 0x328>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_SDHI0>;
                dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
                dma-names = "tx", "rx";
        sdhi1: sd@ee140000 {
                compatible = "renesas,sdhi-r8a7791";
                reg = <0 0xee140000 0 0x100>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_SDHI1>;
                dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
                dma-names = "tx", "rx";
        sdhi2: sd@ee160000 {
                compatible = "renesas,sdhi-r8a7791";
                reg = <0 0xee160000 0 0x100>;
-               interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_SDHI2>;
                dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
                dma-names = "tx", "rx";
        };
 
        scifa0: serial@e6c40000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c40000 0 64>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x21>, <&dmac0 0x22>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa1: serial@e6c50000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c50000 0 64>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x25>, <&dmac0 0x26>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa2: serial@e6c60000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c60000 0 64>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x27>, <&dmac0 0x28>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa3: serial@e6c70000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c70000 0 64>;
-               interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7791_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa4: serial@e6c78000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c78000 0 64>;
-               interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7791_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa5: serial@e6c80000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c80000 0 64>;
-               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7791_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x23>, <&dmac0 0x24>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb0: serial@e6c20000 {
-               compatible = "renesas,scifb-r8a7791", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7791",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c20000 0 64>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb1: serial@e6c30000 {
-               compatible = "renesas,scifb-r8a7791", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7791",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c30000 0 64>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb2: serial@e6ce0000 {
-               compatible = "renesas,scifb-r8a7791", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7791",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6ce0000 0 64>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif0: serial@e6e60000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e60000 0 64>;
-               interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif1: serial@e6e68000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e68000 0 64>;
-               interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif2: serial@e6e58000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e58000 0 64>;
-               interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif3: serial@e6ea8000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ea8000 0 64>;
-               interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF3>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif4: serial@e6ee0000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee0000 0 64>;
-               interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF4>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif5: serial@e6ee8000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee8000 0 64>;
-               interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF5>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif0: serial@e62c0000 {
-               compatible = "renesas,hscif-r8a7791", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7791",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c0000 0 96>;
-               interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_HSCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_HSCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif1: serial@e62c8000 {
-               compatible = "renesas,hscif-r8a7791", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7791",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c8000 0 96>;
-               interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_HSCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_HSCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif2: serial@e62d0000 {
-               compatible = "renesas,hscif-r8a7791", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7791",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62d0000 0 96>;
-               interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_HSCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_HSCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        ether: ethernet@ee700000 {
                compatible = "renesas,ether-r8a7791";
                reg = <0 0xee700000 0 0x400>;
-               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
                compatible = "renesas,etheravb-r8a7791",
                             "renesas,etheravb-rcar-gen2";
                reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
-               interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_ETHERAVB>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        sata0: sata@ee300000 {
                compatible = "renesas,sata-r8a7791";
                reg = <0 0xee300000 0 0x2000>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_SATA0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sata1: sata@ee500000 {
                compatible = "renesas,sata-r8a7791";
                reg = <0 0xee500000 0 0x2000>;
-               interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_SATA1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        hsusb: usb@e6590000 {
-               compatible = "renesas,usbhs-r8a7791";
+               compatible = "renesas,usbhs-r8a7791", "renesas,rcar-gen2-usbhs";
                reg = <0 0xe6590000 0 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7791_CLK_HSUSB>;
                dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
                       <&usb_dmac1 0>, <&usb_dmac1 1>;
        vin0: video@e6ef0000 {
                compatible = "renesas,vin-r8a7791";
                reg = <0 0xe6ef0000 0 0x1000>;
-               interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_VIN0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin1: video@e6ef1000 {
                compatible = "renesas,vin-r8a7791";
                reg = <0 0xe6ef1000 0 0x1000>;
-               interrupts = <0 189 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_VIN1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin2: video@e6ef2000 {
                compatible = "renesas,vin-r8a7791";
                reg = <0 0xe6ef2000 0 0x1000>;
-               interrupts = <0 190 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_VIN2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vsp1@fe928000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe928000 0 0x8000>;
-               interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_VSP1_S>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe930000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe930000 0 0x8000>;
-               interrupts = <0 246 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU0>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe938000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe938000 0 0x8000>;
-               interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU1>;
                power-domains = <&cpg_clocks>;
 
                reg = <0 0xfeb00000 0 0x40000>,
                      <0 0xfeb90000 0 0x1c>;
                reg-names = "du", "lvds.0";
-               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 268 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7791_CLK_DU0>,
                         <&mstp7_clks R8A7791_CLK_DU1>,
                         <&mstp7_clks R8A7791_CLK_LVDS0>;
        can0: can@e6e80000 {
                compatible = "renesas,can-r8a7791";
                reg = <0 0xe6e80000 0 0x1000>;
-               interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
                         <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
                clock-names = "clkp1", "clkp2", "can_clk";
        can1: can@e6e88000 {
                compatible = "renesas,can-r8a7791";
                reg = <0 0xe6e88000 0 0x1000>;
-               interrupts = <0 187 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_RCAN1>,
                         <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
                clock-names = "clkp1", "clkp2", "can_clk";
        jpu: jpeg-codec@fe980000 {
                compatible = "renesas,jpu-r8a7791";
                reg = <0 0xfe980000 0 0x10300>;
-               interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_JPU>;
                power-domains = <&cpg_clocks>;
        };
                        status = "disabled";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
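+               /*
+                * Boards that feed SCIF_CLK override the frequency and set
+                * status = "okay"; an example override appears in the board
+                * file later in this diff.
+                */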
+
                /* External USB clock - can be overridden by the board */
                usb_extal_clk: usb_extal_clk {
                        compatible = "fixed-clock";
        qspi: spi@e6b10000 {
                compatible = "renesas,qspi-r8a7791", "renesas,qspi";
                reg = <0 0xe6b10000 0 0x2c>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_QSPI_MOD>;
                dmas = <&dmac0 0x17>, <&dmac0 0x18>;
                dma-names = "tx", "rx";
        msiof0: spi@e6e20000 {
                compatible = "renesas,msiof-r8a7791";
                reg = <0 0xe6e20000 0 0x0064>;
-               interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
                dmas = <&dmac0 0x51>, <&dmac0 0x52>;
                dma-names = "tx", "rx";
        msiof1: spi@e6e10000 {
                compatible = "renesas,msiof-r8a7791";
                reg = <0 0xe6e10000 0 0x0064>;
-               interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_MSIOF1>;
                dmas = <&dmac0 0x55>, <&dmac0 0x56>;
                dma-names = "tx", "rx";
        msiof2: spi@e6e00000 {
                compatible = "renesas,msiof-r8a7791";
                reg = <0 0xe6e00000 0 0x0064>;
-               interrupts = <0 158 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_MSIOF2>;
                dmas = <&dmac0 0x41>, <&dmac0 0x42>;
                dma-names = "tx", "rx";
        xhci: usb@ee000000 {
                compatible = "renesas,xhci-r8a7791";
                reg = <0 0xee000000 0 0xc00>;
-               interrupts = <0 101 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_SSUSB>;
                power-domains = <&cpg_clocks>;
                phys = <&usb2 1>;
                device_type = "pci";
                reg = <0 0xee090000 0 0xc00>,
                      <0 0xee080000 0 0x1100>;
-               interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                device_type = "pci";
                reg = <0 0xee0d0000 0 0xc00>,
                      <0 0xee0c0000 0 0x1100>;
-               interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                /* Map all possible DDR as inbound ranges */
                dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000
                              0x43000000 2 0x00000000 2 0x00000000 1 0x00000000>;
-               interrupts = <0 116 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 117 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 118 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 116 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_PCIEC>, <&pcie_bus_clk>;
                clock-names = "pcie", "pcie_bus";
                power-domains = <&cpg_clocks>;
        ipmmu_sy0: mmu@e6280000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xe6280000 0 0x1000>;
-               interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 224 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_sy1: mmu@e6290000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xe6290000 0 0x1000>;
-               interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_ds: mmu@e6740000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xe6740000 0 0x1000>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mp: mmu@ec680000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xec680000 0 0x1000>;
-               interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mx: mmu@fe951000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xfe951000 0 0x1000>;
-               interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 221 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_rt: mmu@ffc80000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xffc80000 0 0x1000>;
-               interrupts = <0 307 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_gp: mmu@e62a0000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xe62a0000 0 0x1000>;
-               interrupts = <0 260 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 261 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
 
                rcar_sound,src {
                        src0: src@0 {
-                               interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x85>, <&audma1 0x9a>;
                                dma-names = "rx", "tx";
                        };
                        src1: src@1 {
-                               interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x87>, <&audma1 0x9c>;
                                dma-names = "rx", "tx";
                        };
                        src2: src@2 {
-                               interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x89>, <&audma1 0x9e>;
                                dma-names = "rx", "tx";
                        };
                        src3: src@3 {
-                               interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8b>, <&audma1 0xa0>;
                                dma-names = "rx", "tx";
                        };
                        src4: src@4 {
-                               interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8d>, <&audma1 0xb0>;
                                dma-names = "rx", "tx";
                        };
                        src5: src@5 {
-                               interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8f>, <&audma1 0xb2>;
                                dma-names = "rx", "tx";
                        };
                        src6: src@6 {
-                               interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x91>, <&audma1 0xb4>;
                                dma-names = "rx", "tx";
                        };
                        src7: src@7 {
-                               interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x93>, <&audma1 0xb6>;
                                dma-names = "rx", "tx";
                        };
                        src8: src@8 {
-                               interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x95>, <&audma1 0xb8>;
                                dma-names = "rx", "tx";
                        };
                        src9: src@9 {
-                               interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x97>, <&audma1 0xba>;
                                dma-names = "rx", "tx";
                        };
 
                rcar_sound,ssi {
                        ssi0: ssi@0 {
-                               interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi1: ssi@1 {
-                                interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi2: ssi@2 {
-                               interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi3: ssi@3 {
-                               interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi4: ssi@4 {
-                               interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi5: ssi@5 {
-                               interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi6: ssi@6 {
-                               interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi7: ssi@7 {
-                               interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi8: ssi@8 {
-                               interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi9: ssi@9 {
-                               interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
index baa59fe8429869ac2bd975e5e7da5a443df122d6..cfe142c2ba3890316977021b1a64c788669e118b 100644 (file)
@@ -8,6 +8,34 @@
  * kind, whether express or implied.
  */
 
+/*
+ * SSI-AK4643
+ *
+ * SW1: 1: AK4643
+ *      2: CN22
+ *      3: ADV7511
+ *
+ * These commands are required for Playback/Capture:
+ *
+ *     amixer set "LINEOUT Mixer DACL" on
+ *     amixer set "DVC Out" 100%
+ *     amixer set "DVC In" 100%
+ *
+ * Mute can be enabled with:
+ *
+ *     amixer set "DVC Out Mute" on
+ *     amixer set "DVC In Mute" on
+ *
+ * Volume ramping can be used as follows:
+ *
+ *     amixer set "DVC Out Ramp Up Rate"   "0.125 dB/64 steps"
+ *     amixer set "DVC Out Ramp Down Rate" "0.125 dB/512 steps"
+ *     amixer set "DVC Out Ramp" on
+ *     aplay xxx.wav &
+ *     amixer set "DVC Out"  80%  // Volume Down
+ *     amixer set "DVC Out" 100%  // Volume Up
+ */
+
 /dts-v1/;
 #include "r8a7793.dtsi"
 #include <dt-bindings/gpio/gpio.h>
                device_type = "memory";
                reg = <0 0x40000000 0 0x40000000>;
        };
+
+       gpio-keys {
+               compatible = "gpio-keys";
+
+               key-1 {
+                       gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_1>;
+                       label = "SW2-1";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-2 {
+                       gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_2>;
+                       label = "SW2-2";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-3 {
+                       gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_3>;
+                       label = "SW2-3";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-4 {
+                       gpios = <&gpio5 3 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_4>;
+                       label = "SW2-4";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-a {
+                       gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_A>;
+                       label = "SW30";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-b {
+                       gpios = <&gpio7 1 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_B>;
+                       label = "SW31";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-c {
+                       gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_C>;
+                       label = "SW32";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-d {
+                       gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_D>;
+                       label = "SW33";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-e {
+                       gpios = <&gpio7 4 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_E>;
+                       label = "SW34";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-f {
+                       gpios = <&gpio7 5 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_F>;
+                       label = "SW35";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-g {
+                       gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_G>;
+                       label = "SW36";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+       };
+
+       leds {
+               compatible = "gpio-leds";
+               led6 {
+                       gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+                       label = "LED6";
+               };
+               led7 {
+                       gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>;
+                       label = "LED7";
+               };
+               led8 {
+                       gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
+                       label = "LED8";
+               };
+       };
+
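+       /*
+        * Fixed 11.2896 MHz clock (256 * 44.1 kHz) feeding the AK4643
+        * codec below, presumably as its audio master clock.
+        */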
+       audio_clock: clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <11289600>;
+               clock-output-names = "audio_clock";
+       };
+
+       rsnd_ak4643: sound {
+               compatible = "simple-audio-card";
+
+               simple-audio-card,format = "left_j";
+               simple-audio-card,bitclock-master = <&sndcodec>;
+               simple-audio-card,frame-master = <&sndcodec>;
+
+               sndcpu: simple-audio-card,cpu {
+                       sound-dai = <&rcar_sound>;
+               };
+
+               sndcodec: simple-audio-card,codec {
+                       sound-dai = <&ak4643>;
+                       clocks = <&audio_clock>;
+               };
+       };
+
+       hdmi-out {
+               compatible = "hdmi-connector";
+               type = "a";
+
+               port {
+                       hdmi_con: endpoint {
+                               remote-endpoint = <&adv7511_out>;
+                       };
+               };
+       };
+
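+       /*
+        * External dot clocks for the DU ("dclkin.0"/"dclkin.1" below);
+        * 148.5 MHz and 74.25 MHz are the standard HD video pixel rates.
+        */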
+       x2_clk: x2-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <74250000>;
+       };
+
+       x13_clk: x13-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <148500000>;
+       };
+};
+
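+/*
+ * The DU RGB output drives the ADV7511W HDMI encoder (hooked up on I2C2
+ * below); port@1 is left available for an LVDS connector.
+ */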
+&du {
+       pinctrl-0 = <&du_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       clocks = <&mstp7_clks R8A7793_CLK_DU0>,
+                <&mstp7_clks R8A7793_CLK_DU1>,
+                <&mstp7_clks R8A7793_CLK_LVDS0>,
+                <&x13_clk>, <&x2_clk>;
+       clock-names = "du.0", "du.1", "lvds.0",
+                     "dclkin.0", "dclkin.1";
+
+       ports {
+               port@0 {
+                       endpoint {
+                               remote-endpoint = <&adv7511_in>;
+                       };
+               };
+               port@1 {
+                       lvds_connector: endpoint {
+                       };
+               };
+       };
 };
 
 &extal_clk {
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
+       i2c2_pins: i2c2 {
+               renesas,groups = "i2c2";
+               renesas,function = "i2c2";
+       };
+
+       du_pins: du {
+               renesas,groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
+               renesas,function = "du";
+       };
+
        scif0_pins: serial0 {
                renesas,groups = "scif0_data_d";
                renesas,function = "scif0";
                renesas,function = "scif1";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
                renesas,groups = "qspi_ctrl", "qspi_data4";
                renesas,function = "qspi";
        };
+
+       sound_pins: sound {
+               renesas,groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+               renesas,function = "ssi";
+       };
+
+       sound_clk_pins: sound_clk {
+               renesas,groups = "audio_clk_a";
+               renesas,function = "audio_clk";
+       };
 };
 
 &ether {
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &qspi {
        pinctrl-0 = <&qspi_pins>;
        pinctrl-names = "default";
                };
        };
 };
+
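+/*
+ * I2C2 hosts the AK4643 audio codec, the ADV7511W HDMI encoder and an
+ * R1EX24002 (24c02-compatible) EEPROM.
+ */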
+&i2c2 {
+       pinctrl-0 = <&i2c2_pins>;
+       pinctrl-names = "default";
+
+       status = "okay";
+       clock-frequency = <100000>;
+
+       ak4643: codec@12 {
+               compatible = "asahi-kasei,ak4643";
+               #sound-dai-cells = <0>;
+               reg = <0x12>;
+       };
+
+       hdmi@39 {
+               compatible = "adi,adv7511w";
+               reg = <0x39>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
+
+               adi,input-depth = <8>;
+               adi,input-colorspace = "rgb";
+               adi,input-clock = "1x";
+               adi,input-style = <1>;
+               adi,input-justification = "evenly";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7511_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               adv7511_out: endpoint {
+                                       remote-endpoint = <&hdmi_con>;
+                               };
+                       };
+               };
+       };
+
+       eeprom@50 {
+               compatible = "renesas,r1ex24002", "atmel,24c02";
+               reg = <0x50>;
+               pagesize = <16>;
+       };
+};
+
+&rcar_sound {
+       pinctrl-0 = <&sound_pins &sound_clk_pins>;
+       pinctrl-names = "default";
+
+       /* Single DAI */
+       #sound-dai-cells = <0>;
+
+       status = "okay";
+
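+       /* Playback path: SSI0 -> SRC2 -> DVC0; capture path: SSI1 -> SRC3 -> DVC1 */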
+       rcar_sound,dai {
+               dai0 {
+                       playback = <&ssi0 &src2 &dvc0>;
+                       capture  = <&ssi1 &src3 &dvc1>;
+               };
+       };
+};
+
+&ssi1 {
+       shared-pin;
+};
index aef9e69d6c26ae7cfab58fb5418e6771e0f226ba..45dba1c79a43c28782726685874811b8f7a90650 100644 (file)
        #size-cells = <2>;
 
        aliases {
+               i2c0 = &i2c0;
+               i2c1 = &i2c1;
+               i2c2 = &i2c2;
+               i2c3 = &i2c3;
+               i2c4 = &i2c4;
+               i2c5 = &i2c5;
+               i2c6 = &i2c6;
+               i2c7 = &i2c7;
+               i2c8 = &i2c8;
                spi0 = &qspi;
        };
 
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6050000 0 0x50>;
-               interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
@@ -73,7 +82,7 @@
        gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6051000 0 0x50>;
-               interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 26>;
@@ -86,7 +95,7 @@
        gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6052000 0 0x50>;
-               interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6053000 0 0x50>;
-               interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6054000 0 0x50>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6055000 0 0x50>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 32>;
        gpio6: gpio@e6055400 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6055400 0 0x50>;
-               interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 192 32>;
        gpio7: gpio@e6055800 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6055800 0 0x50>;
-               interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 224 26>;
        thermal@e61f0000 {
                compatible = "renesas,thermal-r8a7793", "renesas,rcar-thermal";
                reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A7793_CLK_THERMAL>;
                power-domains = <&cpg_clocks>;
        };
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        cmt0: timer@ffca0000 {
                compatible = "renesas,cmt-48-r8a7793", "renesas,cmt-48-gen2";
                reg = <0 0xffca0000 0 0x1004>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7793_CLK_CMT0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-r8a7793", "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 122 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 123 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 124 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 125 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 126 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7793_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 12 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 13 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 14 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 15 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 16 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 17 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7793_CLK_IRQC>;
                power-domains = <&cpg_clocks>;
        };
 
-       pfc: pfc@e6060000 {
-               compatible = "renesas,pfc-r8a7793";
-               reg = <0 0xe6060000 0 0x250>;
-       };
-
        dmac0: dma-controller@e6700000 {
                compatible = "renesas,dmac-r8a7793", "renesas,rcar-dmac";
                reg = <0 0xe6700000 0 0x20000>;
-               interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
-                             0 200 IRQ_TYPE_LEVEL_HIGH
-                             0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH
-                             0 205 IRQ_TYPE_LEVEL_HIGH
-                             0 206 IRQ_TYPE_LEVEL_HIGH
-                             0 207 IRQ_TYPE_LEVEL_HIGH
-                             0 208 IRQ_TYPE_LEVEL_HIGH
-                             0 209 IRQ_TYPE_LEVEL_HIGH
-                             0 210 IRQ_TYPE_LEVEL_HIGH
-                             0 211 IRQ_TYPE_LEVEL_HIGH
-                             0 212 IRQ_TYPE_LEVEL_HIGH
-                             0 213 IRQ_TYPE_LEVEL_HIGH
-                             0 214 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        dmac1: dma-controller@e6720000 {
                compatible = "renesas,dmac-r8a7793", "renesas,rcar-dmac";
                reg = <0 0xe6720000 0 0x20000>;
-               interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                             0 216 IRQ_TYPE_LEVEL_HIGH
-                             0 217 IRQ_TYPE_LEVEL_HIGH
-                             0 218 IRQ_TYPE_LEVEL_HIGH
-                             0 219 IRQ_TYPE_LEVEL_HIGH
-                             0 308 IRQ_TYPE_LEVEL_HIGH
-                             0 309 IRQ_TYPE_LEVEL_HIGH
-                             0 310 IRQ_TYPE_LEVEL_HIGH
-                             0 311 IRQ_TYPE_LEVEL_HIGH
-                             0 312 IRQ_TYPE_LEVEL_HIGH
-                             0 313 IRQ_TYPE_LEVEL_HIGH
-                             0 314 IRQ_TYPE_LEVEL_HIGH
-                             0 315 IRQ_TYPE_LEVEL_HIGH
-                             0 316 IRQ_TYPE_LEVEL_HIGH
-                             0 317 IRQ_TYPE_LEVEL_HIGH
-                             0 318 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
                dma-channels = <15>;
        };
 
+       audma0: dma-controller@ec700000 {
+               compatible = "renesas,dmac-r8a7793", "renesas,rcar-dmac";
+               reg = <0 0xec700000 0 0x10000>;
+               interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "error",
+                               "ch0", "ch1", "ch2", "ch3",
+                               "ch4", "ch5", "ch6", "ch7",
+                               "ch8", "ch9", "ch10", "ch11",
+                               "ch12";
+               clocks = <&mstp5_clks R8A7793_CLK_AUDIO_DMAC0>;
+               clock-names = "fck";
+               power-domains = <&cpg_clocks>;
+               #dma-cells = <1>;
+               dma-channels = <13>;
+       };
+
+       audma1: dma-controller@ec720000 {
+               compatible = "renesas,dmac-r8a7793", "renesas,rcar-dmac";
+               reg = <0 0xec720000 0 0x10000>;
+               interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "error",
+                               "ch0", "ch1", "ch2", "ch3",
+                               "ch4", "ch5", "ch6", "ch7",
+                               "ch8", "ch9", "ch10", "ch11",
+                               "ch12";
+               clocks = <&mstp5_clks R8A7793_CLK_AUDIO_DMAC1>;
+               clock-names = "fck";
+               power-domains = <&cpg_clocks>;
+               #dma-cells = <1>;
+               dma-channels = <13>;
+       };
+
+       /* The memory map in the User's Manual maps the cores to bus numbers */
+       i2c0: i2c@e6508000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6508000 0 0x40>;
+               interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C0>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c1: i2c@e6518000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6518000 0 0x40>;
+               interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C1>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c2: i2c@e6530000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6530000 0 0x40>;
+               interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C2>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c3: i2c@e6540000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6540000 0 0x40>;
+               interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C3>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c4: i2c@e6520000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6520000 0 0x40>;
+               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C4>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c5: i2c@e6528000 {
+               /* doesn't need pinmux */
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6528000 0 0x40>;
+               interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C5>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <110>;
+               status = "disabled";
+       };
+
+       i2c6: i2c@e60b0000 {
+               /* doesn't need pinmux */
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,iic-r8a7793", "renesas,rmobile-iic";
+               reg = <0 0xe60b0000 0 0x425>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_IICDVFS>;
+               dmas = <&dmac0 0x77>, <&dmac0 0x78>;
+               dma-names = "tx", "rx";
+               power-domains = <&cpg_clocks>;
+               status = "disabled";
+       };
+
+       i2c7: i2c@e6500000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,iic-r8a7793", "renesas,rmobile-iic";
+               reg = <0 0xe6500000 0 0x425>;
+               interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp3_clks R8A7793_CLK_IIC0>;
+               dmas = <&dmac0 0x61>, <&dmac0 0x62>;
+               dma-names = "tx", "rx";
+               power-domains = <&cpg_clocks>;
+               status = "disabled";
+       };
+
+       i2c8: i2c@e6510000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,iic-r8a7793", "renesas,rmobile-iic";
+               reg = <0 0xe6510000 0 0x425>;
+               interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp3_clks R8A7793_CLK_IIC1>;
+               dmas = <&dmac0 0x65>, <&dmac0 0x66>;
+               dma-names = "tx", "rx";
+               power-domains = <&cpg_clocks>;
+               status = "disabled";
+       };
+
+       pfc: pfc@e6060000 {
+               compatible = "renesas,pfc-r8a7793";
+               reg = <0 0xe6060000 0 0x250>;
+       };
+
        scifa0: serial@e6c40000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c40000 0 64>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x21>, <&dmac0 0x22>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa1: serial@e6c50000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c50000 0 64>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x25>, <&dmac0 0x26>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa2: serial@e6c60000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c60000 0 64>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x27>, <&dmac0 0x28>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa3: serial@e6c70000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c70000 0 64>;
-               interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7793_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa4: serial@e6c78000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c78000 0 64>;
-               interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7793_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa5: serial@e6c80000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c80000 0 64>;
-               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7793_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x23>, <&dmac0 0x24>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb0: serial@e6c20000 {
-               compatible = "renesas,scifb-r8a7793", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7793",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c20000 0 64>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb1: serial@e6c30000 {
-               compatible = "renesas,scifb-r8a7793", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7793",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c30000 0 64>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb2: serial@e6ce0000 {
-               compatible = "renesas,scifb-r8a7793", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7793",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6ce0000 0 64>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif0: serial@e6e60000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e60000 0 64>;
-               interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif1: serial@e6e68000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e68000 0 64>;
-               interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif2: serial@e6e58000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e58000 0 64>;
-               interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif3: serial@e6ea8000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ea8000 0 64>;
-               interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF3>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif4: serial@e6ee0000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee0000 0 64>;
-               interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF4>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif5: serial@e6ee8000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee8000 0 64>;
-               interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF5>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif0: serial@e62c0000 {
-               compatible = "renesas,hscif-r8a7793", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7793",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c0000 0 96>;
-               interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_HSCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_HSCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif1: serial@e62c8000 {
-               compatible = "renesas,hscif-r8a7793", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7793",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c8000 0 96>;
-               interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_HSCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_HSCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif2: serial@e62d0000 {
-               compatible = "renesas,hscif-r8a7793", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7793",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62d0000 0 96>;
-               interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_HSCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_HSCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        ether: ethernet@ee700000 {
                compatible = "renesas,ether-r8a7793";
                reg = <0 0xee700000 0 0x400>;
-               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7793_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
        qspi: spi@e6b10000 {
                compatible = "renesas,qspi-r8a7793", "renesas,qspi";
                reg = <0 0xe6b10000 0 0x2c>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7793_CLK_QSPI_MOD>;
                dmas = <&dmac0 0x17>, <&dmac0 0x18>;
                dma-names = "tx", "rx";
                reg = <0 0xfeb00000 0 0x40000>,
                      <0 0xfeb90000 0 0x1c>;
                reg-names = "du", "lvds.0";
-               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 268 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7793_CLK_DU0>,
                         <&mstp7_clks R8A7793_CLK_DU1>,
                         <&mstp7_clks R8A7793_CLK_LVDS0>;
                        clock-output-names = "extal";
                };
 
+               /*
+                * The external audio clocks are configured as 0 Hz fixed frequency clocks by
+                * default. Boards that provide audio clocks should override them.
+                */
+               audio_clk_a: audio_clk_a {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "audio_clk_a";
+               };
+               audio_clk_b: audio_clk_b {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "audio_clk_b";
+               };
+               audio_clk_c: audio_clk_c {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "audio_clk_c";
+               };
+
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
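The audio_clk_a/b/c and scif_clk placeholders above are deliberately registered at 0 Hz (and scif_clk additionally left disabled); the real rate is expected to come from the board .dts, as the board changes later in this patch do for scif_clk. A minimal override sketch — the audio clock rate shown is illustrative only, not taken from this patch:

        &audio_clk_a {
                /* Rate of the board's audio oscillator; example value only. */
                clock-frequency = <22579200>;
        };

        &scif_clk {
                clock-frequency = <14745600>;
                status = "okay";
        };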
+
                /* Special CPG clocks */
                cpg_clocks: cpg_clocks@e6150000 {
                        compatible = "renesas,r8a7793-cpg-clocks",
                        clock-mult = <1>;
                        clock-output-names = "p";
                };
+               m2_clk: m2_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+                       #clock-cells = <0>;
+                       clock-div = <8>;
+                       clock-mult = <1>;
+                       clock-output-names = "m2";
+               };
                rclk_clk: rclk_clk {
                        compatible = "fixed-factor-clock";
                        clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
                mstp5_clks: mstp5_clks@e6150144 {
                        compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
                        reg = <0 0xe6150144 0 4>, <0 0xe615003c 0 4>;
-                       clocks = <&extal_clk>;
+                       clocks = <&hp_clk>, <&hp_clk>, <&extal_clk>;
                        #clock-cells = <1>;
-                       clock-indices = <R8A7793_CLK_THERMAL>;
-                       clock-output-names = "thermal";
+                       clock-indices = <R8A7793_CLK_AUDIO_DMAC0 R8A7793_CLK_AUDIO_DMAC1
+                                        R8A7793_CLK_THERMAL>;
+                       clock-output-names = "audmac0", "audmac1", "thermal";
                };
                mstp7_clks: mstp7_clks@e615014c {
                        compatible = "renesas,r8a7793-mstp-clocks",
                        reg = <0 0xe6150994 0 4>, <0 0xe61509a4 0 4>;
                        clocks = <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
                                 <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
-                                <&cpg_clocks R8A7793_CLK_QSPI>;
+                                <&cpg_clocks R8A7793_CLK_QSPI>, <&hp_clk>,
+                                <&cp_clk>, <&hp_clk>, <&hp_clk>, <&hp_clk>,
+                                <&hp_clk>, <&hp_clk>;
                        #clock-cells = <1>;
                        clock-indices = <
                                R8A7793_CLK_GPIO7 R8A7793_CLK_GPIO6
                                R8A7793_CLK_GPIO5 R8A7793_CLK_GPIO4
                                R8A7793_CLK_GPIO3 R8A7793_CLK_GPIO2
                                R8A7793_CLK_GPIO1 R8A7793_CLK_GPIO0
-                               R8A7793_CLK_QSPI_MOD
+                               R8A7793_CLK_QSPI_MOD R8A7793_CLK_I2C5
+                               R8A7793_CLK_IICDVFS R8A7793_CLK_I2C4
+                               R8A7793_CLK_I2C3 R8A7793_CLK_I2C2
+                               R8A7793_CLK_I2C1 R8A7793_CLK_I2C0
                        >;
                        clock-output-names =
                                "gpio7", "gpio6", "gpio5", "gpio4",
                                "gpio3", "gpio2", "gpio1", "gpio0",
-                               "qspi_mod";
+                               "qspi_mod", "i2c5", "i2c6", "i2c4",
+                               "i2c3", "i2c2", "i2c1", "i2c0";
+               };
+               mstp10_clks: mstp10_clks@e6150998 {
+                       compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
+                       reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
+                       clocks = <&p_clk>,
+                               <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+                               <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+                               <&p_clk>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>;
+
+                       #clock-cells = <1>;
+                       clock-indices = <
+                               R8A7793_CLK_SSI_ALL
+                               R8A7793_CLK_SSI9 R8A7793_CLK_SSI8 R8A7793_CLK_SSI7 R8A7793_CLK_SSI6 R8A7793_CLK_SSI5
+                               R8A7793_CLK_SSI4 R8A7793_CLK_SSI3 R8A7793_CLK_SSI2 R8A7793_CLK_SSI1 R8A7793_CLK_SSI0
+                               R8A7793_CLK_SCU_ALL
+                               R8A7793_CLK_SCU_DVC1 R8A7793_CLK_SCU_DVC0
+                               R8A7793_CLK_SCU_CTU1_MIX1 R8A7793_CLK_SCU_CTU0_MIX0
+                               R8A7793_CLK_SCU_SRC9 R8A7793_CLK_SCU_SRC8 R8A7793_CLK_SCU_SRC7 R8A7793_CLK_SCU_SRC6 R8A7793_CLK_SCU_SRC5
+                               R8A7793_CLK_SCU_SRC4 R8A7793_CLK_SCU_SRC3 R8A7793_CLK_SCU_SRC2 R8A7793_CLK_SCU_SRC1 R8A7793_CLK_SCU_SRC0
+                       >;
+                       clock-output-names =
+                               "ssi-all",
+                               "ssi9", "ssi8", "ssi7", "ssi6", "ssi5",
+                               "ssi4", "ssi3", "ssi2", "ssi1", "ssi0",
+                               "scu-all",
+                               "scu-dvc1", "scu-dvc0",
+                               "scu-ctu1-mix1", "scu-ctu0-mix0",
+                               "scu-src9", "scu-src8", "scu-src7", "scu-src6", "scu-src5",
+                               "scu-src4", "scu-src3", "scu-src2", "scu-src1", "scu-src0";
                };
                mstp11_clks: mstp11_clks@e615099c {
                        compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
        ipmmu_sy0: mmu@e6280000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xe6280000 0 0x1000>;
-               interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 224 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_sy1: mmu@e6290000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xe6290000 0 0x1000>;
-               interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_ds: mmu@e6740000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xe6740000 0 0x1000>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mp: mmu@ec680000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xec680000 0 0x1000>;
-               interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mx: mmu@fe951000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xfe951000 0 0x1000>;
-               interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 221 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_rt: mmu@ffc80000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xffc80000 0 0x1000>;
-               interrupts = <0 307 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_gp: mmu@e62a0000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xe62a0000 0 0x1000>;
-               interrupts = <0 260 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 261 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
+
+       rcar_sound: sound@ec500000 {
+               /*
+                * #sound-dai-cells is required
+                *
+                * Single DAI : #sound-dai-cells = <0>;         <&rcar_sound>;
+                * Multi  DAI : #sound-dai-cells = <1>;         <&rcar_sound N>;
+                */
+               compatible = "renesas,rcar_sound-r8a7793", "renesas,rcar_sound-gen2";
+               reg =   <0 0xec500000 0 0x1000>, /* SCU */
+                       <0 0xec5a0000 0 0x100>,  /* ADG */
+                       <0 0xec540000 0 0x1000>, /* SSIU */
+                       <0 0xec541000 0 0x280>,  /* SSI */
+                       <0 0xec740000 0 0x200>;  /* Audio DMAC peri peri */
+               reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
+               clocks = <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+                       <&mstp10_clks R8A7793_CLK_SSI9>, <&mstp10_clks R8A7793_CLK_SSI8>,
+                       <&mstp10_clks R8A7793_CLK_SSI7>, <&mstp10_clks R8A7793_CLK_SSI6>,
+                       <&mstp10_clks R8A7793_CLK_SSI5>, <&mstp10_clks R8A7793_CLK_SSI4>,
+                       <&mstp10_clks R8A7793_CLK_SSI3>, <&mstp10_clks R8A7793_CLK_SSI2>,
+                       <&mstp10_clks R8A7793_CLK_SSI1>, <&mstp10_clks R8A7793_CLK_SSI0>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC9>, <&mstp10_clks R8A7793_CLK_SCU_SRC8>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC7>, <&mstp10_clks R8A7793_CLK_SCU_SRC6>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC5>, <&mstp10_clks R8A7793_CLK_SCU_SRC4>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC3>, <&mstp10_clks R8A7793_CLK_SCU_SRC2>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC1>, <&mstp10_clks R8A7793_CLK_SCU_SRC0>,
+                       <&mstp10_clks R8A7793_CLK_SCU_DVC0>, <&mstp10_clks R8A7793_CLK_SCU_DVC1>,
+                       <&audio_clk_a>, <&audio_clk_b>, <&audio_clk_c>, <&m2_clk>;
+               clock-names = "ssi-all",
+                               "ssi.9", "ssi.8", "ssi.7", "ssi.6", "ssi.5",
+                               "ssi.4", "ssi.3", "ssi.2", "ssi.1", "ssi.0",
+                               "src.9", "src.8", "src.7", "src.6", "src.5",
+                               "src.4", "src.3", "src.2", "src.1", "src.0",
+                               "dvc.0", "dvc.1",
+                               "clk_a", "clk_b", "clk_c", "clk_i";
+               power-domains = <&cpg_clocks>;
+
+               status = "disabled";
+
+               rcar_sound,dvc {
+                       dvc0: dvc@0 {
+                               dmas = <&audma0 0xbc>;
+                               dma-names = "tx";
+                       };
+                       dvc1: dvc@1 {
+                               dmas = <&audma0 0xbe>;
+                               dma-names = "tx";
+                       };
+               };
+
+               rcar_sound,src {
+                       src0: src@0 {
+                               interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x85>, <&audma1 0x9a>;
+                               dma-names = "rx", "tx";
+                       };
+                       src1: src@1 {
+                               interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x87>, <&audma1 0x9c>;
+                               dma-names = "rx", "tx";
+                       };
+                       src2: src@2 {
+                               interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x89>, <&audma1 0x9e>;
+                               dma-names = "rx", "tx";
+                       };
+                       src3: src@3 {
+                               interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x8b>, <&audma1 0xa0>;
+                               dma-names = "rx", "tx";
+                       };
+                       src4: src@4 {
+                               interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x8d>, <&audma1 0xb0>;
+                               dma-names = "rx", "tx";
+                       };
+                       src5: src@5 {
+                               interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x8f>, <&audma1 0xb2>;
+                               dma-names = "rx", "tx";
+                       };
+                       src6: src@6 {
+                               interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x91>, <&audma1 0xb4>;
+                               dma-names = "rx", "tx";
+                       };
+                       src7: src@7 {
+                               interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x93>, <&audma1 0xb6>;
+                               dma-names = "rx", "tx";
+                       };
+                       src8: src@8 {
+                               interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x95>, <&audma1 0xb8>;
+                               dma-names = "rx", "tx";
+                       };
+                       src9: src@9 {
+                               interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x97>, <&audma1 0xba>;
+                               dma-names = "rx", "tx";
+                       };
+               };
+
+               rcar_sound,ssi {
+                       ssi0: ssi@0 {
+                               interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi1: ssi@1 {
+                               interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi2: ssi@2 {
+                               interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi3: ssi@3 {
+                               interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi4: ssi@4 {
+                               interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi5: ssi@5 {
+                               interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi6: ssi@6 {
+                               interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi7: ssi@7 {
+                               interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi8: ssi@8 {
+                               interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi9: ssi@9 {
+                               interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+               };
+       };
 };
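The #sound-dai-cells note in the rcar_sound node above distinguishes the single-DAI form (<&rcar_sound>, requiring #sound-dai-cells = <0>) from the multi-DAI form (<&rcar_sound N>). A minimal single-DAI consumer sketch using the generic simple-audio-card binding; it assumes the board sets #sound-dai-cells = <0> on &rcar_sound and provides an ak4643 codec node — both assumptions are illustrative and not part of this patch:

        sound {
                compatible = "simple-audio-card";
                /* simple-audio-card,format and clocking properties omitted for brevity */

                simple-audio-card,cpu {
                        sound-dai = <&rcar_sound>;      /* single-DAI form */
                };

                simple-audio-card,codec {
                        sound-dai = <&ak4643>;          /* hypothetical codec node */
                };
        };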
index 2394e4883786f13eb0a4b005fb6a81e456f9a3f0..ca9bc4fff28751ea9e2b11b6afd23c7920449759 100644 (file)
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        du_pins: du {
                renesas,groups = "du1_rgb666", "du1_sync", "du1_disp", "du1_dotclkout0";
                renesas,function = "du";
                renesas,function = "scif2";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
+&pfc {
+       qspi_pins: spi0 {
+               renesas,groups = "qspi_ctrl", "qspi_data4";
+               renesas,function = "qspi";
+       };
+};
+
 &ether {
        pinctrl-0 = <&ether_pins &phy1_pins>;
        pinctrl-names = "default";
 
        status = "okay";
 };
+
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
+&qspi {
+       pinctrl-0 = <&qspi_pins>;
+       pinctrl-names = "default";
+
+       status = "okay";
+
+       flash@0 {
+               compatible = "spansion,s25fl512s", "jedec,spi-nor";
+               reg = <0>;
+               spi-max-frequency = <30000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
+               spi-cpol;
+               spi-cpha;
+               m25p,fast-read;
+
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       partition@0 {
+                               label = "loader";
+                               reg = <0x00000000 0x00040000>;
+                               read-only;
+                       };
+                       partition@40000 {
+                               label = "system";
+                               reg = <0x00040000 0x00040000>;
+                               read-only;
+                       };
+                       partition@80000 {
+                               label = "user";
+                               reg = <0x00080000 0x03f80000>;
+                       };
+               };
+       };
+};
index 5153e3af25d94c0f8f14a93b5f894fa60e217fce..66f077a3ca41d1b06a367ca265ee5aac48f71bf2 100644 (file)
                states = <3300000 1
                          1800000 0>;
        };
+
+       vga-encoder {
+               compatible = "adi,adv7123";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7123_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb1>;
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               adv7123_out: endpoint {
+                                       remote-endpoint = <&vga_in>;
+                               };
+                       };
+               };
+       };
+
+       hdmi-out {
+               compatible = "hdmi-connector";
+               type = "a";
+
+               port {
+                       hdmi_con: endpoint {
+                               remote-endpoint = <&adv7511_out>;
+                       };
+               };
+       };
+
+       vga {
+               compatible = "vga-connector";
+
+               port {
+                       vga_in: endpoint {
+                               remote-endpoint = <&adv7123_out>;
+                       };
+               };
+       };
+
+       x2_clk: x2-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <148500000>;
+       };
+
+       x3_clk: x3-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <74250000>;
+       };
 };
 
 &extal_clk {
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        scif2_pins: serial2 {
                renesas,groups = "scif2_data";
                renesas,function = "scif2";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &ether {
        pinctrl-0 = <&ether_pins &phy1_pins>;
        pinctrl-names = "default";
                        };
                };
        };
+
+       hdmi@39 {
+               compatible = "adi,adv7511w";
+               reg = <0x39>;
+               interrupt-parent = <&gpio5>;
+               interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+
+               adi,input-depth = <8>;
+               adi,input-colorspace = "rgb";
+               adi,input-clock = "1x";
+               adi,input-style = <1>;
+               adi,input-justification = "evenly";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7511_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb0>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               adv7511_out: endpoint {
+                                       remote-endpoint = <&hdmi_con>;
+                               };
+                       };
+               };
+       };
 };
 
 &mmcif0 {
 &usbphy {
        status = "okay";
 };
+
+&du {
+       status = "okay";
+
+       clocks = <&mstp7_clks R8A7794_CLK_DU0>,
+                <&mstp7_clks R8A7794_CLK_DU0>,
+                <&x2_clk>, <&x3_clk>;
+       clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1";
+
+       ports {
+               port@0 {
+                       endpoint {
+                               remote-endpoint = <&adv7511_in>;
+                       };
+               };
+               port@1 {
+                       endpoint {
+                               remote-endpoint = <&adv7123_in>;
+                       };
+               };
+       };
+};
index 6c78f1fae90f7ae43b42f6a8baea078a73fd30f4..7d4c5597af5b54bd76d55f766324f8ff7985ea4e 100644 (file)
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6050000 0 0x50>;
-               interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
@@ -78,7 +78,7 @@
        gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6051000 0 0x50>;
-               interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 26>;
@@ -91,7 +91,7 @@
        gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6052000 0 0x50>;
-               interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6053000 0 0x50>;
-               interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6054000 0 0x50>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6055000 0 0x50>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 28>;
        gpio6: gpio@e6055400 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6055400 0 0x50>;
-               interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 192 26>;
        cmt0: timer@ffca0000 {
                compatible = "renesas,cmt-48-gen2";
                reg = <0 0xffca0000 0 0x1004>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7794_CLK_CMT0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 122 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 123 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 124 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 125 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 126 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        irqc0: interrupt-controller@e61c0000 {
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 12 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 13 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 14 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 15 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 16 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 17 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7794_CLK_IRQC>;
                power-domains = <&cpg_clocks>;
        };
        dmac0: dma-controller@e6700000 {
                compatible = "renesas,dmac-r8a7794", "renesas,rcar-dmac";
                reg = <0 0xe6700000 0 0x20000>;
-               interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
-                             0 200 IRQ_TYPE_LEVEL_HIGH
-                             0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH
-                             0 205 IRQ_TYPE_LEVEL_HIGH
-                             0 206 IRQ_TYPE_LEVEL_HIGH
-                             0 207 IRQ_TYPE_LEVEL_HIGH
-                             0 208 IRQ_TYPE_LEVEL_HIGH
-                             0 209 IRQ_TYPE_LEVEL_HIGH
-                             0 210 IRQ_TYPE_LEVEL_HIGH
-                             0 211 IRQ_TYPE_LEVEL_HIGH
-                             0 212 IRQ_TYPE_LEVEL_HIGH
-                             0 213 IRQ_TYPE_LEVEL_HIGH
-                             0 214 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        dmac1: dma-controller@e6720000 {
                compatible = "renesas,dmac-r8a7794", "renesas,rcar-dmac";
                reg = <0 0xe6720000 0 0x20000>;
-               interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                             0 216 IRQ_TYPE_LEVEL_HIGH
-                             0 217 IRQ_TYPE_LEVEL_HIGH
-                             0 218 IRQ_TYPE_LEVEL_HIGH
-                             0 219 IRQ_TYPE_LEVEL_HIGH
-                             0 308 IRQ_TYPE_LEVEL_HIGH
-                             0 309 IRQ_TYPE_LEVEL_HIGH
-                             0 310 IRQ_TYPE_LEVEL_HIGH
-                             0 311 IRQ_TYPE_LEVEL_HIGH
-                             0 312 IRQ_TYPE_LEVEL_HIGH
-                             0 313 IRQ_TYPE_LEVEL_HIGH
-                             0 314 IRQ_TYPE_LEVEL_HIGH
-                             0 315 IRQ_TYPE_LEVEL_HIGH
-                             0 316 IRQ_TYPE_LEVEL_HIGH
-                             0 317 IRQ_TYPE_LEVEL_HIGH
-                             0 318 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        };
 
        scifa0: serial@e6c40000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c40000 0 64>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x21>, <&dmac0 0x22>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa1: serial@e6c50000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c50000 0 64>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x25>, <&dmac0 0x26>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa2: serial@e6c60000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c60000 0 64>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x27>, <&dmac0 0x28>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa3: serial@e6c70000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c70000 0 64>;
-               interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7794_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa4: serial@e6c78000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c78000 0 64>;
-               interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7794_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa5: serial@e6c80000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c80000 0 64>;
-               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7794_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x23>, <&dmac0 0x24>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb0: serial@e6c20000 {
-               compatible = "renesas,scifb-r8a7794", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7794",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c20000 0 64>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb1: serial@e6c30000 {
-               compatible = "renesas,scifb-r8a7794", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7794",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c30000 0 64>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb2: serial@e6ce0000 {
-               compatible = "renesas,scifb-r8a7794", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7794",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6ce0000 0 64>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif0: serial@e6e60000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e60000 0 64>;
-               interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif1: serial@e6e68000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e68000 0 64>;
-               interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif2: serial@e6e58000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e58000 0 64>;
-               interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif3: serial@e6ea8000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ea8000 0 64>;
-               interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF3>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif4: serial@e6ee0000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee0000 0 64>;
-               interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF4>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif5: serial@e6ee8000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee8000 0 64>;
-               interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF5>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif0: serial@e62c0000 {
-               compatible = "renesas,hscif-r8a7794", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7794",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c0000 0 96>;
-               interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_HSCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_HSCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif1: serial@e62c8000 {
-               compatible = "renesas,hscif-r8a7794", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7794",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c8000 0 96>;
-               interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_HSCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_HSCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif2: serial@e62d0000 {
-               compatible = "renesas,hscif-r8a7794", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7794",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62d0000 0 96>;
-               interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_HSCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_HSCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        ether: ethernet@ee700000 {
                compatible = "renesas,ether-r8a7794";
                reg = <0 0xee700000 0 0x400>;
-               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7794_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
        i2c0: i2c@e6508000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6508000 0 0x40>;
-               interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c1: i2c@e6518000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6518000 0 0x40>;
-               interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c2: i2c@e6530000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6530000 0 0x40>;
-               interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c3: i2c@e6540000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6540000 0 0x40>;
-               interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c4: i2c@e6520000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6520000 0 0x40>;
-               interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C4>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c5: i2c@e6528000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6528000 0 0x40>;
-               interrupts = <0 20 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C5>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        mmcif0: mmc@ee200000 {
                compatible = "renesas,mmcif-r8a7794", "renesas,sh-mmcif";
                reg = <0 0xee200000 0 0x80>;
-               interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_MMCIF0>;
                dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
                dma-names = "tx", "rx";
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-r8a7794";
                reg = <0 0xee100000 0 0x200>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_SDHI0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi1: sd@ee140000 {
                compatible = "renesas,sdhi-r8a7794";
                reg = <0 0xee140000 0 0x100>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_SDHI1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi2: sd@ee160000 {
                compatible = "renesas,sdhi-r8a7794";
                reg = <0 0xee160000 0 0x100>;
-               interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_SDHI2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        qspi: spi@e6b10000 {
                compatible = "renesas,qspi-r8a7794", "renesas,qspi";
                reg = <0 0xe6b10000 0 0x2c>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_QSPI_MOD>;
                dmas = <&dmac0 0x17>, <&dmac0 0x18>;
                dma-names = "tx", "rx";
        vin0: video@e6ef0000 {
                compatible = "renesas,vin-r8a7794";
                reg = <0 0xe6ef0000 0 0x1000>;
-               interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7794_CLK_VIN0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin1: video@e6ef1000 {
                compatible = "renesas,vin-r8a7794";
                reg = <0 0xe6ef1000 0 0x1000>;
-               interrupts = <0 189 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7794_CLK_VIN1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                device_type = "pci";
                reg = <0 0xee090000 0 0xc00>,
                      <0 0xee080000 0 0x1100>;
-               interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7794_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                device_type = "pci";
                reg = <0 0xee0d0000 0 0xc00>,
                      <0 0xee0c0000 0 0x1100>;
-               interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7794_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
        };
 
        hsusb: usb@e6590000 {
-               compatible = "renesas,usbhs-r8a7794";
+               compatible = "renesas,usbhs-r8a7794", "renesas,rcar-gen2-usbhs";
                reg = <0 0xe6590000 0 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7794_CLK_HSUSB>;
                power-domains = <&cpg_clocks>;
                renesas,buswait = <4>;
                compatible = "renesas,du-r8a7794";
                reg = <0 0xfeb00000 0 0x40000>;
                reg-names = "du";
-               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 268 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7794_CLK_DU0>,
                         <&mstp7_clks R8A7794_CLK_DU0>;
                clock-names = "du.0", "du.1";
                        clock-output-names = "extal";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
+
                /* Special CPG clocks */
                cpg_clocks: cpg_clocks@e6150000 {
                        compatible = "renesas,r8a7794-cpg-clocks",
        ipmmu_sy0: mmu@e6280000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xe6280000 0 0x1000>;
-               interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 224 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_sy1: mmu@e6290000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xe6290000 0 0x1000>;
-               interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_ds: mmu@e6740000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xe6740000 0 0x1000>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mp: mmu@ec680000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xec680000 0 0x1000>;
-               interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mx: mmu@fe951000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xfe951000 0 0x1000>;
-               interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 221 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_gp: mmu@e62a0000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xe62a0000 0 0x1000>;
-               interrupts = <0 260 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 261 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
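
The r8a7794.dtsi hunks above add an external SCIF clock whose frequency is left at zero and whose status is "disabled", with the comment that the board must override the value. As a rough illustration of how a board .dts would do that, a minimal sketch follows; the 14745600 Hz figure is an assumption typical of R-Car Gen2 boards, not something stated in this diff:

	&scif_clk {
		clock-frequency = <14745600>;	/* assumed board crystal, for illustration */
		status = "okay";
	};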
index 992f9cadbc04e1fa5534b35c2c4966abcaff42b4..d5913fe128eed0e7b39a8a8ff650bf73f57b63ac 100644 (file)
        model = "Rockchip RK3036 KylinBoard";
        compatible = "rockchip,rk3036-kylin", "rockchip,rk3036";
 
+       leds: gpio-leds {
+               compatible = "gpio-leds";
+
+               work {
+                       gpios = <&gpio2 30 GPIO_ACTIVE_HIGH>;
+                       label = "kylin:red:led";
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&led_ctl>;
+               };
+       };
+
+       sdio_pwrseq: sdio-pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               pinctrl-names = "default";
+               pinctrl-0 = <&bt_wake_h>;
+
+               /*
+                * On the module itself this is one of these (depending
+                * on the actual card populated):
+                * - SDIO_RESET_L_WL_REG_ON
+                * - SDIO_RESET_L_WL_RST
+                * - SDIO_RESET_L_BT_EN
+                */
+               reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>, /* WL_REG_ON */
+                             <&gpio0 27 GPIO_ACTIVE_LOW>, /* WL_RST */
+                             <&gpio2 9  GPIO_ACTIVE_LOW>; /* BT_EN */
+       };
+
+       sound {
+               compatible = "simple-audio-card";
+               simple-audio-card,format = "i2s";
+               simple-audio-card,name = "rockchip,rt5616-codec";
+               simple-audio-card,mclk-fs = <512>;
+               simple-audio-card,widgets =
+                       "Microphone", "Microphone Jack",
+                       "Headphone", "Headphone Jack";
+               simple-audio-card,routing =
+                       "MIC1", "Microphone Jack",
+                       "MIC2", "Microphone Jack",
+                       "Microphone Jack", "micbias1",
+                       "Headphone Jack", "HPOL",
+                       "Headphone Jack", "HPOR";
+
+               simple-audio-card,cpu {
+                       sound-dai = <&i2s>;
+               };
+
+               simple-audio-card,codec {
+                       sound-dai = <&rt5616>;
+               };
+       };
+
        vcc_sys: vsys-regulator {
                compatible = "regulator-fixed";
                regulator-name = "vcc_sys";
 
 &i2c2 {
        status = "okay";
+
+       rt5616: rt5616@1b {
+               compatible = "rt5616";
+               reg = <0x1b>;
+               clocks = <&cru SCLK_I2S_OUT>;
+               clock-names = "mclk";
+               #sound-dai-cells = <0>;
+       };
+};
+
+&i2s {
+       #sound-dai-cells = <0>;
+       status = "okay";
 };
 
 &sdio {
 
        broken-cd;
        bus-width = <4>;
+       cap-sd-highspeed;
        cap-sdio-irq;
        default-sample-phase = <90>;
        keep-power-in-suspend;
+       mmc-pwrseq = <&sdio_pwrseq>;
        non-removable;
        num-slots = <1>;
        pinctrl-names = "default";
        pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>;
+       sd-uhs-sdr12;
+       sd-uhs-sdr25;
+       sd-uhs-sdr50;
+       sd-uhs-sdr104;
+};
+
+&sdmmc {
+       bus-width = <4>;
+       cap-mmc-highspeed;
+       cap-sd-highspeed;
+       card-detect-delay = <200>;
+       disable-wp;
+       num-slots = <1>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_cd>, <&sdmmc_bus4>;
+};
+
+&uart0 {
+       status = "okay";
 };
 
 &uart2 {
 };
 
 &pinctrl {
+       leds {
+               led_ctl: led-ctl {
+                       rockchip,pins = <2 30 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
+
        pmic {
                pmic_int: pmic-int {
                        rockchip,pins = <2 2 RK_FUNC_GPIO &pcfg_pull_default>;
                };
        };
 
+       sdio {
+               bt_wake_h: bt-wake-h {
+                       rockchip,pins = <2 8 RK_FUNC_GPIO &pcfg_pull_default>;
+               };
+       };
+
+       sdmmc {
+               sdmmc_pwr: sdmmc-pwr {
+                       rockchip,pins = <2 28 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
+
        sleep {
                global_pwroff: global-pwroff {
                        rockchip,pins = <2 7 RK_FUNC_1 &pcfg_pull_none>;
index b9567c1e068771ba2d822d4c2fae79173a8a29e6..3864fa142df045e6be19fa2480d1c97d6870f0c2 100644 (file)
@@ -60,6 +60,7 @@
                serial0 = &uart0;
                serial1 = &uart1;
                serial2 = &uart2;
+               spi = &spi;
        };
 
        memory {
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMAC2>;
                        clock-names = "apb_pclk";
                };
        };
 
        usb_otg: usb@10180000 {
-               compatible = "rockchip,rk3288-usb", "rockchip,rk3066-usb",
+               compatible = "rockchip,rk3036-usb", "rockchip,rk3066-usb",
                                "snps,dwc2";
                reg = <0x10180000 0x40000>;
                interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        usb_host: usb@101c0000 {
-               compatible = "rockchip,rk3288-usb", "rockchip,rk3066-usb",
+               compatible = "rockchip,rk3036-usb", "rockchip,rk3066-usb",
                                "snps,dwc2";
                reg = <0x101c0000 0x40000>;
                interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        emmc: dwmmc@1021c000 {
-               compatible = "rockchip,rk3288-dw-mshc";
+               compatible = "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc";
                reg = <0x1021c000 0x4000>;
                interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
                broken-cd;
                interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
-               clock-names = "i2s_hclk", "i2s_clk";
-               clocks = <&cru HCLK_I2S>, <&cru SCLK_I2S>;
+               clock-names = "i2s_clk", "i2s_hclk";
+               clocks = <&cru SCLK_I2S>, <&cru HCLK_I2S>;
                dmas = <&pdma 0>, <&pdma 1>;
                dma-names = "tx", "rx";
                pinctrl-names = "default";
        };
 
        i2c1: i2c@20056000 {
-               compatible = "rockchip,rk3288-i2c";
+               compatible = "rockchip,rk3036-i2c", "rockchip,rk3288-i2c";
                reg = <0x20056000 0x1000>;
                interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
        };
 
        i2c2: i2c@2005a000 {
-               compatible = "rockchip,rk3288-i2c";
+               compatible = "rockchip,rk3036-i2c", "rockchip,rk3288-i2c";
                reg = <0x2005a000 0x1000>;
                interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
        };
 
        i2c0: i2c@20072000 {
-               compatible = "rockchip,rk3288-i2c";
+               compatible = "rockchip,rk3036-i2c", "rockchip,rk3288-i2c";
                reg = <0x20072000 0x1000>;
                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                status = "disabled";
        };
 
+       spi: spi@20074000 {
+               compatible = "rockchip,rockchip-spi";
+               reg = <0x20074000 0x1000>;
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&cru PCLK_SPI>, <&cru SCLK_SPI>;
+               clock-names = "apb-pclk", "spi_pclk";
+               dmas = <&pdma 8>, <&pdma 9>;
+               dma-names = "tx", "rx";
+               pinctrl-names = "default";
+               pinctrl-0 = <&spi_txd &spi_rxd &spi_clk &spi_cs0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disabled";
+       };
+
        pinctrl: pinctrl {
                compatible = "rockchip,rk3036-pinctrl";
                rockchip,grf = <&grf>;
 
                i2s {
                        i2s_bus: i2s-bus {
-                               rockchip,pins = <1 0 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 1 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 2 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 3 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 4 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 5 RK_FUNC_1 &pcfg_pull_none>;
+                               rockchip,pins = <1 0 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 1 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 2 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 3 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 4 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 5 RK_FUNC_1 &pcfg_pull_default>;
                        };
                };
 
                        };
                        /* no rts / cts for uart2 */
                };
+
+               spi {
+                       spi_txd: spi-txd {
+                               rockchip,pins = <1 29 RK_FUNC_3 &pcfg_pull_default>;
+                       };
+
+                       spi_rxd: spi-rxd {
+                               rockchip,pins = <1 28 RK_FUNC_3 &pcfg_pull_default>;
+                       };
+
+                       spi_clk: spi-clk {
+                               rockchip,pins = <2 0 RK_FUNC_2 &pcfg_pull_default>;
+                       };
+
+                       spi_cs0: spi-cs0 {
+                               rockchip,pins = <1 30 RK_FUNC_3 &pcfg_pull_default>;
+                       };
+
+                       spi_cs1: spi-cs1 {
+                               rockchip,pins = <1 31 RK_FUNC_3 &pcfg_pull_default>;
+                       };
+               };
        };
 };
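
The spi controller added to rk3036.dtsi above is left status = "disabled", so a board .dts has to enable it and describe its slaves. A minimal, purely illustrative sketch (the jedec,spi-nor child and its frequency are assumptions, not part of this series) might be:

	&spi {
		status = "okay";

		flash@0 {
			compatible = "jedec,spi-nor";	/* hypothetical slave device */
			reg = <0>;
			spi-max-frequency = <25000000>;
		};
	};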
index 38c91a839795f3cf77103f56fdb7bb0185ad2854..6d2a5b3a84a86635f65b8d43aae475040dc54c4e 100644 (file)
                reg = <0x60000000 0x40000000>;
        };
 
+       vdd_log: vdd-log {
+               compatible = "pwm-regulator";
+               pwms = <&pwm3 0 1000>;
+               regulator-name = "vdd_log";
+               regulator-min-microvolt = <1200000>;
+               regulator-max-microvolt = <1200000>;
+               regulator-always-on;
+               voltage-table = <1000000 100>,
+                               <1200000 42>;
+               status = "okay";
+       };
+
        vcc_sd0: fixed-regulator {
                compatible = "regulator-fixed";
                regulator-name = "sdmmc-supply";
@@ -74,7 +86,7 @@
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
                button@1 {
@@ -82,7 +94,6 @@
                        linux,code = <104>;
                        label = "GPIO Key Vol-";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <0>;
                        debounce-interval = <100>;
                };
                /* VOL+ comes somehow thru the ADC */
        disable-wp;
 };
 
+&pwm3 {
+       status = "okay";
+};
+
 &uart0 {
        status = "okay";
 };
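
For the vdd_log pwm-regulator added above (and repeated in the two boards that follow), each voltage-table entry pairs a voltage in microvolts with a PWM duty cycle in percent, as the pwm-regulator binding describes its table mode; <1200000 42> therefore asks for 1.2 V at a 42 % duty cycle on pwm3. A hedged sketch of adding a further operating point (the 1.1 V / 60 % pair is illustrative only) would be:

	voltage-table = <1000000 100>,
			<1100000 60>,	/* assumed extra point, illustration only */
			<1200000 42>;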
index 7cdc308bfac54c1ec6e286f29c4ad0c846df41aa..a2b763e949b4648f60e3929d0afba5afc711a61b 100644 (file)
                reg = <0x60000000 0x40000000>;
        };
 
+       vdd_log: vdd-log {
+               compatible = "pwm-regulator";
+               pwms = <&pwm3 0 1000>;
+               regulator-name = "vdd_log";
+               regulator-min-microvolt = <1200000>;
+               regulator-max-microvolt = <1200000>;
+               regulator-always-on;
+               voltage-table = <1000000 100>,
+                               <1200000 42>;
+               status = "okay";
+       };
+
        vcc_sd0: sdmmc-regulator {
                compatible = "regulator-fixed";
                regulator-name = "sdmmc-supply";
        };
 };
 
+&pwm3 {
+       status = "okay";
+};
+
 &uart0 {
        status = "okay";
 };
index 341c1f87936a7d20114175802e79f0a3196e9a25..05533005a809cef3d40731a2f3b4d1477a531d26 100644 (file)
@@ -65,7 +65,7 @@
                #size-cells = <0>;
 
                button@0 {
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        gpios = <&gpio6 2 GPIO_ACTIVE_LOW>;
                        label = "GPIO Power";
                        linux,code = <116>;
                };
        };
 
+       vdd_log: vdd-log {
+               compatible = "pwm-regulator";
+               pwms = <&pwm3 0 1000>;
+               regulator-name = "vdd_log";
+               regulator-min-microvolt = <1200000>;
+               regulator-max-microvolt = <1200000>;
+               regulator-always-on;
+               voltage-table = <1000000 100>,
+                               <1200000 42>;
+               status = "okay";
+       };
+
        vsys: vsys-regulator {
                compatible = "regulator-fixed";
                regulator-name = "vsys";
        status = "okay";
 };
 
+&pwm3 {
+       status = "okay";
+};
+
 &saradc {
        vref-supply = <&vcc_25>;
        status = "okay";
index 58bac5053858bc95bcc8f089fd32deb9fdfa0b32..cb0a552e0b181674365aa0285ae7b92d73dc746b 100644 (file)
                        reg = <0x0>;
                        operating-points = <
                                /* kHz    uV */
-                               1008000 1075000
-                                816000 1025000
-                                600000 1025000
-                                504000 1000000
-                                312000  975000
+                               1416000 1300000
+                               1200000 1175000
+                               1008000 1125000
+                               816000  1125000
+                               600000  1100000
+                               504000  1100000
+                               312000  1075000
                        >;
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
                clock-names = "timer", "pclk";
        };
 
+       tsadc: tsadc@20060000 {
+               compatible = "rockchip,rk3066-tsadc";
+               reg = <0x20060000 0x100>;
+               clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+               clock-names = "saradc", "apb_pclk";
+               interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+               #io-channel-cells = <1>;
+               status = "disabled";
+       };
+
        usbphy: phy {
                compatible = "rockchip,rk3066a-usb-phy", "rockchip,rk3288-usb-phy";
                rockchip,grf = <&grf>;
                        reg = <0x17c>;
                        clocks = <&cru SCLK_OTGPHY0>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
 
                usbphy1: usb-phy1 {
                        reg = <0x188>;
                        clocks = <&cru SCLK_OTGPHY1>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
        };
 
index 66fa87d1e2c2492f247b703fce10d3b34f2076e9..0b6924c97b6bf0fd40e6062ac0b279e2edaea7a2 100644 (file)
@@ -63,7 +63,7 @@
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
        };
index 348d46b7ada5a07ebdefe4dafc1630ddd079c0b7..9271833958f9dcd506d2b0365671414c4ec72460 100644 (file)
                        reg = <0x10c>;
                        clocks = <&cru SCLK_OTGPHY0>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
 
                usbphy1: usb-phy1 {
                        reg = <0x11c>;
                        clocks = <&cru SCLK_OTGPHY1>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
        };
 
index 4faabdb65868e38c0e4f96143d4a3cd5370afe5e..78d47f7d2938532d435954c731ba7e99063939c2 100644 (file)
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
        };
index 4e3fd9aefe3497e464bc8fecdb10a688372460a6..98c586a43c73011f3aba567c784f573b8ab57dab 100644 (file)
@@ -91,7 +91,7 @@
                #size-cells = <0>;
 
                button@0 {
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
                        label = "GPIO Power";
                        linux,code = <116>;
                output-low;
        };
 
+       pcfg_pull_up_drv_12ma: pcfg-pull-up-drv-12ma {
+               bias-pull-up;
+               drive-strength = <12>;
+       };
+
        act8846 {
                pwr_hold: pwr-hold {
                        rockchip,pins = <0 1 RK_FUNC_GPIO &pcfg_output_high>;
        };
 
        sdmmc {
+               /*
+                * The default drive strength isn't enough to achieve even
+                * high-speed mode on the firefly board, so bump it up to 12 mA.
+                */
+               sdmmc_bus4: sdmmc-bus4 {
+                       rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+                                       <6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+                                       <6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+                                       <6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+               };
+
+               sdmmc_clk: sdmmc-clk {
+                       rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+               };
+
+               sdmmc_cmd: sdmmc-cmd {
+                       rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+               };
+
                sdmmc_pwr: sdmmc-pwr {
                        rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
                };
index 65c475642d5a79196925a50498a567aad75f72ef..2ff9689d2e1b831c26f8e5660ee910ad2c18e6c6 100644 (file)
@@ -74,7 +74,7 @@
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
        };
index 17f13c73fe5e078e1adebbd1932615d41b89bbcb..510a1d0d7abb5b4022c4e2b7500d9049540a1ec3 100644 (file)
@@ -73,7 +73,7 @@
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
        };
index 1ece66f3e162786e839c26e5ef89c04b4bc71fec..e1ee9f949035cae2a2a1b84fad09b35feebe6440 100644 (file)
                clock-output-names = "ext_gmac";
        };
 
+       io_domains: io-domains {
+               compatible = "rockchip,rk3288-io-voltage-domain";
+               rockchip,grf = <&grf>;
+
+               audio-supply = <&vcc_io>;
+               bb-supply = <&vcc_io>;
+               dvp-supply = <&vcc_18>;
+               flash0-supply = <&vcc_flash>;
+               flash1-supply = <&vccio_pmu>;
+               gpio30-supply = <&vccio_pmu>;
+               gpio1830-supply = <&vcc_io>;
+               lcdc-supply = <&vcc_io>;
+               sdcard-supply = <&vccio_sd>;
+               wifi-supply = <&vcc_18>;
+       };
+
+       vcc_flash: flash-regulator {
+               compatible = "regulator-fixed";
+               regulator-name = "vcc_flash";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               startup-delay-us = <150>;
+               vin-supply = <&vcc_io>;
+       };
+
        vcc_sys: vsys-regulator {
                compatible = "regulator-fixed";
                regulator-name = "vcc_sys";
        pinctrl-names = "default";
        pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
        vmmc-supply = <&vcc_io>;
+       vqmmc-supply = <&vcc_flash>;
        status = "okay";
 };
 
index c5453a0b07fca940227c8f24a92b838cbe43219d..dd3ad2e93a6d538dcd29c5dbc17ca2feb8d82d5d 100644 (file)
                stdout-path = "serial2:115200n8";
        };
 
+       gpio-leds {
+               compatible = "gpio-leds";
+
+               heartbeat {
+                       gpios = <&gpio7 15 GPIO_ACTIVE_LOW>;
+                       label = "rock2:green:state1";
+                       linux,default-trigger = "heartbeat";
+               };
+
+               mmc {
+                       gpios = <&gpio0 11 GPIO_ACTIVE_LOW>;
+                       label = "rock2:blue:state2";
+                       linux,default-trigger = "mmc0";
+               };
+       };
+
        ir: ir-receiver {
                compatible = "gpio-ir-receiver";
                gpios = <&gpio8 1 GPIO_ACTIVE_LOW>;
                #sound-dai-cells = <0>;
        };
 
+       sdio_pwrseq: sdio-pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               clocks = <&hym8563>;
+               clock-names = "ext_clock";
+               pinctrl-names = "default";
+               pinctrl-0 = <&wifi_enable>;
+               reset-gpios = <&gpio4 28 GPIO_ACTIVE_LOW>;
+       };
+
        vcc_usb_host: vcc-host-regulator {
                compatible = "regulator-fixed";
                enable-active-high;
        };
 };
 
+&sdio0 {
+       bus-width = <4>;
+       cap-sd-highspeed;
+       cap-sdio-irq;
+       disable-wp;
+       mmc-pwrseq = <&sdio_pwrseq>;
+       non-removable;
+       num-slots = <1>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk &sdio0_int>;
+       vmmc-supply = <&vcc_io>;
+       vqmmc-supply = <&vcc_18>;
+       status = "okay";
+};
+
 &sdmmc {
        bus-width = <4>;
        cap-mmc-highspeed;
 };
 
 &i2c0 {
-       hym8563@51 {
+       hym8563: hym8563@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
                        rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
                };
        };
+
+       sdio {
+               wifi_enable: wifi-enable {
+                       rockchip,pins = <4 28 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
 };
 
 &spdif {
index 136d650dd05faf28b5656ea980b2b799edc06ba8..610769d99522ea47e1765282469b797d378dc5c8 100644 (file)
        lid {
                label = "Lid";
                gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
-               gpio-key,wakeup;
+               wakeup-source;
                linux,code = <0>; /* SW_LID */
                linux,input-type = <5>; /* EV_SW */
                debounce-interval = <1>;
index 9fce91ffff6fd89b1f39ba960dfeecd71ff8ae24..412809c60d01901087f67ed1107f2538ae3a8f1b 100644 (file)
@@ -64,7 +64,7 @@
                        gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
                        debounce-interval = <100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
        i2c-scl-rising-time-ns = <1000>;
 };
 
-&power {
-       assigned-clocks = <&cru SCLK_EDP_24M>;
-       assigned-clock-parents = <&xin24m>;
-};
-
 &pwm1 {
        status = "okay";
 };
        status = "okay";
 
        assigned-clocks = <&cru SCLK_USBPHY480M_SRC>;
-       assigned-clock-parents = <&cru SCLK_OTGPHY0>;
+       assigned-clock-parents = <&usbphy0>;
        dr_mode = "host";
 };
 
index 8ac49f3efc178ddc05ead42d5fec2178280275c2..0934b6abcaab59cc29bc9ef8f4ab573a46123291 100644 (file)
                        interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMAC2>;
                        clock-names = "apb_pclk";
                };
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMAC1>;
                        clock-names = "apb_pclk";
                        status = "disabled";
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMAC1>;
                        clock-names = "apb_pclk";
                };
                        #address-cells = <1>;
                        #size-cells = <0>;
 
+                       assigned-clocks = <&cru SCLK_EDP_24M>;
+                       assigned-clock-parents = <&xin24m>;
+
                        /*
                         * Note: Although SCLK_* are the working clocks
                         * of device without including on the NOC, needed for
                                reg = <0>;
                                remote-endpoint = <&hdmi_in_vopb>;
                        };
+                       vopb_out_mipi: endpoint@2 {
+                               reg = <2>;
+                               remote-endpoint = <&mipi_in_vopb>;
+                       };
                };
        };
 
                                reg = <0>;
                                remote-endpoint = <&hdmi_in_vopl>;
                        };
+                       vopl_out_mipi: endpoint@2 {
+                               reg = <2>;
+                               remote-endpoint = <&mipi_in_vopl>;
+                       };
                };
        };
 
                status = "disabled";
        };
 
+       mipi_dsi: mipi@ff960000 {
+               compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
+               reg = <0xff960000 0x4000>;
+               interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&cru SCLK_MIPIDSI_24M>, <&cru PCLK_MIPI_DSI0>;
+               clock-names = "ref", "pclk";
+               rockchip,grf = <&grf>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disabled";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <1>;
+
+                       mipi_in: port {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               mipi_in_vopb: endpoint@0 {
+                                       reg = <0>;
+                                       remote-endpoint = <&vopb_out_mipi>;
+                               };
+                               mipi_in_vopl: endpoint@1 {
+                                       reg = <1>;
+                                       remote-endpoint = <&vopl_out_mipi>;
+                               };
+                       };
+               };
+       };
+
        hdmi: hdmi@ff980000 {
                compatible = "rockchip,rk3288-dw-hdmi";
                reg = <0xff980000 0x20000>;
                        reg = <0x320>;
                        clocks = <&cru SCLK_OTGPHY0>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
 
                usbphy1: usb-phy1 {
                        reg = <0x334>;
                        clocks = <&cru SCLK_OTGPHY1>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
 
                usbphy2: usb-phy2 {
                        reg = <0x348>;
                        clocks = <&cru SCLK_OTGPHY2>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
        };
 
index 99eeea70223b94c9492a9df2fb8f7e4ffa373613..f1581f9d050f54037f24e5029d1814e9f759b520 100644 (file)
@@ -78,6 +78,7 @@
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMA1>;
                        clock-names = "apb_pclk";
                };
@@ -88,6 +89,7 @@
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMA1>;
                        clock-names = "apb_pclk";
                        status = "disabled";
                        interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMA2>;
                        clock-names = "apb_pclk";
                };
index aa64faa72970113a887c22836f9ddb5da5aa6e68..da24ab570b0e8c4b278af7d4844eec2c6e2d777b 100644 (file)
                        linux,code = <KEY_POWER>;
                        label = "power";
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
 
 &keypad {
        linux,input-no-autorepeat;
-       linux,input-wakeup;
+       wakeup-source;
        samsung,keypad-num-rows = <3>;
        samsung,keypad-num-columns = <3>;
        pinctrl-names = "default";
index 3b76eeeb8410a66a31ff33018df38e2ba2ce990e..0a33d402138e14fb054bc32beadf748b4f9c20cd 100644 (file)
                        linux,code = <KEY_POWER>;
                        label = "power";
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
 
 &keypad {
        linux,input-no-autorepeat;
-       linux,input-wakeup;
+       wakeup-source;
        samsung,keypad-num-rows = <3>;
        samsung,keypad-num-columns = <3>;
        pinctrl-names = "default";
index da7d210df6704d4304809e290114f3b05bb7506a..54fcc3fc82e20c2c8dba67186f92a213645dcaa6 100644 (file)
@@ -59,7 +59,7 @@
 
 &keypad {
        linux,input-no-autorepeat;
-       linux,input-wakeup;
+       wakeup-source;
        samsung,keypad-num-rows = <8>;
        samsung,keypad-num-columns = <8>;
        pinctrl-names = "default";
index b8032bca462152e6904d299f84514c483dde0165..db1151c18466c3ac530be0ba39ba2f00be3a06bc 100644 (file)
                        dbgu: serial@fc069000 {
                                compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
                                reg = <0xfc069000 0x200>;
-                               interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
+                               interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_dbgu>;
                                clocks = <&dbgu_clk>;
index 3a6056f9f0d23f17edb87674882e3b98f31fee49..bf825ca4f6f7912a6407abe9b3b3bc65a26e3637 100644 (file)
@@ -58,7 +58,7 @@
        L2: cache-controller {
                compatible = "arm,pl310-cache";
                reg = <0xf0100000 0x1000>;
-               interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
                power-domains = <&pd_a3sm>;
                arm,data-latency = <3 3 3>;
                arm,tag-latency = <2 2 2>;
@@ -70,8 +70,8 @@
        sbsc2: memory-controller@fb400000 {
                compatible = "renesas,sbsc-sh73a0";
                reg = <0xfb400000 0x400>;
-               interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 38 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "sec", "temp";
                power-domains = <&pd_a4bc1>;
        };
        sbsc1: memory-controller@fe400000 {
                compatible = "renesas,sbsc-sh73a0";
                reg = <0xfe400000 0x400>;
-               interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 36 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "sec", "temp";
                power-domains = <&pd_a4bc0>;
        };
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 56 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        cmt1: timer@e6138000 {
                compatible = "renesas,cmt-48-sh73a0", "renesas,cmt-48";
                reg = <0xe6138000 0x200>;
-               interrupts = <0 65 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&pd_c5>;
                        <0xe6900020 1>,
                        <0xe6900040 1>,
                        <0xe6900060 1>;
-               interrupts = <0 1 IRQ_TYPE_LEVEL_HIGH
-                             0 2 IRQ_TYPE_LEVEL_HIGH
-                             0 3 IRQ_TYPE_LEVEL_HIGH
-                             0 4 IRQ_TYPE_LEVEL_HIGH
-                             0 5 IRQ_TYPE_LEVEL_HIGH
-                             0 6 IRQ_TYPE_LEVEL_HIGH
-                             0 7 IRQ_TYPE_LEVEL_HIGH
-                             0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
                power-domains = <&pd_a4s>;
                control-parent;
                        <0xe6900024 1>,
                        <0xe6900044 1>,
                        <0xe6900064 1>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH
-                             0 10 IRQ_TYPE_LEVEL_HIGH
-                             0 11 IRQ_TYPE_LEVEL_HIGH
-                             0 12 IRQ_TYPE_LEVEL_HIGH
-                             0 13 IRQ_TYPE_LEVEL_HIGH
-                             0 14 IRQ_TYPE_LEVEL_HIGH
-                             0 15 IRQ_TYPE_LEVEL_HIGH
-                             0 16 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
                power-domains = <&pd_a4s>;
                control-parent;
                        <0xe6900028 1>,
                        <0xe6900048 1>,
                        <0xe6900068 1>;
-               interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH
-                             0 18 IRQ_TYPE_LEVEL_HIGH
-                             0 19 IRQ_TYPE_LEVEL_HIGH
-                             0 20 IRQ_TYPE_LEVEL_HIGH
-                             0 21 IRQ_TYPE_LEVEL_HIGH
-                             0 22 IRQ_TYPE_LEVEL_HIGH
-                             0 23 IRQ_TYPE_LEVEL_HIGH
-                             0 24 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
                power-domains = <&pd_a4s>;
                control-parent;
                        <0xe690002c 1>,
                        <0xe690004c 1>,
                        <0xe690006c 1>;
-               interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH
-                             0 26 IRQ_TYPE_LEVEL_HIGH
-                             0 27 IRQ_TYPE_LEVEL_HIGH
-                             0 28 IRQ_TYPE_LEVEL_HIGH
-                             0 29 IRQ_TYPE_LEVEL_HIGH
-                             0 30 IRQ_TYPE_LEVEL_HIGH
-                             0 31 IRQ_TYPE_LEVEL_HIGH
-                             0 32 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
                power-domains = <&pd_a4s>;
                control-parent;
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6820000 0x425>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH
-                             0 168 IRQ_TYPE_LEVEL_HIGH
-                             0 169 IRQ_TYPE_LEVEL_HIGH
-                             0 170 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks SH73A0_CLK_IIC0>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6822000 0x425>;
-               interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH
-                             0 52 IRQ_TYPE_LEVEL_HIGH
-                             0 53 IRQ_TYPE_LEVEL_HIGH
-                             0 54 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_IIC1>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6824000 0x425>;
-               interrupts = <0 171 IRQ_TYPE_LEVEL_HIGH
-                             0 172 IRQ_TYPE_LEVEL_HIGH
-                             0 173 IRQ_TYPE_LEVEL_HIGH
-                             0 174 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks SH73A0_CLK_IIC2>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6826000 0x425>;
-               interrupts = <0 183 IRQ_TYPE_LEVEL_HIGH
-                             0 184 IRQ_TYPE_LEVEL_HIGH
-                             0 185 IRQ_TYPE_LEVEL_HIGH
-                             0 186 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks SH73A0_CLK_IIC3>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6828000 0x425>;
-               interrupts = <0 187 IRQ_TYPE_LEVEL_HIGH
-                             0 188 IRQ_TYPE_LEVEL_HIGH
-                             0 189 IRQ_TYPE_LEVEL_HIGH
-                             0 190 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks SH73A0_CLK_IIC4>;
                power-domains = <&pd_c5>;
                status = "disabled";
        mmcif: mmc@e6bd0000 {
                compatible = "renesas,sh-mmcif";
                reg = <0xe6bd0000 0x100>;
-               interrupts = <0 140 IRQ_TYPE_LEVEL_HIGH
-                             0 141 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_MMCIF0>;
                power-domains = <&pd_a3sp>;
                reg-io-width = <4>;
        msiof0: spi@e6e20000 {
                compatible = "renesas,msiof-sh73a0", "renesas,sh-mobile-msiof";
                reg = <0xe6e20000 0x0064>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks SH73A0_CLK_MSIOF0>;
                power-domains = <&pd_a3sp>;
                #address-cells = <1>;
        msiof1: spi@e6e10000 {
                compatible = "renesas,msiof-sh73a0", "renesas,sh-mobile-msiof";
                reg = <0xe6e10000 0x0064>;
-               interrupts = <0 77 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_MSIOF1>;
                power-domains = <&pd_a3sp>;
                #address-cells = <1>;
        msiof2: spi@e6e00000 {
                compatible = "renesas,msiof-sh73a0", "renesas,sh-mobile-msiof";
                reg = <0xe6e00000 0x0064>;
-               interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_MSIOF2>;
                power-domains = <&pd_a3sp>;
                #address-cells = <1>;
        msiof3: spi@e6c90000 {
                compatible = "renesas,msiof-sh73a0", "renesas,sh-mobile-msiof";
                reg = <0xe6c90000 0x0064>;
-               interrupts = <0 59 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_MSIOF3>;
                power-domains = <&pd_a3sp>;
                #address-cells = <1>;
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-sh73a0";
                reg = <0xee100000 0x100>;
-               interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH
-                             0 84 IRQ_TYPE_LEVEL_HIGH
-                             0 85 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_SDHI0>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi1: sd@ee120000 {
                compatible = "renesas,sdhi-sh73a0";
                reg = <0xee120000 0x100>;
-               interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH
-                             0 89 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_SDHI1>;
                power-domains = <&pd_a3sp>;
                toshiba,mmc-wrprotect-disable;
        sdhi2: sd@ee140000 {
                compatible = "renesas,sdhi-sh73a0";
                reg = <0xee140000 0x100>;
-               interrupts = <0 104 IRQ_TYPE_LEVEL_HIGH
-                             0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_SDHI2>;
                power-domains = <&pd_a3sp>;
                toshiba,mmc-wrprotect-disable;
        scifa0: serial@e6c40000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c40000 0x100>;
-               interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa1: serial@e6c50000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c50000 0x100>;
-               interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa2: serial@e6c60000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c60000 0x100>;
-               interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa3: serial@e6c70000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c70000 0x100>;
-               interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa4: serial@e6c80000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c80000 0x100>;
-               interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa5: serial@e6cb0000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6cb0000 0x100>;
-               interrupts = <0 79 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa6: serial@e6cc0000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6cc0000 0x100>;
-               interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_SCIFA6>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa7: serial@e6cd0000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6cd0000 0x100>;
-               interrupts = <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA7>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb: serial@e6c30000 {
                compatible = "renesas,scifb-sh73a0", "renesas,scifb";
                reg = <0xe6c30000 0x100>;
-               interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFB>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
                #sound-dai-cells = <1>;
                compatible = "renesas,fsi2-sh73a0", "renesas,sh_fsi2";
                reg = <0xec230000 0x400>;
-               interrupts = <0 146 0x4>;
+               interrupts = <GIC_SPI 146 0x4>;
                power-domains = <&pd_a4mp>;
                status = "disabled";
        };
                #size-cells = <1>;
                ranges = <0 0 0x20000000>;
                reg = <0xfec10000 0x400>;
-               interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&zb_clk>;
                power-domains = <&pd_a4s>;
        };
index d0c74385331803383d5296d7157516aa63c63758..27a333eb89870167b82a170d3553bd5078fafdfb 100644 (file)
                        };
                        mmcsd_default_mode: mmcsd_default {
                                mmcsd_default_cfg1 {
-                                       /* MCCLK */
-                                       pins = "GPIO8_B10";
-                                       ste,output = <0>;
-                               };
-                               mmcsd_default_cfg2 {
-                                       /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
-                                       pins = "GPIO10_C11", "GPIO15_A12",
-                                       "GPIO16_C13", "GPIO23_D15";
-                                       ste,output = <1>;
-                               };
-                               mmcsd_default_cfg3 {
-                                       /* MCCMD, MCDAT3-0, MCMSFBCLK */
-                                       pins = "GPIO9_A10", "GPIO11_B11",
-                                       "GPIO12_A11", "GPIO13_C12",
-                                       "GPIO14_B12", "GPIO24_C15";
-                                       ste,input = <1>;
+                                       /*
+                                        * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
+                                        * MCCMD, MCDAT3-0, MCMSFBCLK
+                                        */
+                                       pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
+                                              "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
+                                              "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
+                                       ste,output = <2>;
                                };
                        };
                };
                        clock-names = "mclk", "apb_pclk";
                        interrupt-parent = <&vica>;
                        interrupts = <22>;
-                       max-frequency = <48000000>;
+                       max-frequency = <400000>;
                        bus-width = <4>;
                        cap-mmc-highspeed;
                        cap-sd-highspeed;
+                       full-pwr-cycle;
+                       /*
+                        * The STw4811 circuit used with the Nomadik strictly
+                        * requires that all of these signal direction pins be
+                        * routed and used for its 4-bit levelshifter.
+                        */
+                       st,sig-dir-dat0;
+                       st,sig-dir-dat2;
+                       st,sig-dir-dat31;
+                       st,sig-dir-cmd;
+                       st,sig-pin-fbclk;
                        pinctrl-names = "default";
                        pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
                        vmmc-supply = <&vmmc_regulator>;
index 53660894ea95ebb953446caf650f43526e0f7a8a..023b03efa5fff7cd978b5820b9b8b5e3f2015052 100644 (file)
@@ -45,6 +45,7 @@
 #include "sunxi-common-regulators.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        model = "Chuwi V7 CW0825";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c2_pins_a>;
        status = "okay";
+
+       ft5306de4: touchscreen@38 {
+               compatible = "edt,edt-ft5406";
+               reg = <0x38>;
+               interrupt-parent = <&pio>;
+               interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>;
+               touchscreen-size-x = <1024>;
+               touchscreen-size-y = <768>;
+       };
 };
 
 &lradc {
index 77c31dab86b137d44fac84aec557587b0b594fe7..04b0d2d1ae6c1e2890f28abe0043f3ebcdbd9e95 100644 (file)
@@ -48,6 +48,7 @@
 
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        model = "INet-97F Rev 02";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c2_pins_a>;
        status = "okay";
+
+       ft5406ee8: touchscreen@38 {
+               compatible = "edt,edt-ft5406";
+               reg = <0x38>;
+               interrupt-parent = <&pio>;
+               interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>;
+               touchscreen-size-x = <800>;
+               touchscreen-size-y = <480>;
+       };
 };
 
 &lradc {
index ca49b0d0ce1e0bad3a3de2552acb04f66ee8b952..bba4f9cf9bf5d6c2f51faa05ea0ac90deb48bacc 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&i2c2_pins_a>;
        status = "okay";
+
+       ft5406ee8: touchscreen@38 {
+               compatible = "edt,edt-ft5406";
+               reg = <0x38>;
+               interrupt-parent = <&pio>;
+               interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>;
+               touchscreen-size-x = <800>;
+               touchscreen-size-y = <480>;
+       };
 };
 
 &lradc {
index 985e15503378f818299eed0110f1678fa2c016cc..4e798f014c992c949fdf186facae567c1c8b1de2 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2015 Josef Gajdusek <atx@atx.name>
+ * Copyright 2015 - Marcus Cooper <codekipper@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
 
 /dts-v1/;
 #include "sun4i-a10.dtsi"
-#include "sunxi-common-regulators.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include "sunxi-itead-core-common.dtsi"
 
 / {
        model = "Iteaduino Plus A10";
        compatible = "itead,iteaduino-plus-a10", "allwinner,sun4i-a10";
-
-       aliases {
-               serial0 = &uart0;
-       };
-
-       chosen {
-               stdout-path = "serial0:115200n8";
-       };
 };
 
 &ahci {
        status = "okay";
 };
 
-&cpu0 {
-       cpu-supply = <&reg_dcdc2>;
-};
-
-&ehci0 {
-       status = "okay";
-};
-
-&ehci1 {
-       status = "okay";
-};
-
 &emac {
        pinctrl-names = "default";
        pinctrl-0 = <&emac_pins_a>;
 };
 
 &i2c0 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&i2c0_pins_a>;
-       status = "okay";
-
        axp209: pmic@34 {
-               reg = <0x34>;
                interrupts = <0>;
        };
 };
        status = "okay";
 };
 
-&ohci0 {
-       status = "okay";
-};
-
-&ohci1 {
-       status = "okay";
-};
-
 &reg_ahci_5v {
        status = "okay";
 };
 
-#include "axp209.dtsi"
-
-&reg_dcdc2 {
-       regulator-always-on;
-       regulator-min-microvolt = <1000000>;
-       regulator-max-microvolt = <1450000>;
-       regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc3 {
-       regulator-always-on;
-       regulator-min-microvolt = <1000000>;
-       regulator-max-microvolt = <1400000>;
-       regulator-name = "vdd-int-dll";
-};
-
-&reg_ldo1 {
-       regulator-name = "vdd-rtc";
-};
-
-&reg_ldo2 {
-       regulator-always-on;
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
-       regulator-name = "avcc";
-};
-
-&reg_usb1_vbus {
-       status = "okay";
-};
-
-&reg_usb2_vbus {
-       status = "okay";
-};
-
 &spi0 {
        pinctrl-names = "default";
        pinctrl-0 = <&spi0_pins_a>,
                    <&spi0_cs0_pins_a>;
        status = "okay";
 };
-
-&uart0 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&uart0_pins_a>;
-       status = "okay";
-};
-
-&usbphy {
-       usb1_vbus-supply = <&reg_usb1_vbus>;
-       usb2_vbus-supply = <&reg_usb2_vbus>;
-       status = "okay";
-};
index 530ab28e9ca239b2da64d046a1c0c1da7d94b9d0..f6898c6b84d426bdf45df86495bc69cad91bb064 100644 (file)
        status = "okay";
 };
 
+&cpu0 {
+       cpu-supply = <&reg_dcdc2>;
+};
+
 &ehci0 {
        status = "okay";
 };
index b6ad7850fac6931ee1eb6f1716418c16b849eae4..7a198dcd4ae81264ce0ff37b504b7fc43aab5338 100644 (file)
@@ -65,7 +65,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll6 0>;
+                       clocks = <&pll6>;
                        status = "disabled";
                };
 
@@ -73,7 +73,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll6 0>;
+                       clocks = <&pll6>;
                        status = "disabled";
                };
        };
                };
 
                pll6: clk@01c20028 {
-                       #clock-cells = <1>;
+                       #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-pll6-clk";
                        reg = <0x01c20028 0x4>;
                        clocks = <&osc24M>;
-                       clock-output-names = "pll6", "pll6x2";
+                       clock-output-names = "pll6";
                };
 
                cpu: cpu@01c20050 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
                        clock-output-names = "ahb1";
 
                        /*
                         * controller requires AHB1 clocked from PLL6.
                         */
                        assigned-clocks = <&ahb1>;
-                       assigned-clock-parents = <&pll6 0>;
+                       assigned-clock-parents = <&pll6>;
                };
 
                ahb1_gates: clk@01c20060 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
                        clock-output-names = "apb2";
                };
 
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc0",
                                             "mmc0_output",
                                             "mmc0_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc1",
                                             "mmc1_output",
                                             "mmc1_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc2",
                                             "mmc2_output",
                                             "mmc2_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc3",
                                             "mmc3_output",
                                             "mmc3_sample";
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c2009c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "ss";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c200a0 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "spi0";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c200a4 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "spi1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c200a8 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "spi2";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c200ac 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "spi3";
                };
 
                                allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
                        };
 
+                       mmc3_8bit_emmc_pins: mmc3@1 {
+                               allwinner,pins = "PC6", "PC7", "PC8", "PC9",
+                                                "PC10", "PC11", "PC12",
+                                                "PC13", "PC14", "PC15",
+                                                "PC24";
+                               allwinner,function = "mmc3";
+                               allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+                               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+                       };
+
                        gmac_pins_mii_a: gmac_mii@0 {
                                allwinner,pins = "PA0", "PA1", "PA2", "PA3",
                                                "PA8", "PA9", "PA11",
                        ar100: ar100_clk {
                                compatible = "allwinner,sun6i-a31-ar100-clk";
                                #clock-cells = <0>;
-                               clocks = <&osc32k>, <&osc24M>, <&pll6 0>,
-                                        <&pll6 0>;
+                               clocks = <&osc32k>, <&osc24M>, <&pll6>,
+                                        <&pll6>;
                                clock-output-names = "ar100";
                        };
 
index ea69fb8ad4d80aab599c5c8e313429706a23bf87..4ec0c8679b2e20b9a15b28160639daf264abd0d6 100644 (file)
 };
 
 /* eMMC on core board */
-&mmc2 {
+&mmc3 {
        pinctrl-names = "default";
-       pinctrl-0 = <&mmc2_8bit_emmc_pins>;
+       pinctrl-0 = <&mmc3_8bit_emmc_pins>;
        vmmc-supply = <&reg_dcdc1>;
+       vqmmc-supply = <&reg_dcdc1>;
        bus-width = <8>;
        non-removable;
+       cap-mmc-hw-reset;
        status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts b/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
new file mode 100644 (file)
index 0000000..661c21d
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2015 - Marcus Cooper <codekipper@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun7i-a20.dtsi"
+#include "sunxi-itead-core-common.dtsi"
+
+/ {
+       model = "Itead Ibox A20";
+       compatible = "itead,itead-ibox-a20", "allwinner,sun7i-a20";
+
+       leds {
+               compatible = "gpio-leds";
+               pinctrl-names = "default";
+               pinctrl-0 = <&led_pins_itead_core>;
+
+               green {
+                       label = "itead_core:green:usr";
+                       gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
+                       default-state = "on";
+               };
+
+               blue {
+                       label = "itead_core:blue:usr";
+                       gpios = <&pio 7 21 GPIO_ACTIVE_HIGH>;
+                       default-state = "on";
+               };
+       };
+};
+
+&ahci {
+       target-supply = <&reg_ahci_5v>;
+       status = "okay";
+};
+
+&codec {
+       status = "okay";
+};
+
+&gmac {
+       pinctrl-names = "default";
+       pinctrl-0 = <&gmac_pins_mii_a>;
+       phy = <&phy1>;
+       phy-mode = "mii";
+       status = "okay";
+
+       phy1: ethernet-phy@1 {
+               reg = <1>;
+       };
+};
+
+&i2c0 {
+       axp209: pmic@34 {
+               interrupt-parent = <&nmi_intc>;
+               interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+       };
+};
+
+&ir0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&ir0_rx_pins_a>;
+       status = "okay";
+};
+
+&mmc0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+       vmmc-supply = <&reg_vcc3v3>;
+       bus-width = <4>;
+       cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
+       cd-inverted;
+       status = "okay";
+};
+
+&pio {
+       led_pins_itead_core: led_pins@0 {
+               allwinner,pins = "PH20","PH21";
+               allwinner,function = "gpio_out";
+               allwinner,drive = <SUN4I_PINCTRL_20_MA>;
+               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+       };
+};
+
+&reg_ahci_5v {
+       status = "okay";
+};
index 6f88fb0ddbc7d13a1f2ee83befbdd111ee36dfb4..783b4b8b7c189e20f38199ce6e6eba1aafb65004 100644 (file)
@@ -60,7 +60,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll6 0>;
+                       clocks = <&pll6>;
                        status = "disabled";
                };
        };
                };
 
                pll6: clk@01c20028 {
-                       #clock-cells = <1>;
+                       #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-pll6-clk";
                        reg = <0x01c20028 0x4>;
                        clocks = <&osc24M>;
-                       clock-output-names = "pll6", "pll6x2";
+                       clock-output-names = "pll6";
+               };
+
+                pll6x2: pll6x2_clk {
+                       compatible = "fixed-factor-clock";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <2>;
+                       clocks = <&pll6>;
+                       clock-output-names = "pll6-2x";
                };
 
                cpu: cpu_clk@01c20050 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
                        clock-output-names = "ahb1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
                        clock-output-names = "apb2";
                };
 
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc0",
                                             "mmc0_output",
                                             "mmc0_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc1",
                                             "mmc1_output",
                                             "mmc1_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc2",
                                             "mmc2_output",
                                             "mmc2_sample";
                                allwinner,pins = "PC5", "PC6", "PC8",
                                                 "PC9", "PC10", "PC11",
                                                 "PC12", "PC13", "PC14",
-                                                "PC15";
+                                                "PC15", "PC16";
                                allwinner,function = "mmc2";
                                allwinner,drive = <SUN4I_PINCTRL_30_MA>;
                                allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
index 92e6616979ea42868b3a6bcb77f76ed7ea8083bd..5e589c1ddda93883f8b53852f96d683447ce9b30 100644 (file)
@@ -79,7 +79,7 @@
                        #clock-cells = <0>;
                        compatible = "allwinner,sun8i-a23-mbus-clk";
                        reg = <0x01c2015c 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+                       clocks = <&osc24M>, <&pll6x2>, <&pll5>;
                        clock-output-names = "mbus";
                };
        };
index 13ce68f06dd6e0ab7c7b9a5ba6e05562e4df3868..bd2a3beb4629201443e0ce30072988b25353bcca 100644 (file)
        vmmc-supply = <&reg_vcc3v0>;
        bus-width = <8>;
        non-removable;
+       cap-mmc-hw-reset;
        status = "okay";
 };
 
 &mmc2_8bit_pins {
+       /* Increase drive strength for DDR modes */
+       allwinner,drive = <SUN4I_PINCTRL_40_MA>;
        /* eMMC is missing pull-ups */
        allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 };
index 001d8402ca1845bca126adab131d69d439ceab6a..f3eb618bcfa745934d7519b072167d4810f9ce4f 100644 (file)
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c2009c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "ss";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun8i-a23-mbus-clk";
                        reg = <0x01c2015c 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&pll5>, <&pll11>;
+                       clocks = <&osc24M>, <&pll6x2>, <&pll5>, <&pll11>;
                        clock-output-names = "mbus";
                };
        };
diff --git a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
new file mode 100644 (file)
index 0000000..342e1d3
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Vishnu Patekar
+ * Vishnu Patekar <vishnupatekar0510@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a83t.dtsi"
+
+/ {
+       model = "Allwinner A83T H8Homlet Proto Dev Board v2.0";
+       compatible = "allwinner,h8homlet-v2", "allwinner,sun8i-a83t";
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+};
+
+&uart0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&uart0_pins_b>;
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
new file mode 100644 (file)
index 0000000..88b1e09
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a83t.dtsi"
+
+/ {
+       model = "Cubietech Cubietruck Plus";
+       compatible = "cubietech,cubietruck-plus", "allwinner,sun8i-a83t";
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+};
+
+&uart0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&uart0_pins_b>;
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
new file mode 100644 (file)
index 0000000..d3473f8
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2015 Vishnu Patekar
+ *
+ * Vishnu Patekar <vishnupatekar0510@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+
+ */
+
+#include "skeleton.dtsi"
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+       interrupt-parent = <&gic>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0>;
+               };
+
+               cpu@1 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <1>;
+               };
+
+               cpu@2 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <2>;
+               };
+
+               cpu@3 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <3>;
+               };
+
+               cpu@100 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0x100>;
+               };
+
+               cpu@101 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0x101>;
+               };
+
+               cpu@102 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0x102>;
+               };
+
+               cpu@103 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0x103>;
+               };
+       };
+
+       timer {
+               compatible = "arm,armv7-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+       };
+
+       clocks {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /* TODO: PRCM block has a mux for this. */
+               osc24M: osc24M_clk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <24000000>;
+                       clock-output-names = "osc24M";
+               };
+
+               /*
+                * This is called "internal OSC" in some places.
+                * It is an internal RC-based oscillator.
+                * TODO: Its controls are in the PRCM block.
+                */
+               osc16M: osc16M_clk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <16000000>;
+                       clock-output-names = "osc16M";
+               };
+
+               osc16Md512: osc16Md512_clk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-factor-clock";
+                       clock-div = <512>;
+                       clock-mult = <1>;
+                       clocks = <&osc16M>;
+                       clock-output-names = "osc16M-d512";
+               };
+       };
+
+       soc {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               pio: pinctrl@01c20800 {
+                       compatible = "allwinner,sun8i-a83t-pinctrl";
+                       interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+                       reg = <0x01c20800 0x400>;
+                       clocks = <&osc24M>;
+                       gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       #gpio-cells = <3>;
+
+                       mmc0_pins_a: mmc0@0 {
+                               allwinner,pins = "PF0", "PF1", "PF2",
+                                                "PF3", "PF4", "PF5";
+                               allwinner,function = "mmc0";
+                               allwinner,drive = <SUN4I_PINCTRL_30_MA>;
+                               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+                       };
+
+                       uart0_pins_a: uart0@0 {
+                               allwinner,pins = "PF2", "PF4";
+                               allwinner,function = "uart0";
+                               allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+                               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+                       };
+
+                       uart0_pins_b: uart0@1 {
+                               allwinner,pins = "PB9", "PB10";
+                               allwinner,function = "uart0";
+                               allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+                               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+                       };
+               };
+
+               timer@01c20c00 {
+                       compatible = "allwinner,sun4i-a10-timer";
+                       reg = <0x01c20c00 0xa0>;
+                       interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&osc24M>;
+               };
+
+               watchdog@01c20ca0 {
+                       compatible = "allwinner,sun6i-a31-wdt";
+                       reg = <0x01c20ca0 0x20>;
+                       interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&osc24M>;
+               };
+
+               uart0: serial@01c28000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x01c28000 0x400>;
+                       interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clocks = <&osc24M>;
+                       status = "disabled";
+               };
+
+               gic: interrupt-controller@01c81000 {
+                       compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+                       reg = <0x01c81000 0x1000>,
+                             <0x01c82000 0x1000>,
+                             <0x01c84000 0x2000>,
+                             <0x01c86000 0x2000>;
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+               };
+       };
+};
index 1524130e43c94c97097b7389dd3951459a1af7ee..6f6b4e469ac933e48cfcea4e27e742edcbb0125e 100644 (file)
                };
 
                pll6: clk@01c20028 {
-                       #clock-cells = <1>;
+                       #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-pll6-clk";
                        reg = <0x01c20028 0x4>;
                        clocks = <&osc24M>;
-                       clock-output-names = "pll6", "pll6x2";
+                       clock-output-names = "pll6";
                };
 
                pll6d2: pll6d2_clk {
                        compatible = "fixed-factor-clock";
                        clock-div = <2>;
                        clock-mult = <1>;
-                       clocks = <&pll6 0>;
-                       clock-output-names = "pll6d2";
+                       clocks = <&pll6>;
+                       clock-output-names = "pll6-d2";
                };
 
-               /* dummy clock until pll6 can be reused */
-               pll8: pll8_clk {
+               pll6x2: pll6x2_clk {
                        #clock-cells = <0>;
-                       compatible = "fixed-clock";
-                       clock-frequency = <1>;
+                       compatible = "fixed-factor-clock";
+                       clock-div = <1>;
+                       clock-mult = <2>;
+                       clocks = <&pll6>;
+                       clock-output-names = "pll6-2x";
+               };
+
+               pll8: clk@01c20044 {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun6i-a31-pll6-clk";
+                       reg = <0x01c20044 0x4>;
+                       clocks = <&osc24M>;
                        clock-output-names = "pll8";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
                        clock-output-names = "ahb1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
                        clock-output-names = "apb2";
                };
 
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>, <&pll8>;
+                       clocks = <&osc24M>, <&pll6>, <&pll8>;
                        clock-output-names = "mmc0",
                                             "mmc0_output",
                                             "mmc0_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>, <&pll8>;
+                       clocks = <&osc24M>, <&pll6>, <&pll8>;
                        clock-output-names = "mmc1",
                                             "mmc1_output",
                                             "mmc1_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>, <&pll8>;
+                       clocks = <&osc24M>, <&pll6>, <&pll8>;
                        clock-output-names = "mmc2",
                                             "mmc2_output",
                                             "mmc2_sample";
                        #clock-cells = <0>;
                        compatible = "allwinner,sun8i-a23-mbus-clk";
                        reg = <0x01c2015c 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+                       clocks = <&osc24M>, <&pll6x2>, <&pll5>;
                        clock-output-names = "mbus";
                };
        };
index 382bd9fc5647abbe4e23eb0d8ac3ed855cd1cd4d..eb2ccd0a3bd5d4217f7a2a91a7383d87594ef074 100644 (file)
        vmmc-supply = <&reg_vcc3v0>;
        bus-width = <8>;
        non-removable;
+       cap-mmc-hw-reset;
        status = "okay";
 };
 
+&mmc2_8bit_pins {
+       /* Increase drive strength for DDR modes */
+       allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+};
+
 &r_ir {
        status = "okay";
 };
index c0060e4f7379fe775d78f6dfb9d5db4c169f7a05..d7a20d92b1143b1a7713d998cec753a2dfada507 100644 (file)
        status = "okay";
 };
 
-&i2c3 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&i2c3_pins_a>;
-       status = "okay";
-};
-
-&i2c3_pins_a {
-       /* Enable internal pull-up */
-       allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
-};
-
 &ohci0 {
        status = "okay";
 };
        vmmc-supply = <&reg_vcc3v0>;
        bus-width = <8>;
        non-removable;
+       cap-mmc-hw-reset;
        status = "okay";
 };
 
+&mmc2_8bit_pins {
+       /* Increase drive strength for DDR modes */
+       allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+};
+
 &reg_usb1_vbus {
        pinctrl-0 = <&usb1_vbus_pin_optimus>;
        gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
        status = "okay";
 };
 
-&uart4 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&uart4_pins_a>;
-       status = "okay";
-};
-
-&uart4_pins_a {
-       /* Enable internal pull-up */
-       allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
-};
-
 &usbphy1 {
        phy-supply = <&reg_usb1_vbus>;
        status = "okay";
index e838f206f2a0f34f361bf858d00a6762b4d5739c..f68b3242b33a09b0ff0c197c817b75525c9d9bb9 100644 (file)
                };
 
                mmc0: mmc@01c0f000 {
-                       compatible = "allwinner,sun5i-a13-mmc";
+                       compatible = "allwinner,sun9i-a80-mmc";
                        reg = <0x01c0f000 0x1000>;
                        clocks = <&mmc_config_clk 0>, <&mmc0_clk 0>,
                                 <&mmc0_clk 1>, <&mmc0_clk 2>;
                };
 
                mmc1: mmc@01c10000 {
-                       compatible = "allwinner,sun5i-a13-mmc";
+                       compatible = "allwinner,sun9i-a80-mmc";
                        reg = <0x01c10000 0x1000>;
                        clocks = <&mmc_config_clk 1>, <&mmc1_clk 0>,
                                 <&mmc1_clk 1>, <&mmc1_clk 2>;
                };
 
                mmc2: mmc@01c11000 {
-                       compatible = "allwinner,sun5i-a13-mmc";
+                       compatible = "allwinner,sun9i-a80-mmc";
                        reg = <0x01c11000 0x1000>;
                        clocks = <&mmc_config_clk 2>, <&mmc2_clk 0>,
                                 <&mmc2_clk 1>, <&mmc2_clk 2>;
                };
 
                mmc3: mmc@01c12000 {
-                       compatible = "allwinner,sun5i-a13-mmc";
+                       compatible = "allwinner,sun9i-a80-mmc";
                        reg = <0x01c12000 0x1000>;
                        clocks = <&mmc_config_clk 3>, <&mmc3_clk 0>,
                                 <&mmc3_clk 1>, <&mmc3_clk 2>;
                        mmc2_8bit_pins: mmc2_8bit {
                                allwinner,pins = "PC6", "PC7", "PC8", "PC9",
                                                 "PC10", "PC11", "PC12",
-                                                "PC13", "PC14", "PC15";
+                                                "PC13", "PC14", "PC15",
+                                                "PC16";
                                allwinner,function = "mmc2";
                                allwinner,drive = <SUN4I_PINCTRL_30_MA>;
                                allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
diff --git a/arch/arm/boot/dts/sunxi-itead-core-common.dtsi b/arch/arm/boot/dts/sunxi-itead-core-common.dtsi
new file mode 100644 (file)
index 0000000..2565d51
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2015 - Marcus Cooper <codekipper@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sunxi-common-regulators.dtsi"
+
+/ {
+       aliases {
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+};
+
+&cpu0 {
+       cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+       status = "okay";
+};
+
+&ehci1 {
+       status = "okay";
+};
+
+&i2c0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2c0_pins_a>;
+       status = "okay";
+
+       axp209: pmic@34 {
+               reg = <0x34>;
+       };
+};
+
+&i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2c1_pins_a>;
+       status = "okay";
+};
+
+&ohci0 {
+       status = "okay";
+};
+
+&ohci1 {
+       status = "okay";
+};
+
+#include "axp209.dtsi"
+
+&reg_dcdc2 {
+       regulator-always-on;
+       regulator-min-microvolt = <1000000>;
+       regulator-max-microvolt = <1400000>;
+       regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+       regulator-always-on;
+       regulator-min-microvolt = <1000000>;
+       regulator-max-microvolt = <1400000>;
+       regulator-name = "vdd-int-dll";
+};
+
+&reg_ldo1 {
+       regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+       regulator-always-on;
+       regulator-min-microvolt = <3000000>;
+       regulator-max-microvolt = <3000000>;
+       regulator-name = "avcc";
+};
+
+&reg_usb1_vbus {
+       status = "okay";
+};
+
+&reg_usb2_vbus {
+       status = "okay";
+};
+
+&uart0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&uart0_pins_a>;
+       status = "okay";
+};
+
+&usbphy {
+       usb1_vbus-supply = <&reg_usb1_vbus>;
+       usb2_vbus-supply = <&reg_usb2_vbus>;
+       status = "okay";
+};
index d845bd1448b5459f9a6e4ab52c538c541b8832ef..5017ed8ad5c4244b07956ce3243f033e76f98d5d 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performace. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra114-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra114-hsuart", "nvidia,tegra30-hsuart".
         */
        uarta: serial@70006000 {
index 66b4451eb2ca1b98bfbc33263eb31d18df37d1b0..4ee2e63e11d04ec4a7dfa00b6925fc56d08deb50 100644 (file)
        aliases {
                rtc0 = "/i2c@0,7000d000/pmic@40";
                rtc1 = "/rtc@0,7000e000";
+
+               /* This order keeps the mapping DB9 connector <-> ttyS0 */
                serial0 = &uartd;
+               serial1 = &uarta;
+               serial2 = &uartb;
        };
 
        memory {
                };
        };
 
+       /*
+        * First high speed UART, exposed on the expansion connector J3A2
+        *   Pin 41: BR_UART1_TXD
+        *   Pin 44: BR_UART1_RXD
+        */
+       serial@0,70006000 {
+               compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
+               status = "okay";
+       };
+
+       /*
+        * Second high speed UART, exposed on the expansion connector J3A2
+        *   Pin 65: UART2_RXD
+        *   Pin 68: UART2_TXD
+        *   Pin 71: UART2_CTS_L
+        *   Pin 74: UART2_RTS_L
+        */
+       serial@0,70006040 {
+               compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
+               status = "okay";
+       };
+
        /* DB9 serial port */
        serial@0,70006300 {
                status = "okay";
index 68669f791c8baa5ec9fd9d27a37e6ef716006861..995289b59e11c7bc5a9ac06c4465839dccf67109 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performace. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
         */
        uarta: serial@0,70006000 {
index 33173e1bace9cd289eda8b67679ab0df7d777c9d..8fb61b93c226a255a3ddae7c7c767b54c425f34e 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performace. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra20-uart" and to enable the APB DMA based serial
-        * driver, the comptible is "nvidia,tegra20-hsuart".
+        * driver, the compatible is "nvidia,tegra20-hsuart".
         */
        uarta: serial@70006000 {
                compatible = "nvidia,tegra20-uart";
index 313e260529a31283a4e0c01e0b4ac92b8f102112..c6edc8cea34ed79f77c377e819733d783b409191 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performace. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra30-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra30-hsuart", "nvidia,tegra20-hsuart".
         */
        uarta: serial@70006000 {
index 6e556be42ccdca53e1b109d8c664bb43dbfa0635..c4312c4a37678997b06211924c778df6abcc1b91 100644 (file)
 / {
        bl: backlight {
                compatible = "pwm-backlight";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_gpio_bl_on>;
                pwms = <&pwm0 0 5000000 0>;
+               enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
                status = "disabled";
        };
 };
                        >;
                };
 
+               pinctrl_gpio_bl_on: gpio_bl_on {
+                       fsl,pins = <
+                               VF610_PAD_PTC0__GPIO_45         0x22ef
+                       >;
+               };
+
                pinctrl_i2c0: i2c0grp {
                        fsl,pins = <
                                VF610_PAD_PTB14__I2C0_SCL               0x37ff
index a9ceb5bac40ef244dc6ca18602eef4072b87a57d..a5f07e3664da5a25ad86a376c74ef0869b272ba8 100644 (file)
@@ -16,6 +16,8 @@
        aliases {
                can0 = &can0;
                can1 = &can1;
+               ethernet0 = &fec0;
+               ethernet1 = &fec1;
                serial0 = &uart0;
                serial1 = &uart1;
                serial2 = &uart2;
                                status = "disabled";
                        };
 
+                       sai0: sai@4002f000 {
+                               compatible = "fsl,vf610-sai";
+                               reg = <0x4002f000 0x1000>;
+                               interrupts = <84 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks VF610_CLK_SAI0>,
+                                       <&clks VF610_CLK_SAI0_DIV>,
+                                       <&clks 0>, <&clks 0>;
+                               clock-names = "bus", "mclk1", "mclk2", "mclk3";
+                               dma-names = "tx", "rx";
+                               dmas = <&edma0 0 17>,
+                                       <&edma0 0 16>;
+                               status = "disabled";
+                       };
+
+                       sai1: sai@40030000 {
+                               compatible = "fsl,vf610-sai";
+                               reg = <0x40030000 0x1000>;
+                               interrupts = <85 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks VF610_CLK_SAI1>,
+                                       <&clks VF610_CLK_SAI1_DIV>,
+                                       <&clks 0>, <&clks 0>;
+                               clock-names = "bus", "mclk1", "mclk2", "mclk3";
+                               dma-names = "tx", "rx";
+                               dmas = <&edma0 0 19>,
+                                       <&edma0 0 18>;
+                               status = "disabled";
+                       };
+
                        sai2: sai@40031000 {
                                compatible = "fsl,vf610-sai";
                                reg = <0x40031000 0x1000>;
                                status = "disabled";
                        };
 
+                       sai3: sai@40032000 {
+                               compatible = "fsl,vf610-sai";
+                               reg = <0x40032000 0x1000>;
+                               interrupts = <87 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks VF610_CLK_SAI3>,
+                                       <&clks VF610_CLK_SAI3_DIV>,
+                                       <&clks 0>, <&clks 0>;
+                               clock-names = "bus", "mclk1", "mclk2", "mclk3";
+                               dma-names = "tx", "rx";
+                               dmas = <&edma0 1 9>,
+                                       <&edma0 1 8>;
+                               status = "disabled";
+                       };
+
                        pit: pit@40037000 {
                                compatible = "fsl,vf610-pit";
                                reg = <0x40037000 0x1000>;
index 2dc6da70ae598af4a43f3b8a9bde05d26d386c77..d3c0e69df2591fc8597325919972da4361079ce4 100644 (file)
@@ -16,7 +16,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
-
+#include <asm/div64.h>
 #include <asm/hardware/icst.h>
 
 /*
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
 
 unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
 {
-       return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+       u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+       u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+       do_div(dividend, divisor);
+       return (unsigned long)dividend;
 }
 
 EXPORT_SYMBOL(icst_hz);
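
The hunk above widens the ICST frequency calculation to a 64-bit dividend and uses do_div(), because p->ref * 2 * (vco.v + 8) can exceed 32 bits for large VCO settings and silently wrap before the division. The following stand-alone user-space sketch (not part of the patch; the parameter values are arbitrary illustrative numbers, and plain 64-bit division stands in for the kernel's do_div() helper) shows the wraparound the change avoids:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Arbitrary example parameters: 24 MHz reference, large VDW value. */
        uint32_t ref = 24000000, v = 263, r = 1, s2div = 2;

        /* Old-style 32-bit arithmetic: ref * 2 * (v + 8) wraps modulo 2^32
         * before the divide, giving a bogus frequency. */
        uint32_t wrapped = ref * 2 * (v + 8) / ((r + 2) * s2div);

        /* New-style: widen the dividend to 64 bits before dividing, which is
         * what the patched icst_hz() does via do_div(). */
        uint64_t dividend = (uint64_t)ref * 2 * (v + 8);
        uint32_t divisor  = (r + 2) * s2div;
        uint64_t correct  = dividend / divisor;

        printf("32-bit result: %u Hz\n", wrapped);
        printf("64-bit result: %llu Hz\n", (unsigned long long)correct);
        return 0;
}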
index 24dcd2bb1215dd9b600fdeea5e59e5077a1de8b4..6ffd7e76f3ce0d96fc72b635d2615d4e38eaaf87 100644 (file)
@@ -26,12 +26,14 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPUFREQ_DT=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -193,7 +195,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX8997=y
 CONFIG_RTC_DRV_MAX77686=y
-CONFIG_RTC_DRV_MAX77802=y
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_DMADEVICES=y
@@ -238,7 +239,12 @@ CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_DEV_S5P=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=m
+CONFIG_CRYPTO_SHA256_ARM=m
+CONFIG_CRYPTO_SHA512_ARM=m
+CONFIG_CRYPTO_AES_ARM_BS=m
 CONFIG_CRC_CCITT=y
 CONFIG_FONTS=y
 CONFIG_FONT_7x14=y
index 2d5253dcc2266174550d709771a28fb8b4161c70..25a6066493e46baf2ac8a99ae77c6515bdbf9aa8 100644 (file)
@@ -47,6 +47,7 @@ CONFIG_PCI=y
 CONFIG_PCI_MSI=y
 CONFIG_PCI_IMX6=y
 CONFIG_SMP=y
+CONFIG_ARM_PSCI=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
@@ -320,6 +321,8 @@ CONFIG_IIO=y
 CONFIG_VF610_ADC=y
 CONFIG_PWM=y
 CONFIG_PWM_IMX=y
+CONFIG_NVMEM=y
+CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
index 314f6be2dca2b4aec6aaa82cc8cfb2ba06c2bb29..e28c660c35e9c0ca1fb97fcc68da828d45b83a36 100644 (file)
@@ -426,6 +426,7 @@ CONFIG_SUNXI_WATCHDOG=y
 CONFIG_IMX2_WDT=y
 CONFIG_TEGRA_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
@@ -577,6 +578,7 @@ CONFIG_SND_SOC_WM8978=m
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
+CONFIG_USB_XHCI_RCAR=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_MSM=m
 CONFIG_USB_EHCI_EXYNOS=y
@@ -664,7 +666,6 @@ CONFIG_RTC_DRV_MAX8907=y
 CONFIG_RTC_DRV_MAX8997=m
 CONFIG_RTC_DRV_MAX77686=y
 CONFIG_RTC_DRV_RK808=m
-CONFIG_RTC_DRV_MAX77802=m
 CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_ST_LPC=y
index af29780accdc680e8ba9c6d3741fb8a940d318fa..9317e5a5b730c6f506918cb949ffe07de6add073 100644 (file)
@@ -137,6 +137,7 @@ CONFIG_SND=y
 CONFIG_SND_SOC=y
 CONFIG_SND_KIRKWOOD_SOC=y
 CONFIG_SND_SOC_ALC5623=y
+CONFIG_SND_SOC_CS42L51_I2C=y
 CONFIG_SND_SIMPLE_CARD=y
 CONFIG_HID_DRAGONRISE=y
 CONFIG_HID_GYRATION=y
index c6729bf0a8ddb5e272ee97690cd58c68b013b5fa..cf363abd974ec429b9d82d22c739ec080fa3e3ae 100644 (file)
@@ -109,6 +109,7 @@ CONFIG_USB_XHCI_MVEBU=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_STORAGE=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
index b47e7c6628c9f8883322cffba9e97c12f6963098..1b2d9b377976820f802f01455da261d2d9b6876a 100644 (file)
@@ -141,6 +141,8 @@ CONFIG_IIO=y
 CONFIG_IIO_SYSFS_TRIGGER=y
 CONFIG_PWM=y
 CONFIG_PWM_MXS=y
+CONFIG_NVMEM=y
+CONFIG_NVMEM_MXS_OCOTP=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
index c5e1943e5427db037b98ce779566aeab1b1c9310..a7151744b85c17d8c7041f33141e7c4cf7809fec 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_SOC_AM33XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
 CONFIG_ARM_THUMBEE=y
+CONFIG_ARM_KERNMEM_PERMS=y
 CONFIG_ARM_ERRATA_411920=y
 CONFIG_ARM_ERRATA_430973=y
 CONFIG_SMP=y
@@ -177,6 +178,7 @@ CONFIG_TI_CPTS=y
 CONFIG_AT803X_PHY=y
 CONFIG_SMSC_PHY=y
 CONFIG_USB_USBNET=m
+CONFIG_USB_NET_SMSC75XX=m
 CONFIG_USB_NET_SMSC95XX=m
 CONFIG_USB_ALI_M5632=y
 CONFIG_USB_AN2720=y
@@ -354,6 +356,11 @@ CONFIG_USB_MUSB_DSPS=m
 CONFIG_USB_INVENTRA_DMA=y
 CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_DWC3=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
 CONFIG_USB_TEST=m
 CONFIG_AM335X_PHY_USB=y
 CONFIG_USB_GADGET=m
@@ -387,6 +394,7 @@ CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_PWM=m
+CONFIG_LEDS_PCA963X=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_ONESHOT=m
@@ -449,6 +457,8 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_SPLIT=y
+CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
index 969738324a5d5f815263a12cf2f580098603cf4a..b7b714c3958c2fdad9bb8658680b7a31ee6d04e8 100644 (file)
@@ -20,7 +20,6 @@ CONFIG_ARCH_R8A7791=y
 CONFIG_ARCH_R8A7793=y
 CONFIG_ARCH_R8A7794=y
 CONFIG_ARCH_SH73A0=y
-CONFIG_CPU_BPREDICT_DISABLE=y
 CONFIG_PL310_ERRATA_588369=y
 CONFIG_ARM_ERRATA_754322=y
 CONFIG_PCI=y
@@ -163,6 +162,8 @@ CONFIG_SND_SOC_RSRC_CARD=y
 CONFIG_SND_SOC_AK4642=y
 CONFIG_SND_SOC_WM8978=y
 CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_RCAR=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_R8A66597_HCD=y
index e1f07764b0d6bf99e8ed9386a24a086fa82be1e1..7d919a9b32e5f6e251e1a42d2ccd8f7d33d62483 100644 (file)
@@ -74,7 +74,7 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
 static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
 {
        unsigned long long res;
-       unsigned int tmp = 0;
+       register unsigned int tmp asm("ip") = 0;
 
        if (!bias) {
                asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"
@@ -90,12 +90,12 @@ static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
                        : "r" (m), "r" (n)
                        : "cc");
        } else {
-               asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"
-                       "cmn    %Q0, %Q1\n\t"
-                       "adcs   %R0, %R0, %R1\n\t"
-                       "adc    %Q0, %3, #0"
-                       : "=&r" (res)
-                       : "r" (m), "r" (n), "r" (tmp)
+               asm (   "umull  %Q0, %R0, %Q2, %Q3\n\t"
+                       "cmn    %Q0, %Q2\n\t"
+                       "adcs   %R0, %R0, %R2\n\t"
+                       "adc    %Q0, %1, #0"
+                       : "=&r" (res), "+&r" (tmp)
+                       : "r" (m), "r" (n)
                        : "cc");
        }
 
index 194c91b610ffecfd4071da89d16b923c614bf68d..c35c349da06983b5eee05bc8bca52e526ed1bc52 100644 (file)
@@ -79,6 +79,8 @@
 #define rr_lo_hi(a1, a2) a1, a2
 #endif
 
+#define kvm_ksym_ref(kva)      (kva)
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
index c79b57bf71c40d1fc2083f67b198377936e0002f..49bf6b1e2177d6dc049baf014dc42a3971f6b6b0 100644 (file)
@@ -273,14 +273,14 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((phys_addr_t)(pfn) << PAGE_SHIFT)
 
-extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
+extern unsigned long (*arch_virt_to_idmap)(unsigned long x);
 
 /*
  * These are for systems that have a hardware interconnect supported alias of
  * physical memory for idmap purposes.  Most cases should leave these
- * untouched.
+ * untouched.  Note: this can only return addresses less than 4GiB.
  */
-static inline phys_addr_t __virt_to_idmap(unsigned long x)
+static inline unsigned long __virt_to_idmap(unsigned long x)
 {
        if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
                return arch_virt_to_idmap(x);
index a5635444ca410b49b5109a65535321d9c3df8d5c..d7de19a77d51186b7a673febfe0b0fc8752d2643 100644 (file)
@@ -3,8 +3,6 @@
 
 #ifdef __KERNEL__
 #include <asm-generic/pci-dma-compat.h>
-#include <asm-generic/pci-bridge.h>
-
 #include <asm/mach/pci.h> /* for pci_sys_data */
 
 extern unsigned long pcibios_min_io;
@@ -41,5 +39,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 
 #endif /* __KERNEL__ */
 #endif
index 619d8cc1ac125f6c6038daef7a814e146cebebb1..92c44760d6569181d2dd6fd191d995b453cee572 100644 (file)
@@ -11,6 +11,7 @@
  *
  */
 
+#include <asm/assembler.h>
 #include "imx-uart.h"
 
 /*
@@ -34,6 +35,7 @@
                .endm
 
                .macro  senduart,rd,rx
+               ARM_BE8(rev \rd, \rd)
                str     \rd, [\rx, #0x40]       @ TXDATA
                .endm
 
@@ -42,6 +44,7 @@
 
                .macro  busyuart,rd,rx
 1002:          ldr     \rd, [\rx, #0x98]       @ SR2
+               ARM_BE8(rev \rd, \rd)
                tst     \rd, #1 << 3            @ TXDC
                beq     1002b                   @ wait until transmit done
                .endm
diff --git a/arch/arm/include/debug/palmchip.S b/arch/arm/include/debug/palmchip.S
new file mode 100644 (file)
index 0000000..6824b2d
--- /dev/null
@@ -0,0 +1,11 @@
+#include <linux/serial_reg.h>
+
+#undef UART_TX
+#undef UART_LSR
+#undef UART_MSR
+
+#define UART_TX 1
+#define UART_LSR 7
+#define UART_MSR 8
+
+#include <debug/8250.S>
index ede692ffa32ed14958f81eec0d7caac8f7efe853..5dd2528e9e45e369d0879185291e6981dbea15e4 100644 (file)
 #define __NR_userfaultfd               (__NR_SYSCALL_BASE+388)
 #define __NR_membarrier                        (__NR_SYSCALL_BASE+389)
 #define __NR_mlock2                    (__NR_SYSCALL_BASE+390)
+#define __NR_copy_file_range           (__NR_SYSCALL_BASE+391)
 
 /*
  * The following SWIs are ARM private.
index ac368bb068d1409af37a8e2585bb837b1ef1f02b..dfc7cd6851ad4bbd09b11ae94ee6056f691b1fcb 100644 (file)
                CALL(sys_userfaultfd)
                CALL(sys_membarrier)
                CALL(sys_mlock2)
+               CALL(sys_copy_file_range)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 3ce377f7251f3429668c2e2b563fcd8062c991ae..788e40c1254f65dfd1d942497e163f88ce40f2ec 100644 (file)
@@ -1064,7 +1064,6 @@ ENDPROC(vector_\name)
        .endm
 
        .section .stubs, "ax", %progbits
-__stubs_start:
        @ This must be the first word
        .word   vector_swi
 
@@ -1206,10 +1205,10 @@ vector_addrexcptn:
        .equ    vector_fiq_offset, vector_fiq
 
        .section .vectors, "ax", %progbits
-__vectors_start:
+.L__vectors_start:
        W(b)    vector_rst
        W(b)    vector_und
-       W(ldr)  pc, __vectors_start + 0x1000
+       W(ldr)  pc, .L__vectors_start + 0x1000
        W(b)    vector_pabt
        W(b)    vector_dabt
        W(b)    vector_addrexcptn
index a71501ff6f1877fc9813fd4c3e10c51ae36f04e7..b09561a6d06a00eb9029fc9916ba1f654ca7e1c2 100644 (file)
@@ -62,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
 
        ret = swsusp_save();
        if (ret == 0)
-               _soft_restart(virt_to_phys(cpu_resume), false);
+               _soft_restart(virt_to_idmap(cpu_resume), false);
        return ret;
 }
 
@@ -87,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
        for (pbe = restore_pblist; pbe; pbe = pbe->next)
                copy_page(pbe->orig_address, pbe->address);
 
-       _soft_restart(virt_to_phys(cpu_resume), false);
+       _soft_restart(virt_to_idmap(cpu_resume), false);
 }
 
 static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
index 1d45320ee125d572b108d8e40bb6e0150fea8b9a..ece04a457486c5998d312bce4f3c69b97e0e7b64 100644 (file)
@@ -95,7 +95,7 @@ void __init init_IRQ(void)
                        outer_cache.write_sec = machine_desc->l2c_write_sec;
                ret = l2x0_of_init(machine_desc->l2c_aux_val,
                                   machine_desc->l2c_aux_mask);
-               if (ret)
+               if (ret && ret != -ENODEV)
                        pr_err("L2C: failed to init: %d\n", ret);
        }
 
index 8bf3b7c098881b951df038575c6baf14e84df7b9..59fd0e24c56b150a1f22ab21983d72022fe52701 100644 (file)
@@ -143,10 +143,8 @@ void (*kexec_reinit)(void);
 
 void machine_kexec(struct kimage *image)
 {
-       unsigned long page_list;
-       unsigned long reboot_code_buffer_phys;
-       unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
-       unsigned long reboot_entry_phys;
+       unsigned long page_list, reboot_entry_phys;
+       void (*reboot_entry)(void);
        void *reboot_code_buffer;
 
        /*
@@ -159,9 +157,6 @@ void machine_kexec(struct kimage *image)
 
        page_list = image->head & PAGE_MASK;
 
-       /* we need both effective and real address here */
-       reboot_code_buffer_phys =
-           page_to_pfn(image->control_code_page) << PAGE_SHIFT;
        reboot_code_buffer = page_address(image->control_code_page);
 
        /* Prepare parameters for reboot_code_buffer*/
@@ -174,10 +169,11 @@ void machine_kexec(struct kimage *image)
 
        /* copy our kernel relocation code to the control code page */
        reboot_entry = fncpy(reboot_code_buffer,
-                            reboot_entry,
+                            &relocate_new_kernel,
                             relocate_new_kernel_size);
-       reboot_entry_phys = (unsigned long)reboot_entry +
-               (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
+
+       /* get the identity mapping physical address for the reboot code */
+       reboot_entry_phys = virt_to_idmap(reboot_entry);
 
        pr_info("Bye!\n");
 
index 38269358fd252c6bb93fd58a0478319c436cdfd3..71a2ff9ec4900c58677f12114c85c82e8cfaa575 100644 (file)
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
        flush_cache_all();
 
        /* Switch to the identity mapping. */
-       phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
+       phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
        phys_reset((unsigned long)addr);
 
        /* Should never get here. */
index 08b7847bf9124f004d7214c0e9c4dae23c0fffd2..ec279d161b3287e1df5b51702c499a2ac0187054 100644 (file)
@@ -40,7 +40,7 @@
  * to run the rebalance_domains for all idle cores and the cpu_capacity can be
  * updated during this sequence.
  */
-static DEFINE_PER_CPU(unsigned long, cpu_scale);
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
 
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
@@ -306,8 +306,6 @@ void __init init_cpu_topology(void)
                cpu_topo->socket_id = -1;
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
-
-               set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
        }
        smp_wmb();
 
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
new file mode 100644 (file)
index 0000000..6f59ef2
--- /dev/null
@@ -0,0 +1,322 @@
+/* ld script to make ARM Linux kernel
+ * taken from the i386 version by Russell King
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#ifdef CONFIG_DEBUG_RODATA
+#include <asm/pgtable.h>
+#endif
+
+#define PROC_INFO                                                      \
+       . = ALIGN(4);                                                   \
+       VMLINUX_SYMBOL(__proc_info_begin) = .;                          \
+       *(.proc.info.init)                                              \
+       VMLINUX_SYMBOL(__proc_info_end) = .;
+
+#define IDMAP_TEXT                                                     \
+       ALIGN_FUNCTION();                                               \
+       VMLINUX_SYMBOL(__idmap_text_start) = .;                         \
+       *(.idmap.text)                                                  \
+       VMLINUX_SYMBOL(__idmap_text_end) = .;                           \
+       . = ALIGN(PAGE_SIZE);                                           \
+       VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;                     \
+       *(.hyp.idmap.text)                                              \
+       VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x)                x
+#else
+#define ARM_CPU_DISCARD(x)     x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+       defined(CONFIG_GENERIC_BUG)
+#define ARM_EXIT_KEEP(x)       x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x)    x
+#endif
+
+OUTPUT_ARCH(arm)
+ENTRY(stext)
+
+#ifndef __ARMEB__
+jiffies = jiffies_64;
+#else
+jiffies = jiffies_64 + 4;
+#endif
+
+SECTIONS
+{
+       /*
+        * XXX: The linker does not define how output sections are
+        * assigned to input sections when there are multiple statements
+        * matching the same input section name.  There is no documented
+        * order of matching.
+        *
+        * unwind exit sections must be discarded before the rest of the
+        * unwind sections get included.
+        */
+       /DISCARD/ : {
+               *(.ARM.exidx.exit.text)
+               *(.ARM.extab.exit.text)
+               ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
+               ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
+               ARM_EXIT_DISCARD(EXIT_TEXT)
+               ARM_EXIT_DISCARD(EXIT_DATA)
+               EXIT_CALL
+#ifndef CONFIG_MMU
+               *(.text.fixup)
+               *(__ex_table)
+#endif
+#ifndef CONFIG_SMP_ON_UP
+               *(.alt.smp.init)
+#endif
+               *(.discard)
+               *(.discard.*)
+       }
+
+       . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
+
+       .head.text : {
+               _text = .;
+               HEAD_TEXT
+       }
+
+#ifdef CONFIG_DEBUG_RODATA
+       . = ALIGN(1<<SECTION_SHIFT);
+#endif
+
+       .text : {                       /* Real text segment            */
+               _stext = .;             /* Text and read-only data      */
+                       IDMAP_TEXT
+                       __exception_text_start = .;
+                       *(.exception.text)
+                       __exception_text_end = .;
+                       IRQENTRY_TEXT
+                       TEXT_TEXT
+                       SCHED_TEXT
+                       LOCK_TEXT
+                       KPROBES_TEXT
+                       *(.gnu.warning)
+                       *(.glue_7)
+                       *(.glue_7t)
+               . = ALIGN(4);
+               *(.got)                 /* Global offset table          */
+                       ARM_CPU_KEEP(PROC_INFO)
+       }
+
+#ifdef CONFIG_DEBUG_RODATA
+       . = ALIGN(1<<SECTION_SHIFT);
+#endif
+       RO_DATA(PAGE_SIZE)
+
+       . = ALIGN(4);
+       __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+               __start___ex_table = .;
+#ifdef CONFIG_MMU
+               *(__ex_table)
+#endif
+               __stop___ex_table = .;
+       }
+
+#ifdef CONFIG_ARM_UNWIND
+       /*
+        * Stack unwinding tables
+        */
+       . = ALIGN(8);
+       .ARM.unwind_idx : {
+               __start_unwind_idx = .;
+               *(.ARM.exidx*)
+               __stop_unwind_idx = .;
+       }
+       .ARM.unwind_tab : {
+               __start_unwind_tab = .;
+               *(.ARM.extab*)
+               __stop_unwind_tab = .;
+       }
+#endif
+
+       NOTES
+
+       _etext = .;                     /* End of text and rodata section */
+
+       /*
+        * The vectors and stubs are relocatable code, and the
+        * only thing that matters is their relative offsets
+        */
+       __vectors_start = .;
+       .vectors 0 : AT(__vectors_start) {
+               *(.vectors)
+       }
+       . = __vectors_start + SIZEOF(.vectors);
+       __vectors_end = .;
+
+       __stubs_start = .;
+       .stubs 0x1000 : AT(__stubs_start) {
+               *(.stubs)
+       }
+       . = __stubs_start + SIZEOF(.stubs);
+       __stubs_end = .;
+
+       INIT_TEXT_SECTION(8)
+       .exit.text : {
+               ARM_EXIT_KEEP(EXIT_TEXT)
+       }
+       .init.proc.info : {
+               ARM_CPU_DISCARD(PROC_INFO)
+       }
+       .init.arch.info : {
+               __arch_info_begin = .;
+               *(.arch.info.init)
+               __arch_info_end = .;
+       }
+       .init.tagtable : {
+               __tagtable_begin = .;
+               *(.taglist.init)
+               __tagtable_end = .;
+       }
+#ifdef CONFIG_SMP_ON_UP
+       .init.smpalt : {
+               __smpalt_begin = .;
+               *(.alt.smp.init)
+               __smpalt_end = .;
+       }
+#endif
+       .init.pv_table : {
+               __pv_table_begin = .;
+               *(.pv_table)
+               __pv_table_end = .;
+       }
+       .init.data : {
+               INIT_SETUP(16)
+               INIT_CALLS
+               CON_INITCALL
+               SECURITY_INITCALL
+               INIT_RAM_FS
+       }
+
+#ifdef CONFIG_SMP
+       PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
+
+       __data_loc = ALIGN(4);          /* location in binary */
+       . = PAGE_OFFSET + TEXT_OFFSET;
+
+       .data : AT(__data_loc) {
+               _data = .;              /* address in memory */
+               _sdata = .;
+
+               /*
+                * first, the init task union, aligned
+                * to an 8192 byte boundary.
+                */
+               INIT_TASK_DATA(THREAD_SIZE)
+
+               . = ALIGN(PAGE_SIZE);
+               __init_begin = .;
+               INIT_DATA
+               ARM_EXIT_KEEP(EXIT_DATA)
+               . = ALIGN(PAGE_SIZE);
+               __init_end = .;
+
+               NOSAVE_DATA
+               CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+               READ_MOSTLY_DATA(L1_CACHE_BYTES)
+
+               /*
+                * and the usual data section
+                */
+               DATA_DATA
+               CONSTRUCTORS
+
+               _edata = .;
+       }
+       _edata_loc = __data_loc + SIZEOF(.data);
+
+#ifdef CONFIG_HAVE_TCM
+        /*
+        * We align everything to a page boundary so we can
+        * free it after init has commenced and TCM contents have
+        * been copied to its destination.
+        */
+       .tcm_start : {
+               . = ALIGN(PAGE_SIZE);
+               __tcm_start = .;
+               __itcm_start = .;
+       }
+
+       /*
+        * Link these to the ITCM RAM
+        * Put VMA to the TCM address and LMA to the common RAM
+        * and we'll upload the contents from RAM to TCM and free
+        * the used RAM after that.
+        */
+       .text_itcm ITCM_OFFSET : AT(__itcm_start)
+       {
+               __sitcm_text = .;
+               *(.tcm.text)
+               *(.tcm.rodata)
+               . = ALIGN(4);
+               __eitcm_text = .;
+       }
+
+       /*
+        * Reset the dot pointer, this is needed to create the
+        * relative __dtcm_start below (to be used as extern in code).
+        */
+       . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
+
+       .dtcm_start : {
+               __dtcm_start = .;
+       }
+
+       /* TODO: add remainder of ITCM as well, that can be used for data! */
+       .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
+       {
+               . = ALIGN(4);
+               __sdtcm_data = .;
+               *(.tcm.data)
+               . = ALIGN(4);
+               __edtcm_data = .;
+       }
+
+       /* Reset the dot pointer or the linker gets confused */
+       . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
+
+       /* End marker for freeing TCM copy in linked object */
+       .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
+               . = ALIGN(PAGE_SIZE);
+               __tcm_end = .;
+       }
+#endif
+
+       BSS_SECTION(0, 0, 0)
+       _end = .;
+
+       STABS_DEBUG
+}
+
+/*
+ * These must never be empty
+ * If you have to comment these two assert statements out, your
+ * binutils is too old (for other reasons as well)
+ */
+ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
+ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+
+/*
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
+ * The above comment applies as well.
+ */
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
+       "HYP init code too big or misaligned")
index 8b60fde5ce48a628e5d1f2c682d2aa0684ed6642..cdc84693091b82cde1f0c0c539c848dfcaea6738 100644 (file)
@@ -3,12 +3,16 @@
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  */
 
+#ifdef CONFIG_XIP_KERNEL
+#include "vmlinux-xip.lds.S"
+#else
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
 #include <asm/pgtable.h>
 #endif
 
@@ -84,17 +88,13 @@ SECTIONS
                *(.discard.*)
        }
 
-#ifdef CONFIG_XIP_KERNEL
-       . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
-#else
        . = PAGE_OFFSET + TEXT_OFFSET;
-#endif
        .head.text : {
                _text = .;
                HEAD_TEXT
        }
 
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
 #endif
 
@@ -117,7 +117,7 @@ SECTIONS
                        ARM_CPU_KEEP(PROC_INFO)
        }
 
-#ifdef CONFIG_DEBUG_RODATA
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
 #endif
        RO_DATA(PAGE_SIZE)
@@ -152,14 +152,13 @@ SECTIONS
 
        _etext = .;                     /* End of text and rodata section */
 
-#ifndef CONFIG_XIP_KERNEL
-# ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
-# else
+#else
        . = ALIGN(PAGE_SIZE);
-# endif
-       __init_begin = .;
 #endif
+       __init_begin = .;
+
        /*
         * The vectors and stubs are relocatable code, and the
         * only thing that matters is their relative offsets
@@ -208,37 +207,28 @@ SECTIONS
                __pv_table_end = .;
        }
        .init.data : {
-#ifndef CONFIG_XIP_KERNEL
                INIT_DATA
-#endif
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
        }
-#ifndef CONFIG_XIP_KERNEL
        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }
-#endif
 
 #ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
 #endif
 
-#ifdef CONFIG_XIP_KERNEL
-       __data_loc = ALIGN(4);          /* location in binary */
-       . = PAGE_OFFSET + TEXT_OFFSET;
-#else
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
 #else
        . = ALIGN(THREAD_SIZE);
 #endif
        __init_end = .;
        __data_loc = .;
-#endif
 
        .data : AT(__data_loc) {
                _data = .;              /* address in memory */
@@ -250,15 +240,6 @@ SECTIONS
                 */
                INIT_TASK_DATA(THREAD_SIZE)
 
-#ifdef CONFIG_XIP_KERNEL
-               . = ALIGN(PAGE_SIZE);
-               __init_begin = .;
-               INIT_DATA
-               ARM_EXIT_KEEP(EXIT_DATA)
-               . = ALIGN(PAGE_SIZE);
-               __init_end = .;
-#endif
-
                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
                READ_MOSTLY_DATA(L1_CACHE_BYTES)
@@ -351,3 +332,5 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
  */
 ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
        "HYP init code too big or misaligned")
+
+#endif /* CONFIG_XIP_KERNEL */
index dda1959f0ddeb947e8a8020d7da0b02bb19f89cc..975da6cfbf5917e604b576027189469d95cd4a52 100644 (file)
@@ -982,7 +982,7 @@ static void cpu_init_hyp_mode(void *dummy)
        pgd_ptr = kvm_mmu_get_httbr();
        stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
-       vector_ptr = (unsigned long)__kvm_hyp_vector;
+       vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 
        __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
 
@@ -1074,13 +1074,15 @@ static int init_hyp_mode(void)
        /*
         * Map the Hyp-code called directly from the host
         */
-       err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+       err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start),
+                                 kvm_ksym_ref(__kvm_hyp_code_end));
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_free_mappings;
        }
 
-       err = create_hyp_mappings(__start_rodata, __end_rodata);
+       err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
+                                 kvm_ksym_ref(__end_rodata));
        if (err) {
                kvm_err("Cannot map rodata section\n");
                goto out_free_mappings;
diff --git a/arch/arm/mach-cns3xxx/Makefile.boot b/arch/arm/mach-cns3xxx/Makefile.boot
deleted file mode 100644 (file)
index d079de0..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00C00000
index 652a0bb11578927fc0bbc58983bf5aaafd7eb0d3..aeadd2aa12cf6ca9ddab697f3afc8d61ec4c135f 100644 (file)
@@ -17,6 +17,8 @@ menuconfig ARCH_EXYNOS
        select ARM_GIC
        select COMMON_CLK_SAMSUNG
        select EXYNOS_THERMAL
+       select EXYNOS_PMU
+       select EXYNOS_SROM
        select HAVE_ARM_SCU if SMP
        select HAVE_S3C2410_I2C if I2C
        select HAVE_S3C2410_WATCHDOG if WATCHDOG
@@ -25,6 +27,7 @@ menuconfig ARCH_EXYNOS
        select PINCTRL_EXYNOS
        select PM_GENERIC_DOMAINS if PM
        select S5P_DEV_MFC
+       select SOC_SAMSUNG
        select SRAM
        select THERMAL
        select MFD_SYSCON
index 2f306767cdfe425b8f522a20fd756310bc5e2fad..34d29df3e0062342af5510d6a46addbe4fb21083 100644 (file)
@@ -9,7 +9,7 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)
 
 # Core
 
-obj-$(CONFIG_ARCH_EXYNOS)      += exynos.o pmu.o exynos-smc.o firmware.o
+obj-$(CONFIG_ARCH_EXYNOS)      += exynos.o exynos-smc.o firmware.o
 
 obj-$(CONFIG_EXYNOS_CPU_SUSPEND) += pm.o sleep.o
 obj-$(CONFIG_PM_SLEEP)         += suspend.o
diff --git a/arch/arm/mach-exynos/Makefile.boot b/arch/arm/mach-exynos/Makefile.boot
deleted file mode 100644 (file)
index b9862e2..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-   zreladdr-y  += 0x40008000
-params_phys-y  := 0x40000100
index 1c47aee31e9cc60aeabc8c504b41c76c2379a435..99947ad53ddfd983052d140c00bbcebbbf82848f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/irqchip.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "common.h"
 #include "mfc.h"
-#include "regs-pmu.h"
-
-void __iomem *pmu_base_addr;
 
 static struct map_desc exynos4_iodesc[] __initdata = {
        {
-               .virtual        = (unsigned long)S5P_VA_SROMC,
-               .pfn            = __phys_to_pfn(EXYNOS4_PA_SROMC),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
                .virtual        = (unsigned long)S5P_VA_CMU,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_CMU),
                .length         = SZ_128K,
@@ -64,20 +57,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
        },
 };
 
-static struct map_desc exynos5_iodesc[] __initdata = {
-       {
-               .virtual        = (unsigned long)S5P_VA_SROMC,
-               .pfn            = __phys_to_pfn(EXYNOS5_PA_SROMC),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
-               .virtual        = (unsigned long)S5P_VA_CMU,
-               .pfn            = __phys_to_pfn(EXYNOS5_PA_CMU),
-               .length         = 144 * SZ_1K,
-               .type           = MT_DEVICE,
-       },
-};
-
 static struct platform_device exynos_cpuidle = {
        .name              = "exynos_cpuidle",
 #ifdef CONFIG_ARM_EXYNOS_CPUIDLE
@@ -149,9 +128,6 @@ static void __init exynos_map_io(void)
 {
        if (soc_is_exynos4())
                iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
-
-       if (soc_is_exynos5())
-               iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));
 }
 
 static void __init exynos_init_io(void)
@@ -230,6 +206,10 @@ static const struct of_device_id exynos_cpufreq_matches[] = {
        { .compatible = "samsung,exynos4212", .data = "cpufreq-dt" },
        { .compatible = "samsung,exynos4412", .data = "cpufreq-dt" },
        { .compatible = "samsung,exynos5250", .data = "cpufreq-dt" },
+#ifndef CONFIG_BL_SWITCHER
+       { .compatible = "samsung,exynos5420", .data = "cpufreq-dt" },
+       { .compatible = "samsung,exynos5800", .data = "cpufreq-dt" },
+#endif
        { /* sentinel */ }
 };
 
index de3ae59e1cfbbb4d4d8248224af62243cb23b548..351e839fcb040174adc72bcf3af44957eb75225f 100644 (file)
@@ -25,7 +25,6 @@
 #define EXYNOS_PA_CHIPID               0x10000000
 
 #define EXYNOS4_PA_CMU                 0x10030000
-#define EXYNOS5_PA_CMU                 0x10010000
 
 #define EXYNOS4_PA_DMC0                        0x10400000
 #define EXYNOS4_PA_DMC1                        0x10410000
 #define EXYNOS4_PA_COREPERI            0x10500000
 #define EXYNOS4_PA_L2CC                        0x10502000
 
-#define EXYNOS4_PA_SROMC               0x12570000
-#define EXYNOS5_PA_SROMC               0x12250000
-
-/* Compatibility UART */
-
-#define EXYNOS5440_PA_UART0            0x000B0000
-
 #endif /* __ASM_ARCH_MAP_H */
index 56978199c4798fa236394c232e50d58a61e4fd3d..f086bf615b2972ee640e61256cba3438c9c9ae2a 100644 (file)
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/syscore_ops.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
 
 #include <asm/cputype.h>
 #include <asm/cp15.h>
 #include <asm/mcpm.h>
 #include <asm/smp_plat.h>
 
-#include "regs-pmu.h"
 #include "common.h"
 
 #define EXYNOS5420_CPUS_PER_CLUSTER    4
index 5bd9559786ba77d2eade9c29ab1983e8746f4e54..da46c639f62128024503583225e312de96fc71fb 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 #include <linux/of_address.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cp15.h>
@@ -30,7 +31,6 @@
 #include <mach/map.h>
 
 #include "common.h"
-#include "regs-pmu.h"
 
 extern void exynos4_secondary_startup(void);
 
index 9c1506b499bca6f4599a20ece67494078a95d4d0..b9b9186f878148d604616eea5344e0f88e3d3b5a 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/cpu_pm.h>
 #include <linux/io.h>
 #include <linux/err.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
 
 #include <asm/firmware.h>
 #include <asm/smp_scu.h>
@@ -29,8 +31,6 @@
 #include <plat/pm-common.h>
 
 #include "common.h"
-#include "exynos-pmu.h"
-#include "regs-pmu.h"
 
 static inline void __iomem *exynos_boot_vector_addr(void)
 {
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
deleted file mode 100644 (file)
index dbf9fe9..0000000
+++ /dev/null
@@ -1,967 +0,0 @@
-/*
- * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com/
- *
- * EXYNOS - CPU PMU(Power Management Unit) support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-
-#include <asm/cputype.h>
-
-#include "exynos-pmu.h"
-#include "regs-pmu.h"
-
-#define PMU_TABLE_END  (-1U)
-
-struct exynos_pmu_conf {
-       unsigned int offset;
-       u8 val[NUM_SYS_POWERDOWN];
-};
-
-struct exynos_pmu_data {
-       const struct exynos_pmu_conf *pmu_config;
-       const struct exynos_pmu_conf *pmu_config_extra;
-
-       void (*pmu_init)(void);
-       void (*powerdown_conf)(enum sys_powerdown);
-       void (*powerdown_conf_extra)(enum sys_powerdown);
-};
-
-struct exynos_pmu_context {
-       struct device *dev;
-       const struct exynos_pmu_data *pmu_data;
-};
-
-static void __iomem *pmu_base_addr;
-static struct exynos_pmu_context *pmu_context;
-
-static inline void pmu_raw_writel(u32 val, u32 offset)
-{
-       writel_relaxed(val, pmu_base_addr + offset);
-}
-
-static inline u32 pmu_raw_readl(u32 offset)
-{
-       return readl_relaxed(pmu_base_addr + offset);
-}
-
-static struct exynos_pmu_conf exynos3250_pmu_config[] = {
-       /* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } */
-       { EXYNOS3_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
-       { EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
-       { EXYNOS3_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
-       { EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
-       { EXYNOS3_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
-       { EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
-       { EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS3_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
-       { EXYNOS3_ARM_L2_SYS_PWR_REG,                   { 0x0, 0x0, 0x3} },
-       { EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x1, 0x0} },
-       { EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x1, 0x0} },
-       { EXYNOS3_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
-       { EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
-       { EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
-       { EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG,       { 0x1, 0x1, 0x1} },
-       { EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
-       { EXYNOS3_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x1} },
-       { EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS3_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
-       { EXYNOS3_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
-       { EXYNOS3_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x3, 0x3} },
-       { EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG,          { 0x3, 0x0, 0x0} },
-       { EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x1} },
-       { EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG,          { 0x3, 0x3, 0x3} },
-       { EXYNOS3_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS3_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x1, 0x1} },
-       { EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG,      { 0x1, 0x1, 0x0} },
-       { EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
-       { EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG,     { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS3_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x0} },
-       { EXYNOS3_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
-       { EXYNOS3_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
-       { EXYNOS3_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
-       { EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
-       { EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
-       { EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
-       { EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
-       { EXYNOS3_CAM_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS3_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS3_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS3_LCD0_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
-       { EXYNOS3_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS3_MAUDIO_SYS_PWR_REG,                   { 0x7, 0x0, 0x0} },
-       { EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
-       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
-       { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_CORE1_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE1,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL1,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_COMMON_LOWPWR,                { 0x0, 0x0, 0x2 } },
-       { S5P_L2_0_LOWPWR,                      { 0x2, 0x2, 0x3 } },
-       { S5P_L2_1_LOWPWR,                      { 0x2, 0x2, 0x3 } },
-       { S5P_CMU_ACLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_SCLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_LOWPWR,                 { 0x1, 0x1, 0x0 } },
-       { S5P_APLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_MPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_VPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_EPLL_SYSCLK_LOWPWR,               { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR,     { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_GPSALIVE_LOWPWR,        { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_CAM_LOWPWR,           { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_TV_LOWPWR,            { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_MFC_LOWPWR,           { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_G3D_LOWPWR,           { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_LCD0_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_LCD1_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR,        { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_GPS_LOWPWR,           { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_CAM_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_TV_LOWPWR,              { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_MFC_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_G3D_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_LCD0_LOWPWR,            { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_LCD1_LOWPWR,            { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_GPS_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_TOP_BUS_LOWPWR,                   { 0x3, 0x0, 0x0 } },
-       { S5P_TOP_RETENTION_LOWPWR,             { 0x1, 0x0, 0x1 } },
-       { S5P_TOP_PWR_LOWPWR,                   { 0x3, 0x0, 0x3 } },
-       { S5P_LOGIC_RESET_LOWPWR,               { 0x1, 0x1, 0x0 } },
-       { S5P_ONENAND_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_MODIMIF_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_G2D_ACP_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_USBOTG_MEM_LOWPWR,                { 0x3, 0x0, 0x0 } },
-       { S5P_HSMMC_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_CSSYS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_SECSS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_PCIE_MEM_LOWPWR,                  { 0x3, 0x0, 0x0 } },
-       { S5P_SATA_MEM_LOWPWR,                  { 0x3, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_DRAM_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MAUDIO_LOWPWR,      { 0x1, 0x1, 0x0 } },
-       { S5P_PAD_RETENTION_GPIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_UART_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MMCA_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MMCB_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_EBIA_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_EBIB_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_ISOLATION_LOWPWR,   { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_ALV_SEL_LOWPWR,     { 0x1, 0x0, 0x0 } },
-       { S5P_XUSBXTI_LOWPWR,                   { 0x1, 0x1, 0x0 } },
-       { S5P_XXTI_LOWPWR,                      { 0x1, 0x1, 0x0 } },
-       { S5P_EXT_REGULATOR_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_GPIO_MODE_LOWPWR,                 { 0x1, 0x0, 0x0 } },
-       { S5P_GPIO_MODE_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CAM_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_TV_LOWPWR,                        { 0x7, 0x0, 0x0 } },
-       { S5P_MFC_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_G3D_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_LCD0_LOWPWR,                      { 0x7, 0x0, 0x0 } },
-       { S5P_LCD1_LOWPWR,                      { 0x7, 0x0, 0x0 } },
-       { S5P_MAUDIO_LOWPWR,                    { 0x7, 0x7, 0x0 } },
-       { S5P_GPS_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_GPS_ALIVE_LOWPWR,                 { 0x7, 0x0, 0x0 } },
-       { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
-       { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_CORE1_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE1,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL1,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ISP_ARM_LOWPWR,                   { 0x1, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR,     { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR,   { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_COMMON_LOWPWR,                { 0x0, 0x0, 0x2 } },
-       { S5P_L2_0_LOWPWR,                      { 0x0, 0x0, 0x3 } },
-       /* the XXX_OPTION registers additionally need other fields set */
-       { S5P_ARM_L2_0_OPTION,                  { 0x10, 0x10, 0x0 } },
-       { S5P_L2_1_LOWPWR,                      { 0x0, 0x0, 0x3 } },
-       { S5P_ARM_L2_1_OPTION,                  { 0x10, 0x10, 0x0 } },
-       { S5P_CMU_ACLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_SCLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_LOWPWR,                 { 0x1, 0x1, 0x0 } },
-       { S5P_DRAM_FREQ_DOWN_LOWPWR,            { 0x1, 0x1, 0x1 } },
-       { S5P_DDRPHY_DLLOFF_LOWPWR,             { 0x1, 0x1, 0x1 } },
-       { S5P_LPDDR_PHY_DLL_LOCK_LOWPWR,        { 0x1, 0x1, 0x1 } },
-       { S5P_CMU_ACLKSTOP_COREBLK_LOWPWR,      { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_SCLKSTOP_COREBLK_LOWPWR,      { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_COREBLK_LOWPWR,         { 0x1, 0x1, 0x0 } },
-       { S5P_APLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_MPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_VPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_EPLL_SYSCLK_LOWPWR,               { 0x1, 0x1, 0x0 } },
-       { S5P_MPLLUSER_SYSCLK_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR,     { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_GPSALIVE_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_CAM_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_TV_LOWPWR,            { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_MFC_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_G3D_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_LCD0_LOWPWR,          { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_ISP_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_GPS_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_CAM_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_TV_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_MFC_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_G3D_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_LCD0_LOWPWR,            { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_ISP_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_GPS_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_TOP_BUS_LOWPWR,                   { 0x3, 0x0, 0x0 } },
-       { S5P_TOP_RETENTION_LOWPWR,             { 0x1, 0x0, 0x1 } },
-       { S5P_TOP_PWR_LOWPWR,                   { 0x3, 0x0, 0x3 } },
-       { S5P_TOP_BUS_COREBLK_LOWPWR,           { 0x3, 0x0, 0x0 } },
-       { S5P_TOP_RETENTION_COREBLK_LOWPWR,     { 0x1, 0x0, 0x1 } },
-       { S5P_TOP_PWR_COREBLK_LOWPWR,           { 0x3, 0x0, 0x3 } },
-       { S5P_LOGIC_RESET_LOWPWR,               { 0x1, 0x1, 0x0 } },
-       { S5P_OSCCLK_GATE_LOWPWR,               { 0x1, 0x0, 0x1 } },
-       { S5P_LOGIC_RESET_COREBLK_LOWPWR,       { 0x1, 0x1, 0x0 } },
-       { S5P_OSCCLK_GATE_COREBLK_LOWPWR,       { 0x1, 0x0, 0x1 } },
-       { S5P_ONENAND_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_ONENAND_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
-       { S5P_HSI_MEM_LOWPWR,                   { 0x3, 0x0, 0x0 } },
-       { S5P_HSI_MEM_OPTION,                   { 0x10, 0x10, 0x0 } },
-       { S5P_G2D_ACP_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_G2D_ACP_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
-       { S5P_USBOTG_MEM_LOWPWR,                { 0x3, 0x0, 0x0 } },
-       { S5P_USBOTG_MEM_OPTION,                { 0x10, 0x10, 0x0 } },
-       { S5P_HSMMC_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_HSMMC_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
-       { S5P_CSSYS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_CSSYS_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
-       { S5P_SECSS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_SECSS_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
-       { S5P_ROTATOR_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_ROTATOR_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
-       { S5P_PAD_RETENTION_DRAM_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MAUDIO_LOWPWR,      { 0x1, 0x1, 0x0 } },
-       { S5P_PAD_RETENTION_GPIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_UART_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MMCA_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MMCB_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_EBIA_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_EBIB_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_ISOLATION_LOWPWR,   { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_ISOLATION_COREBLK_LOWPWR,     { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_ALV_SEL_LOWPWR,     { 0x1, 0x0, 0x0 } },
-       { S5P_XUSBXTI_LOWPWR,                   { 0x1, 0x1, 0x0 } },
-       { S5P_XXTI_LOWPWR,                      { 0x1, 0x1, 0x0 } },
-       { S5P_EXT_REGULATOR_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_GPIO_MODE_LOWPWR,                 { 0x1, 0x0, 0x0 } },
-       { S5P_GPIO_MODE_COREBLK_LOWPWR,         { 0x1, 0x0, 0x0 } },
-       { S5P_GPIO_MODE_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_TOP_ASB_RESET_LOWPWR,             { 0x1, 0x1, 0x1 } },
-       { S5P_TOP_ASB_ISOLATION_LOWPWR,         { 0x1, 0x0, 0x1 } },
-       { S5P_CAM_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_TV_LOWPWR,                        { 0x7, 0x0, 0x0 } },
-       { S5P_MFC_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_G3D_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_LCD0_LOWPWR,                      { 0x7, 0x0, 0x0 } },
-       { S5P_ISP_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_MAUDIO_LOWPWR,                    { 0x7, 0x7, 0x0 } },
-       { S5P_GPS_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_GPS_ALIVE_LOWPWR,                 { 0x7, 0x0, 0x0 } },
-       { S5P_CMU_SYSCLK_ISP_LOWPWR,            { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_SYSCLK_GPS_LOWPWR,            { 0x1, 0x0, 0x0 } },
-       { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
-       { S5P_ARM_CORE2_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE2,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL2,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_CORE3_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE3,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL3,                 { 0x0, 0x0, 0x0 } },
-       { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos5250_pmu_config[] = {
-       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
-       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_FSYS_ARM_SYS_PWR_REG,                 { 0x1, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
-       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
-       { EXYNOS5_ARM_L2_SYS_PWR_REG,                   { 0x3, 0x3, 0x3} },
-       { EXYNOS5_ARM_L2_OPTION,                        { 0x10, 0x10, 0x0 } },
-       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
-       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
-       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
-       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
-       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
-       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
-       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x0, 0x1} },
-       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x0, 0x3} },
-       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
-       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x1} },
-       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x3} },
-       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
-       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x1} },
-       { EXYNOS5_USBOTG_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_G2D_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
-       { EXYNOS5_USBDRD_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_SDMMC_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
-       { EXYNOS5_CSSYS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
-       { EXYNOS5_SECSS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
-       { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG,              { 0x3, 0x0, 0x0} },
-       { EXYNOS5_INTRAM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_INTROM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_JPEG_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
-       { EXYNOS5_JPEG_MEM_OPTION,                      { 0x10, 0x10, 0x0} },
-       { EXYNOS5_HSI_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
-       { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_SATA_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
-       { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x1} },
-       { EXYNOS5_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
-       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
-       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
-       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x0, 0x1} },
-       { EXYNOS5_GSCL_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
-       { EXYNOS5_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS5_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS5_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS5_DISP1_SYS_PWR_REG,                    { 0x7, 0x0, 0x0} },
-       { EXYNOS5_MAU_SYS_PWR_REG,                      { 0x7, 0x7, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG,          { 0x1, 0x1, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG,           { 0x1, 0x1, 0x0} },
-       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { PMU_TABLE_END,},
-};
-
-static struct exynos_pmu_conf exynos5420_pmu_config[] = {
-       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
-       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_ARM_CORE2_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_ARM_CORE3_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_CORE0_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_CORE1_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_CORE2_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_CORE3_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_ARM_COMMON_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_COMMON_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ARM_L2_SYS_PWR_REG,                           { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_L2_SYS_PWR_REG,                        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,                     { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,                     { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                        { 0x1, 0x1, 0x0} },
-       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,                 { 0x1, 0x1, 0x0} },
-       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,                   { 0x1, 0x0, 0x1} },
-       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,                    { 0x1, 0x1, 0x1} },
-       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,                   { 0x1, 0x0, 0x1} },
-       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x1, 0x0} },
-       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
-       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                          { 0x3, 0x0, 0x0} },
-       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,                    { 0x1, 0x1, 0x1} },
-       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                          { 0x3, 0x3, 0x0} },
-       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,                   { 0x3, 0x0, 0x0} },
-       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
-       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,                   { 0x3, 0x0, 0x0} },
-       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,                      { 0x1, 0x1, 0x0} },
-       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,                      { 0x1, 0x0, 0x1} },
-       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,               { 0x1, 0x0, 0x0} },
-       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,               { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_INTRAM_MEM_SYS_PWR_REG,                    { 0x3, 0x0, 0x3} },
-       { EXYNOS5420_INTROM_MEM_SYS_PWR_REG,                    { 0x3, 0x0, 0x3} },
-       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,               { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG,             { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG,             { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,                    { 0x1, 0x1, 0x0} },
-       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,             { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                          { 0x1, 0x1, 0x0} },
-       { EXYNOS5_XXTI_SYS_PWR_REG,                             { 0x1, 0x1, 0x0} },
-       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,                    { 0x1, 0x1, 0x0} },
-       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                        { 0x1, 0x0, 0x0} },
-       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,                 { 0x1, 0x1, 0x0} },
-       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,                    { 0x1, 0x1, 0x0} },
-       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,                    { 0x1, 0x1, 0x0} },
-       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
-       { EXYNOS5_GSCL_SYS_PWR_REG,                             { 0x7, 0x0, 0x0} },
-       { EXYNOS5_ISP_SYS_PWR_REG,                              { 0x7, 0x0, 0x0} },
-       { EXYNOS5_MFC_SYS_PWR_REG,                              { 0x7, 0x0, 0x0} },
-       { EXYNOS5_G3D_SYS_PWR_REG,                              { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_DISP1_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_MAU_SYS_PWR_REG,                           { 0x7, 0x7, 0x0} },
-       { EXYNOS5420_G2D_SYS_PWR_REG,                           { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_MSC_SYS_PWR_REG,                           { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_FSYS_SYS_PWR_REG,                          { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_FSYS2_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_PSGEN_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_PERIC_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_WCORE_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,                 { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,                  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,                  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,                  { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,                  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,                 { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,                 { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,                 { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
-       { PMU_TABLE_END,},
-};
-
-static unsigned int const exynos3250_list_feed[] = {
-       EXYNOS3_ARM_CORE_OPTION(0),
-       EXYNOS3_ARM_CORE_OPTION(1),
-       EXYNOS3_ARM_CORE_OPTION(2),
-       EXYNOS3_ARM_CORE_OPTION(3),
-       EXYNOS3_ARM_COMMON_OPTION,
-       EXYNOS3_TOP_PWR_OPTION,
-       EXYNOS3_CORE_TOP_PWR_OPTION,
-       S5P_CAM_OPTION,
-       S5P_MFC_OPTION,
-       S5P_G3D_OPTION,
-       S5P_LCD0_OPTION,
-       S5P_ISP_OPTION,
-};
-
-static void exynos3250_powerdown_conf_extra(enum sys_powerdown mode)
-{
-       unsigned int i;
-       unsigned int tmp;
-
-       /* Enable only SC_FEEDBACK */
-       for (i = 0; i < ARRAY_SIZE(exynos3250_list_feed); i++) {
-               tmp = pmu_raw_readl(exynos3250_list_feed[i]);
-               tmp &= ~(EXYNOS3_OPTION_USE_SC_COUNTER);
-               tmp |= EXYNOS3_OPTION_USE_SC_FEEDBACK;
-               pmu_raw_writel(tmp, exynos3250_list_feed[i]);
-       }
-
-       if (mode != SYS_SLEEP)
-               return;
-
-       pmu_raw_writel(XUSBXTI_DURATION, EXYNOS3_XUSBXTI_DURATION);
-       pmu_raw_writel(XXTI_DURATION, EXYNOS3_XXTI_DURATION);
-       pmu_raw_writel(EXT_REGULATOR_DURATION, EXYNOS3_EXT_REGULATOR_DURATION);
-       pmu_raw_writel(EXT_REGULATOR_COREBLK_DURATION,
-                      EXYNOS3_EXT_REGULATOR_COREBLK_DURATION);
-}
-
-static unsigned int const exynos5_list_both_cnt_feed[] = {
-       EXYNOS5_ARM_CORE0_OPTION,
-       EXYNOS5_ARM_CORE1_OPTION,
-       EXYNOS5_ARM_COMMON_OPTION,
-       EXYNOS5_GSCL_OPTION,
-       EXYNOS5_ISP_OPTION,
-       EXYNOS5_MFC_OPTION,
-       EXYNOS5_G3D_OPTION,
-       EXYNOS5_DISP1_OPTION,
-       EXYNOS5_MAU_OPTION,
-       EXYNOS5_TOP_PWR_OPTION,
-       EXYNOS5_TOP_PWR_SYSMEM_OPTION,
-};
-
-static unsigned int const exynos5_list_disable_wfi_wfe[] = {
-       EXYNOS5_ARM_CORE1_OPTION,
-       EXYNOS5_FSYS_ARM_OPTION,
-       EXYNOS5_ISP_ARM_OPTION,
-};
-
-static unsigned int const exynos5420_list_disable_pmu_reg[] = {
-       EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,
-       EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,
-       EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,
-       EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,
-       EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,
-       EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,
-       EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,
-       EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,
-       EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,
-};
-
-static void exynos5420_powerdown_conf(enum sys_powerdown mode)
-{
-       u32 this_cluster;
-
-       this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
-
-       /*
-        * Write the cluster ID to the IROM register to ensure that we
-        * wake up on the current cluster.
-        */
-       pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
-}
-
-
-static void exynos5_powerdown_conf(enum sys_powerdown mode)
-{
-       unsigned int i;
-       unsigned int tmp;
-
-       /*
-        * Enable both SC_FEEDBACK and SC_COUNTER
-        */
-       for (i = 0; i < ARRAY_SIZE(exynos5_list_both_cnt_feed); i++) {
-               tmp = pmu_raw_readl(exynos5_list_both_cnt_feed[i]);
-               tmp |= (EXYNOS5_USE_SC_FEEDBACK |
-                       EXYNOS5_USE_SC_COUNTER);
-               pmu_raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
-       }
-
-       /*
-        * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
-        */
-       tmp = pmu_raw_readl(EXYNOS5_ARM_COMMON_OPTION);
-       tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
-       pmu_raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
-
-       /*
-        * Disable WFI/WFE on XXX_OPTION
-        */
-       for (i = 0; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe); i++) {
-               tmp = pmu_raw_readl(exynos5_list_disable_wfi_wfe[i]);
-               tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
-                        EXYNOS5_OPTION_USE_STANDBYWFI);
-               pmu_raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
-       }
-}
-
-void exynos_sys_powerdown_conf(enum sys_powerdown mode)
-{
-       unsigned int i;
-       const struct exynos_pmu_data *pmu_data;
-
-       if (!pmu_context)
-               return;
-
-       pmu_data = pmu_context->pmu_data;
-
-       if (pmu_data->powerdown_conf)
-               pmu_data->powerdown_conf(mode);
-
-       if (pmu_data->pmu_config) {
-               for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
-                       pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
-                                       pmu_data->pmu_config[i].offset);
-       }
-
-       if (pmu_data->powerdown_conf_extra)
-               pmu_data->powerdown_conf_extra(mode);
-
-       if (pmu_data->pmu_config_extra) {
-               for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
-                       pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
-                                       pmu_data->pmu_config_extra[i].offset);
-       }
-}
-
-static void exynos3250_pmu_init(void)
-{
-       unsigned int value;
-
-       /*
-        * To prevent the L2 memory system from issuing new bus requests:
-        * when a core's status is power down, '1' must be set for L2 power down.
-        */
-       value = pmu_raw_readl(EXYNOS3_ARM_COMMON_OPTION);
-       value |= EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
-       pmu_raw_writel(value, EXYNOS3_ARM_COMMON_OPTION);
-
-       /* Enable USE_STANDBY_WFI for all CORE */
-       pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
-
-       /*
-        * Set PSHOLD port for output high
-        */
-       value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
-       value |= S5P_PS_HOLD_OUTPUT_HIGH;
-       pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
-
-       /*
-        * Enable signal for PSHOLD port
-        */
-       value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
-       value |= S5P_PS_HOLD_EN;
-       pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
-}
-
-static void exynos5250_pmu_init(void)
-{
-       unsigned int value;
-       /*
-        * When SYS_WDTRESET is set, the watchdog timer reset request
-        * is ignored by the power management unit.
-        */
-       value = pmu_raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
-       value &= ~EXYNOS5_SYS_WDTRESET;
-       pmu_raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);
-
-       value = pmu_raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
-       value &= ~EXYNOS5_SYS_WDTRESET;
-       pmu_raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);
-}
-
-static void exynos5420_pmu_init(void)
-{
-       unsigned int value;
-       int i;
-
-       /*
-        * Set the CMU_RESET, CMU_SYSCLK and CMU_CLKSTOP registers
-        * for local power blocks to Low initially as per Table 8-4:
-        * "System-Level Power-Down Configuration Registers".
-        */
-       for (i = 0; i < ARRAY_SIZE(exynos5420_list_disable_pmu_reg); i++)
-               pmu_raw_writel(0, exynos5420_list_disable_pmu_reg[i]);
-
-       /* Enable USE_STANDBY_WFI for all CORE */
-       pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
-
-       value  = pmu_raw_readl(EXYNOS_L2_OPTION(0));
-       value &= ~EXYNOS5_USE_RETENTION;
-       pmu_raw_writel(value, EXYNOS_L2_OPTION(0));
-
-       value = pmu_raw_readl(EXYNOS_L2_OPTION(1));
-       value &= ~EXYNOS5_USE_RETENTION;
-       pmu_raw_writel(value, EXYNOS_L2_OPTION(1));
-
-       /*
-        * If L2_COMMON is turned off, clocks related to ATB async
-        * bridge are gated. Thus, when ISP power is gated, LPI
-        * may get stuck.
-        */
-       value = pmu_raw_readl(EXYNOS5420_LPI_MASK);
-       value |= EXYNOS5420_ATB_ISP_ARM;
-       pmu_raw_writel(value, EXYNOS5420_LPI_MASK);
-
-       value  = pmu_raw_readl(EXYNOS5420_LPI_MASK1);
-       value |= EXYNOS5420_ATB_KFC;
-       pmu_raw_writel(value, EXYNOS5420_LPI_MASK1);
-
-       /* Prevent issue of new bus request from L2 memory */
-       value = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION);
-       value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
-       pmu_raw_writel(value, EXYNOS5420_ARM_COMMON_OPTION);
-
-       value = pmu_raw_readl(EXYNOS5420_KFC_COMMON_OPTION);
-       value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
-       pmu_raw_writel(value, EXYNOS5420_KFC_COMMON_OPTION);
-
-       /* This setting is to reduce suspend/resume time */
-       pmu_raw_writel(DUR_WAIT_RESET, EXYNOS5420_LOGIC_RESET_DURATION3);
-
-       /* Serialized CPU wakeup of Eagle */
-       pmu_raw_writel(SPREAD_ENABLE, EXYNOS5420_ARM_INTR_SPREAD_ENABLE);
-
-       pmu_raw_writel(SPREAD_USE_STANDWFI,
-                       EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI);
-
-       pmu_raw_writel(0x1, EXYNOS5420_UP_SCHEDULER);
-       pr_info("EXYNOS5420 PMU initialized\n");
-}
-
-static const struct exynos_pmu_data exynos3250_pmu_data = {
-       .pmu_config     = exynos3250_pmu_config,
-       .pmu_init       = exynos3250_pmu_init,
-       .powerdown_conf_extra   = exynos3250_powerdown_conf_extra,
-};
-
-static const struct exynos_pmu_data exynos4210_pmu_data = {
-       .pmu_config     = exynos4210_pmu_config,
-};
-
-static const struct exynos_pmu_data exynos4212_pmu_data = {
-       .pmu_config     = exynos4x12_pmu_config,
-};
-
-static const struct exynos_pmu_data exynos4412_pmu_data = {
-       .pmu_config             = exynos4x12_pmu_config,
-       .pmu_config_extra       = exynos4412_pmu_config,
-};
-
-static const struct exynos_pmu_data exynos5250_pmu_data = {
-       .pmu_config     = exynos5250_pmu_config,
-       .pmu_init       = exynos5250_pmu_init,
-       .powerdown_conf = exynos5_powerdown_conf,
-};
-
-static const struct exynos_pmu_data exynos5420_pmu_data = {
-       .pmu_config     = exynos5420_pmu_config,
-       .pmu_init       = exynos5420_pmu_init,
-       .powerdown_conf = exynos5420_powerdown_conf,
-};
-
-/*
- * PMU platform driver and devicetree bindings.
- */
-static const struct of_device_id exynos_pmu_of_device_ids[] = {
-       {
-               .compatible = "samsung,exynos3250-pmu",
-               .data = &exynos3250_pmu_data,
-       }, {
-               .compatible = "samsung,exynos4210-pmu",
-               .data = &exynos4210_pmu_data,
-       }, {
-               .compatible = "samsung,exynos4212-pmu",
-               .data = &exynos4212_pmu_data,
-       }, {
-               .compatible = "samsung,exynos4412-pmu",
-               .data = &exynos4412_pmu_data,
-       }, {
-               .compatible = "samsung,exynos5250-pmu",
-               .data = &exynos5250_pmu_data,
-       }, {
-               .compatible = "samsung,exynos5420-pmu",
-               .data = &exynos5420_pmu_data,
-       },
-       { /*sentinel*/ },
-};
-
-static int exynos_pmu_probe(struct platform_device *pdev)
-{
-       const struct of_device_id *match;
-       struct device *dev = &pdev->dev;
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       pmu_base_addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pmu_base_addr))
-               return PTR_ERR(pmu_base_addr);
-
-       pmu_context = devm_kzalloc(&pdev->dev,
-                       sizeof(struct exynos_pmu_context),
-                       GFP_KERNEL);
-       if (!pmu_context) {
-               dev_err(dev, "Cannot allocate memory.\n");
-               return -ENOMEM;
-       }
-       pmu_context->dev = dev;
-
-       match = of_match_node(exynos_pmu_of_device_ids, dev->of_node);
-
-       pmu_context->pmu_data = match->data;
-
-       if (pmu_context->pmu_data->pmu_init)
-               pmu_context->pmu_data->pmu_init();
-
-       platform_set_drvdata(pdev, pmu_context);
-
-       dev_dbg(dev, "Exynos PMU Driver probe done\n");
-       return 0;
-}
-
-static struct platform_driver exynos_pmu_driver = {
-       .driver  = {
-               .name   = "exynos-pmu",
-               .of_match_table = exynos_pmu_of_device_ids,
-       },
-       .probe = exynos_pmu_probe,
-};
-
-static int __init exynos_pmu_init(void)
-{
-       return platform_driver_register(&exynos_pmu_driver);
-
-}
-postcore_initcall(exynos_pmu_init);
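The removed exynos-pmu.c above is driven almost entirely by its per-SoC tables: each row pairs a PMU register offset with one value per low-power mode (AFTR, LPA, SLEEP), and exynos_sys_powerdown_conf() walks the rows until PMU_TABLE_END. The stand-alone sketch below illustrates just that table-walking pattern; names such as fake_pmu[], demo_config[] and the offsets are made up for the example, and this is not the kernel driver itself.

```c
#include <stdio.h>
#include <stdint.h>

enum sys_powerdown { SYS_AFTR, SYS_LPA, SYS_SLEEP, NUM_SYS_POWERDOWN };

#define PMU_TABLE_END	(-1U)

struct pmu_conf {
	unsigned int offset;			/* register offset within the PMU block */
	unsigned int val[NUM_SYS_POWERDOWN];	/* one value per low-power mode */
};

static uint32_t fake_pmu[0x500];		/* stand-in for the memory-mapped PMU */

static void pmu_write(unsigned int val, unsigned int offset)
{
	fake_pmu[offset / 4] = val;
}

static const struct pmu_conf demo_config[] = {
	/* { offset, { AFTR, LPA, SLEEP } } */
	{ 0x1000, { 0x0, 0x0, 0x2 } },
	{ 0x1100, { 0x1, 0x1, 0x0 } },
	{ PMU_TABLE_END, },
};

static void powerdown_conf(enum sys_powerdown mode)
{
	unsigned int i;

	/* program every listed register with the value for this mode */
	for (i = 0; demo_config[i].offset != PMU_TABLE_END; i++)
		pmu_write(demo_config[i].val[mode], demo_config[i].offset);
}

int main(void)
{
	powerdown_conf(SYS_SLEEP);
	printf("reg 0x1000 = 0x%x\n", (unsigned)fake_pmu[0x1000 / 4]);	/* prints 0x2 */
	return 0;
}
```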
diff --git a/arch/arm/mach-exynos/regs-srom.h b/arch/arm/mach-exynos/regs-srom.h
deleted file mode 100644 (file)
index 5c4d442..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * S5P SROMC register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_REGS_SROM_H
-#define __PLAT_SAMSUNG_REGS_SROM_H __FILE__
-
-#include <mach/map.h>
-
-#define S5P_SROMREG(x)         (S5P_VA_SROMC + (x))
-
-#define S5P_SROM_BW            S5P_SROMREG(0x0)
-#define S5P_SROM_BC0           S5P_SROMREG(0x4)
-#define S5P_SROM_BC1           S5P_SROMREG(0x8)
-#define S5P_SROM_BC2           S5P_SROMREG(0xc)
-#define S5P_SROM_BC3           S5P_SROMREG(0x10)
-#define S5P_SROM_BC4           S5P_SROMREG(0x14)
-#define S5P_SROM_BC5           S5P_SROMREG(0x18)
-
-/* the single BW register holds packed 4-bit settings, one per chip select (NCS0 - NCS5) */
-
-#define S5P_SROM_BW__DATAWIDTH__SHIFT          0
-#define S5P_SROM_BW__ADDRMODE__SHIFT           1
-#define S5P_SROM_BW__WAITENABLE__SHIFT         2
-#define S5P_SROM_BW__BYTEENABLE__SHIFT         3
-
-#define S5P_SROM_BW__CS_MASK                   0xf
-
-#define S5P_SROM_BW__NCS0__SHIFT               0
-#define S5P_SROM_BW__NCS1__SHIFT               4
-#define S5P_SROM_BW__NCS2__SHIFT               8
-#define S5P_SROM_BW__NCS3__SHIFT               12
-#define S5P_SROM_BW__NCS4__SHIFT               16
-#define S5P_SROM_BW__NCS5__SHIFT               20
-
-/* the same field layout applies to each of the BCx registers */
-
-#define S5P_SROM_BCX__PMC__SHIFT               0
-#define S5P_SROM_BCX__TACP__SHIFT              4
-#define S5P_SROM_BCX__TCAH__SHIFT              8
-#define S5P_SROM_BCX__TCOH__SHIFT              12
-#define S5P_SROM_BCX__TACC__SHIFT              16
-#define S5P_SROM_BCX__TCOS__SHIFT              24
-#define S5P_SROM_BCX__TACS__SHIFT              28
-
-#endif /* __PLAT_SAMSUNG_REGS_SROM_H */
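For reference, the deleted regs-srom.h packs one 4-bit configuration per chip select into the single BW register. The short sketch below shows how those shift/mask macros would typically be combined; the register itself is replaced by a plain variable, and the assumption that a DATAWIDTH bit of 1 selects a 16-bit bus is illustrative rather than taken from the header.

```c
#include <stdio.h>
#include <stdint.h>

#define S5P_SROM_BW__DATAWIDTH__SHIFT	0
#define S5P_SROM_BW__ADDRMODE__SHIFT	1
#define S5P_SROM_BW__WAITENABLE__SHIFT	2
#define S5P_SROM_BW__BYTEENABLE__SHIFT	3
#define S5P_SROM_BW__CS_MASK		0xf
#define S5P_SROM_BW__NCS1__SHIFT	4

int main(void)
{
	uint32_t bw = 0;	/* stand-in for the memory-mapped S5P_SROM_BW */

	/* assumed encoding: 16-bit data width plus byte enables, for bank nCS1 */
	uint32_t cs_cfg = (1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
			  (1 << S5P_SROM_BW__BYTEENABLE__SHIFT);

	/* clear the 4-bit slot for nCS1, then install the new configuration */
	bw &= ~(uint32_t)(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
	bw |= cs_cfg << S5P_SROM_BW__NCS1__SHIFT;

	printf("S5P_SROM_BW would be written as 0x%08x\n", (unsigned)bw);
	return 0;
}
```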
index c169cc3049aa3bbe270905eea1840d1b603b125c..f21690937b7d13e7eeb94dc5b40a5496b1261beb 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/of_address.h>
 #include <linux/err.h>
 #include <linux/regulator/machine.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/smp_scu.h>
 #include <asm/suspend.h>
 
+#include <mach/map.h>
+
 #include <plat/pm-common.h>
 
 #include "common.h"
-#include "exynos-pmu.h"
-#include "regs-pmu.h"
-#include "regs-srom.h"
 
 #define REG_TABLE_END (-1U)
 
@@ -53,15 +54,6 @@ struct exynos_wkup_irq {
        u32 mask;
 };
 
-static struct sleep_save exynos_core_save[] = {
-       /* SROM side */
-       SAVE_ITEM(S5P_SROM_BW),
-       SAVE_ITEM(S5P_SROM_BC0),
-       SAVE_ITEM(S5P_SROM_BC1),
-       SAVE_ITEM(S5P_SROM_BC2),
-       SAVE_ITEM(S5P_SROM_BC3),
-};
-
 struct exynos_pm_data {
        const struct exynos_wkup_irq *wkup_irq;
        unsigned int wake_disable_mask;
@@ -343,8 +335,6 @@ static void exynos_pm_prepare(void)
        /* Set wake-up mask registers */
        exynos_pm_set_wakeup_mask();
 
-       s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
        exynos_pm_enter_sleep_mode();
 
        /* ensure at least INFORM0 has the resume address */
@@ -375,8 +365,6 @@ static void exynos5420_pm_prepare(void)
        /* Set wake-up mask registers */
        exynos_pm_set_wakeup_mask();
 
-       s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
        exynos_pmu_spare3 = pmu_raw_readl(S5P_PMU_SPARE3);
        /*
         * The cpu state needs to be saved and restored so that the
@@ -467,8 +455,6 @@ static void exynos_pm_resume(void)
        /* For release retention */
        exynos_pm_release_retention();
 
-       s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
        if (cpuid == ARM_CPU_PART_CORTEX_A9)
                scu_enable(S5P_VA_SCU);
 
@@ -535,8 +521,6 @@ static void exynos5420_pm_resume(void)
 
        pmu_raw_writel(exynos_pmu_spare3, S5P_PMU_SPARE3);
 
-       s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
 early_wakeup:
 
        tmp = pmu_raw_readl(EXYNOS5420_SFR_AXI_CGDIS1);
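The suspend.c hunks above drop the exynos_core_save[] table, which used the generic sleep_save save/restore idiom for the SROM registers. A minimal sketch of that idiom, assuming a sleep_save layout similar to plat/pm-common.h and using a local array instead of real hardware:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sleep_save {
	volatile uint32_t *reg;		/* register to preserve */
	uint32_t val;			/* value captured before suspend */
};

#define SAVE_ITEM(x)	{ .reg = (x) }

static volatile uint32_t fake_srom[4];	/* pretend SROMC register bank */

static struct sleep_save core_save[] = {
	SAVE_ITEM(&fake_srom[0]),	/* e.g. S5P_SROM_BW  */
	SAVE_ITEM(&fake_srom[1]),	/* e.g. S5P_SROM_BC0 */
};

static void pm_do_save(struct sleep_save *save, size_t count)
{
	for (size_t i = 0; i < count; i++)
		save[i].val = *save[i].reg;
}

static void pm_do_restore(const struct sleep_save *save, size_t count)
{
	for (size_t i = 0; i < count; i++)
		*save[i].reg = save[i].val;
}

int main(void)
{
	fake_srom[0] = 0x9;
	pm_do_save(core_save, 2);
	fake_srom[0] = 0;		/* contents "lost" across suspend */
	pm_do_restore(core_save, 2);
	printf("restored 0x%x\n", (unsigned)fake_srom[0]);
	return 0;
}
```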
index 16496a071ecb8c3174639a15fbc3d6fe092d3e3b..cda330c93d610d35d3e82e9d9574d6611dc489d4 100644 (file)
@@ -94,8 +94,8 @@ static void mxc_expio_irq_handler(struct irq_desc *desc)
        /* irq = gpio irq number */
        desc->irq_data.chip->irq_mask(&desc->irq_data);
 
-       imr_val = __raw_readw(brd_io + INTR_MASK_REG);
-       int_valid = __raw_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
+       imr_val = imx_readw(brd_io + INTR_MASK_REG);
+       int_valid = imx_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
 
        expio_irq = 0;
        for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
@@ -117,17 +117,17 @@ static void expio_mask_irq(struct irq_data *d)
        u16 reg;
        u32 expio = d->hwirq;
 
-       reg = __raw_readw(brd_io + INTR_MASK_REG);
+       reg = imx_readw(brd_io + INTR_MASK_REG);
        reg |= (1 << expio);
-       __raw_writew(reg, brd_io + INTR_MASK_REG);
+       imx_writew(reg, brd_io + INTR_MASK_REG);
 }
 
 static void expio_ack_irq(struct irq_data *d)
 {
        u32 expio = d->hwirq;
 
-       __raw_writew(1 << expio, brd_io + INTR_RESET_REG);
-       __raw_writew(0, brd_io + INTR_RESET_REG);
+       imx_writew(1 << expio, brd_io + INTR_RESET_REG);
+       imx_writew(0, brd_io + INTR_RESET_REG);
        expio_mask_irq(d);
 }
 
@@ -136,9 +136,9 @@ static void expio_unmask_irq(struct irq_data *d)
        u16 reg;
        u32 expio = d->hwirq;
 
-       reg = __raw_readw(brd_io + INTR_MASK_REG);
+       reg = imx_readw(brd_io + INTR_MASK_REG);
        reg &= ~(1 << expio);
-       __raw_writew(reg, brd_io + INTR_MASK_REG);
+       imx_writew(reg, brd_io + INTR_MASK_REG);
 }
 
 static struct irq_chip expio_irq_chip = {
@@ -162,9 +162,9 @@ int __init mxc_expio_init(u32 base, u32 intr_gpio)
        if (brd_io == NULL)
                return -ENOMEM;
 
-       if ((__raw_readw(brd_io + MAGIC_NUMBER1_REG) != 0xAAAA) ||
-           (__raw_readw(brd_io + MAGIC_NUMBER2_REG) != 0x5555) ||
-           (__raw_readw(brd_io + MAGIC_NUMBER3_REG) != 0xCAFE)) {
+       if ((imx_readw(brd_io + MAGIC_NUMBER1_REG) != 0xAAAA) ||
+           (imx_readw(brd_io + MAGIC_NUMBER2_REG) != 0x5555) ||
+           (imx_readw(brd_io + MAGIC_NUMBER3_REG) != 0xCAFE)) {
                pr_info("3-Stack Debug board not detected\n");
                iounmap(brd_io);
                brd_io = NULL;
@@ -181,10 +181,10 @@ int __init mxc_expio_init(u32 base, u32 intr_gpio)
        gpio_direction_input(intr_gpio);
 
        /* disable the interrupt and clear the status */
-       __raw_writew(0, brd_io + INTR_MASK_REG);
-       __raw_writew(0xFFFF, brd_io + INTR_RESET_REG);
-       __raw_writew(0, brd_io + INTR_RESET_REG);
-       __raw_writew(0x1F, brd_io + INTR_MASK_REG);
+       imx_writew(0, brd_io + INTR_MASK_REG);
+       imx_writew(0xFFFF, brd_io + INTR_RESET_REG);
+       imx_writew(0, brd_io + INTR_RESET_REG);
+       imx_writew(0x1F, brd_io + INTR_MASK_REG);
 
        irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
        WARN_ON(irq_base < 0);
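The 3ds_debugboard.c changes above swap __raw_readw()/__raw_writew() for imx_readw()/imx_writew(). The raw accessors perform no byte swapping, whereas the imx_* wrappers are presumably defined as proper little-endian MMIO accessors, which matters once big-endian kernels are allowed (see the ARCH_SUPPORTS_BIG_ENDIAN select below). This small user-space demo, not kernel code, shows the underlying difference between a raw native-endian load and an explicit little-endian one:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Explicitly little-endian 16-bit read, independent of CPU byte order. */
static uint16_t le16_read(const void *p)
{
	const uint8_t *b = p;
	return (uint16_t)(b[0] | (b[1] << 8));
}

/* Raw native-endian 16-bit read: result depends on the CPU's byte order. */
static uint16_t raw16_read(const void *p)
{
	uint16_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	/* a little-endian device register holding the value 0xCAFE */
	uint8_t reg[2] = { 0xFE, 0xCA };

	printf("le16_read  -> 0x%04X (always 0xCAFE)\n", le16_read(reg));
	printf("raw16_read -> 0x%04X (0xCAFE only on little-endian CPUs)\n",
	       raw16_read(reg));
	return 0;
}
```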
index 15df34fbdf44c5abd4182f1c5a0514162e7fe635..ecc374060c27c55e2dfe4fa04d170e213ddd31b5 100644 (file)
@@ -2,6 +2,7 @@ menuconfig ARCH_MXC
        bool "Freescale i.MX family"
        depends on ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7 || ARM_SINGLE_ARMV7M
        select ARCH_REQUIRE_GPIOLIB
+       select ARCH_SUPPORTS_BIG_ENDIAN
        select ARM_CPU_SUSPEND if PM
        select CLKSRC_IMX_GPT
        select GENERIC_IRQ_CHIP
@@ -561,6 +562,7 @@ config SOC_IMX7D
        bool "i.MX7 Dual support"
        select PINCTRL_IMX7D
        select ARM_GIC
+       select HAVE_ARM_ARCH_TIMER
        select HAVE_IMX_ANATOP
        select HAVE_IMX_MMDC
        select HAVE_IMX_SRC
index 231bb250c5719d21962404a9301765cdcf499924..bd3555ee88c8bd8ff1352d2623c82abcd2d632b8 100644 (file)
@@ -151,7 +151,14 @@ void __init imx_init_revision_from_anatop(void)
                revision = IMX_CHIP_REVISION_1_5;
                break;
        default:
-               revision = IMX_CHIP_REVISION_UNKNOWN;
+               /*
+                * Fall back to returning the raw register value instead of
+                * 0xff: an unrecognized revision can then still be identified
+                * from the SoC register itself. Some chips' (i.MX7D) digprog
+                * values already match the Linux revision format, so the raw
+                * value can be used directly without another mapping step.
+                */
+               revision = digprog & 0xff;
        }
 
        mxc_set_cpu_type(digprog >> 16 & 0xff);
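The anatop.c hunk changes the unknown-revision fallback from 0xff to the raw low byte of digprog. A reduced, illustrative decoder is sketched below; the specific case values and the sample digprog word are assumptions for the example, not the full kernel mapping.

```c
#include <stdio.h>
#include <stdint.h>

#define IMX_CHIP_REVISION_1_0	0x10
#define IMX_CHIP_REVISION_1_1	0x11

static unsigned int revision_from_digprog(uint32_t digprog)
{
	switch (digprog & 0xff) {
	case 0:
		return IMX_CHIP_REVISION_1_0;
	case 1:
		return IMX_CHIP_REVISION_1_1;
	/* ... further known codes elided ... */
	default:
		/* unknown: report the raw byte instead of 0xff */
		return digprog & 0xff;
	}
}

int main(void)
{
	/* a digprog word whose low byte is already in major.minor form (0x10 = rev 1.0) */
	printf("revision = 0x%02x\n", revision_from_digprog(0x00720010));
	return 0;
}
```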
index 1a8932335b2136b9ff941adc3d2bc35abdebb325..7fa176e792bd16fc7b71959ff0f57dcf23ec32d7 100644 (file)
@@ -66,12 +66,12 @@ static int avic_set_irq_fiq(unsigned int irq, unsigned int type)
                return -EINVAL;
 
        if (irq < AVIC_NUM_IRQS / 2) {
-               irqt = __raw_readl(avic_base + AVIC_INTTYPEL) & ~(1 << irq);
-               __raw_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEL);
+               irqt = imx_readl(avic_base + AVIC_INTTYPEL) & ~(1 << irq);
+               imx_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEL);
        } else {
                irq -= AVIC_NUM_IRQS / 2;
-               irqt = __raw_readl(avic_base + AVIC_INTTYPEH) & ~(1 << irq);
-               __raw_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEH);
+               irqt = imx_readl(avic_base + AVIC_INTTYPEH) & ~(1 << irq);
+               imx_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEH);
        }
 
        return 0;
@@ -94,8 +94,8 @@ static void avic_irq_suspend(struct irq_data *d)
        struct irq_chip_type *ct = gc->chip_types;
        int idx = d->hwirq >> 5;
 
-       avic_saved_mask_reg[idx] = __raw_readl(avic_base + ct->regs.mask);
-       __raw_writel(gc->wake_active, avic_base + ct->regs.mask);
+       avic_saved_mask_reg[idx] = imx_readl(avic_base + ct->regs.mask);
+       imx_writel(gc->wake_active, avic_base + ct->regs.mask);
 }
 
 static void avic_irq_resume(struct irq_data *d)
@@ -104,7 +104,7 @@ static void avic_irq_resume(struct irq_data *d)
        struct irq_chip_type *ct = gc->chip_types;
        int idx = d->hwirq >> 5;
 
-       __raw_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask);
+       imx_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask);
 }
 
 #else
@@ -140,7 +140,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
        u32 nivector;
 
        do {
-               nivector = __raw_readl(avic_base + AVIC_NIVECSR) >> 16;
+               nivector = imx_readl(avic_base + AVIC_NIVECSR) >> 16;
                if (nivector == 0xffff)
                        break;
 
@@ -164,16 +164,16 @@ void __init mxc_init_irq(void __iomem *irqbase)
        /* put the AVIC into the reset value with
         * all interrupts disabled
         */
-       __raw_writel(0, avic_base + AVIC_INTCNTL);
-       __raw_writel(0x1f, avic_base + AVIC_NIMASK);
+       imx_writel(0, avic_base + AVIC_INTCNTL);
+       imx_writel(0x1f, avic_base + AVIC_NIMASK);
 
        /* disable all interrupts */
-       __raw_writel(0, avic_base + AVIC_INTENABLEH);
-       __raw_writel(0, avic_base + AVIC_INTENABLEL);
+       imx_writel(0, avic_base + AVIC_INTENABLEH);
+       imx_writel(0, avic_base + AVIC_INTENABLEL);
 
        /* all IRQ no FIQ */
-       __raw_writel(0, avic_base + AVIC_INTTYPEH);
-       __raw_writel(0, avic_base + AVIC_INTTYPEL);
+       imx_writel(0, avic_base + AVIC_INTTYPEH);
+       imx_writel(0, avic_base + AVIC_INTTYPEL);
 
        irq_base = irq_alloc_descs(-1, 0, AVIC_NUM_IRQS, numa_node_id());
        WARN_ON(irq_base < 0);
@@ -188,7 +188,7 @@ void __init mxc_init_irq(void __iomem *irqbase)
 
        /* Set default priority value (0) for all IRQ's */
        for (i = 0; i < 8; i++)
-               __raw_writel(0, avic_base + AVIC_NIPRIORITY(i));
+               imx_writel(0, avic_base + AVIC_NIPRIORITY(i));
 
        set_handle_irq(avic_handle_irq);
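The avic.c hunks convert the same accessors; the dispatch loop in avic_handle_irq() keeps reading the NIVECSR-derived vector and handling interrupts until it reads 0xffff. The sketch below mimics that loop with a small software queue standing in for the register, purely as an illustration.

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t pending[] = { 23, 7, 0 };	/* fake pending hwirq numbers */
static unsigned int head;

/* stand-in for "imx_readl(avic_base + AVIC_NIVECSR) >> 16" */
static uint32_t read_nivector(void)
{
	if (!pending[head])
		return 0xffff;			/* nothing pending */
	return pending[head++];
}

static void handle_domain_irq(uint32_t nivector)
{
	printf("dispatching hwirq %u\n", (unsigned)nivector);
}

int main(void)
{
	uint32_t nivector;

	do {
		nivector = read_nivector();
		if (nivector == 0xffff)
			break;
		handle_domain_irq(nivector);
	} while (1);

	return 0;
}
```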
 
index fe8d36f7e30ed95209ae218811c4420af52b5bee..8d2ae4091465b33aabdd06cfabc69954b203644f 100644 (file)
@@ -39,8 +39,7 @@ static int mx27_read_cpu_rev(void)
         * the silicon revision very early we read it here to
         * avoid any further hooks
        */
-       val = __raw_readl(MX27_IO_ADDRESS(MX27_SYSCTRL_BASE_ADDR
-                               + SYS_CHIP_ID));
+       val = imx_readl(MX27_IO_ADDRESS(MX27_SYSCTRL_BASE_ADDR + SYS_CHIP_ID));
 
        mx27_cpu_partnumber = (int)((val >> 12) & 0xFFFF);
 
index fde1860a25216ed9763fb7ba8256ed0b16757e70..3daf1959a2f0ad555e1f5c94d2cb79ca2cd2db39 100644 (file)
@@ -39,7 +39,7 @@ static int mx31_read_cpu_rev(void)
        u32 i, srev;
 
        /* read SREV register from IIM module */
-       srev = __raw_readl(MX31_IO_ADDRESS(MX31_IIM_BASE_ADDR + MXC_IIMSREV));
+       srev = imx_readl(MX31_IO_ADDRESS(MX31_IIM_BASE_ADDR + MXC_IIMSREV));
        srev &= 0xff;
 
        for (i = 0; i < ARRAY_SIZE(mx31_cpu_type); i++)
index ec3aaa098c1706b3d7cf88b9c608302777419aad..8a54234df23b582b5bac78d365f2e35b4fe6cc84 100644 (file)
@@ -20,7 +20,7 @@ static int mx35_read_cpu_rev(void)
 {
        u32 rev;
 
-       rev = __raw_readl(MX35_IO_ADDRESS(MX35_IIM_BASE_ADDR + MXC_IIMSREV));
+       rev = imx_readl(MX35_IO_ADDRESS(MX35_IIM_BASE_ADDR + MXC_IIMSREV));
        switch (rev) {
        case 0x00:
                return IMX_CHIP_REVISION_1_0;
index 5b0f752d5507fe337408cfb9abc3a079beb458dd..6a96b7cf468ff95b7194ce42e9e58c2b1d08261a 100644 (file)
@@ -45,20 +45,20 @@ void __init imx_set_aips(void __iomem *base)
  * Set all MPROTx to be non-bufferable, trusted for R/W,
  * not forced to user-mode.
  */
-       __raw_writel(0x77777777, base + 0x0);
-       __raw_writel(0x77777777, base + 0x4);
+       imx_writel(0x77777777, base + 0x0);
+       imx_writel(0x77777777, base + 0x4);
 
 /*
  * Set all OPACRx to be non-bufferable, to not require
  * supervisor privilege level for access, allow for
  * write access and untrusted master access.
  */
-       __raw_writel(0x0, base + 0x40);
-       __raw_writel(0x0, base + 0x44);
-       __raw_writel(0x0, base + 0x48);
-       __raw_writel(0x0, base + 0x4C);
-       reg = __raw_readl(base + 0x50) & 0x00FFFFFF;
-       __raw_writel(reg, base + 0x50);
+       imx_writel(0x0, base + 0x40);
+       imx_writel(0x0, base + 0x44);
+       imx_writel(0x0, base + 0x48);
+       imx_writel(0x0, base + 0x4C);
+       reg = imx_readl(base + 0x50) & 0x00FFFFFF;
+       imx_writel(reg, base + 0x50);
 }
 
 void __init imx_aips_allow_unprivileged_access(
index 08ce20771bb3f9e49a88294e37216d7c562edb8a..fb9a73a57d00dbf11432f1771180f3391cfdfa55 100644 (file)
@@ -64,23 +64,23 @@ static inline void epit_irq_disable(void)
 {
        u32 val;
 
-       val = __raw_readl(timer_base + EPITCR);
+       val = imx_readl(timer_base + EPITCR);
        val &= ~EPITCR_OCIEN;
-       __raw_writel(val, timer_base + EPITCR);
+       imx_writel(val, timer_base + EPITCR);
 }
 
 static inline void epit_irq_enable(void)
 {
        u32 val;
 
-       val = __raw_readl(timer_base + EPITCR);
+       val = imx_readl(timer_base + EPITCR);
        val |= EPITCR_OCIEN;
-       __raw_writel(val, timer_base + EPITCR);
+       imx_writel(val, timer_base + EPITCR);
 }
 
 static void epit_irq_acknowledge(void)
 {
-       __raw_writel(EPITSR_OCIF, timer_base + EPITSR);
+       imx_writel(EPITSR_OCIF, timer_base + EPITSR);
 }
 
 static int __init epit_clocksource_init(struct clk *timer_clk)
@@ -98,9 +98,9 @@ static int epit_set_next_event(unsigned long evt,
 {
        unsigned long tcmp;
 
-       tcmp = __raw_readl(timer_base + EPITCNR);
+       tcmp = imx_readl(timer_base + EPITCNR);
 
-       __raw_writel(tcmp - evt, timer_base + EPITCMPR);
+       imx_writel(tcmp - evt, timer_base + EPITCMPR);
 
        return 0;
 }
@@ -213,11 +213,11 @@ void __init epit_timer_init(void __iomem *base, int irq)
        /*
         * Initialise to a known state (all timers off, and timing reset)
         */
-       __raw_writel(0x0, timer_base + EPITCR);
+       imx_writel(0x0, timer_base + EPITCR);
 
-       __raw_writel(0xffffffff, timer_base + EPITLR);
-       __raw_writel(EPITCR_EN | EPITCR_CLKSRC_REF_HIGH | EPITCR_WAITEN,
-                       timer_base + EPITCR);
+       imx_writel(0xffffffff, timer_base + EPITLR);
+       imx_writel(EPITCR_EN | EPITCR_CLKSRC_REF_HIGH | EPITCR_WAITEN,
+                  timer_base + EPITCR);
 
        /* init and register the timer to the framework */
        epit_clocksource_init(timer_clk);
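
The EPIT interrupt enable/disable paths above are plain read-modify-write sequences on EPITCR, and that idiom survives the accessor change unmodified. A hypothetical helper (not part of the patch) showing the same pattern in one place:

       /* hypothetical helper, not in the source: set/clear bits in EPITCR */
       static void epit_rmw(u32 set, u32 clear)
       {
               u32 val = imx_readl(timer_base + EPITCR);

               val &= ~clear;
               val |= set;
               imx_writel(val, timer_base + EPITCR);
       }

       /* e.g. epit_rmw(EPITCR_OCIEN, 0) would enable the output-compare interrupt */
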
index b5e976816b63cf3cd81926dd8378036d21b2888e..6c28d28b3c647982fb25d709f2eaef570e9085e3 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 diag_reg_offset:
        .word   g_diag_reg - .
@@ -25,6 +26,7 @@ diag_reg_offset:
        .endm
 
 ENTRY(v7_secondary_startup)
+ARM_BE8(setend be)                     @ go BE8 if entered LE
        set_diag_reg
        b       secondary_startup
 ENDPROC(v7_secondary_startup)
index 0b5ba4bf572a252112de2d3c9d13d7cf758e3b00..3982e91b2f3ea49d993bcf2bc32472304c3dadc6 100644 (file)
@@ -57,10 +57,10 @@ void mxc_iomux_mode(unsigned int pin_mode)
 
        spin_lock(&gpio_mux_lock);
 
-       l = __raw_readl(reg);
+       l = imx_readl(reg);
        l &= ~(0xff << (field * 8));
        l |= mode << (field * 8);
-       __raw_writel(l, reg);
+       imx_writel(l, reg);
 
        spin_unlock(&gpio_mux_lock);
 }
@@ -82,10 +82,10 @@ void mxc_iomux_set_pad(enum iomux_pins pin, u32 config)
 
        spin_lock(&gpio_mux_lock);
 
-       l = __raw_readl(reg);
+       l = imx_readl(reg);
        l &= ~(0x1ff << (field * 10));
        l |= config << (field * 10);
-       __raw_writel(l, reg);
+       imx_writel(l, reg);
 
        spin_unlock(&gpio_mux_lock);
 }
@@ -163,12 +163,12 @@ void mxc_iomux_set_gpr(enum iomux_gp_func gp, bool en)
        u32 l;
 
        spin_lock(&gpio_mux_lock);
-       l = __raw_readl(IOMUXGPR);
+       l = imx_readl(IOMUXGPR);
        if (en)
                l |= gp;
        else
                l &= ~gp;
 
-       __raw_writel(l, IOMUXGPR);
+       imx_writel(l, IOMUXGPR);
        spin_unlock(&gpio_mux_lock);
 }
index ecd543664644135084e37d88002ecee88fa2d15a..7aa90c863ad99b21ede09f5438a70de62873c4b8 100644 (file)
@@ -38,12 +38,12 @@ static unsigned imx_iomuxv1_numports;
 
 static inline unsigned long imx_iomuxv1_readl(unsigned offset)
 {
-       return __raw_readl(imx_iomuxv1_baseaddr + offset);
+       return imx_readl(imx_iomuxv1_baseaddr + offset);
 }
 
 static inline void imx_iomuxv1_writel(unsigned long val, unsigned offset)
 {
-       __raw_writel(val, imx_iomuxv1_baseaddr + offset);
+       imx_writel(val, imx_iomuxv1_baseaddr + offset);
 }
 
 static inline void imx_iomuxv1_rmwl(unsigned offset,
index a53b2e64f98d547594243a67bf3581ac63250ea7..ca59d5f2ec92e4337e1f418ac83442ac8844806c 100644 (file)
@@ -45,13 +45,13 @@ int mxc_iomux_v3_setup_pad(iomux_v3_cfg_t pad)
        u32 pad_ctrl = (pad & MUX_PAD_CTRL_MASK) >> MUX_PAD_CTRL_SHIFT;
 
        if (mux_ctrl_ofs)
-               __raw_writel(mux_mode, base + mux_ctrl_ofs);
+               imx_writel(mux_mode, base + mux_ctrl_ofs);
 
        if (sel_input_ofs)
-               __raw_writel(sel_input, base + sel_input_ofs);
+               imx_writel(sel_input, base + sel_input_ofs);
 
        if (!(pad_ctrl & NO_PAD_CTRL) && pad_ctrl_ofs)
-               __raw_writel(pad_ctrl, base + pad_ctrl_ofs);
+               imx_writel(pad_ctrl, base + pad_ctrl_ofs);
 
        return 0;
 }
index f2060523ba489c3feca3c2083fdd28a7594a4412..eaee47a2fcc0f6d2e196af4dea2d7c3dc74edd2b 100644 (file)
@@ -525,8 +525,8 @@ static void __init armadillo5x0_init(void)
        imx31_add_mxc_nand(&armadillo5x0_nand_board_info);
 
        /* set NAND page size to 2k if not configured via boot mode pins */
-       __raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) |
-                                       (1 << 30), mx3_ccm_base + MXC_CCM_RCSR);
+       imx_writel(imx_readl(mx3_ccm_base + MXC_CCM_RCSR) | (1 << 30),
+                  mx3_ccm_base + MXC_CCM_RCSR);
 
        /* RTC */
        /* Get RTC IRQ and register the chip */
index b015129e4045847a3e62b72fb001a71e8b80c55c..6883fbaf9484b2da00324d42ce2ada3e70cefc66 100644 (file)
@@ -40,11 +40,10 @@ static void __init imx51_ipu_mipi_setup(void)
        WARN_ON(!hsc_addr);
 
        /* setup MIPI module to legacy mode */
-       __raw_writel(0xf00, hsc_addr);
+       imx_writel(0xf00, hsc_addr);
 
        /* CSI mode: reserved; DI control mode: legacy (from Freescale BSP) */
-       __raw_writel(__raw_readl(hsc_addr + 0x800) | 0x30ff,
-               hsc_addr + 0x800);
+       imx_writel(imx_readl(hsc_addr + 0x800) | 0x30ff, hsc_addr + 0x800);
 
        iounmap(hsc_addr);
 }
index eb1c3477c48ae1cedbbbc74bf2164110e831beae..267fad23b2257dc80d24cea3eca455f30a60d2ed 100644 (file)
@@ -202,9 +202,9 @@ static struct i2c_board_info mx27ads_i2c_devices[] = {
 static void vgpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
        if (value)
-               __raw_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_SET_REG);
+               imx_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_SET_REG);
        else
-               __raw_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_CLEAR_REG);
+               imx_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_CLEAR_REG);
 }
 
 static int vgpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
@@ -364,7 +364,7 @@ static void __init mx27ads_timer_init(void)
 {
        unsigned long fref = 26000000;
 
-       if ((__raw_readw(PBC_VERSION_REG) & CKIH_27MHZ_BIT_SET) == 0)
+       if ((imx_readw(PBC_VERSION_REG) & CKIH_27MHZ_BIT_SET) == 0)
                fref = 27000000;
 
        mx27_clocks_init(fref);
index 2b147e4bf9c91297deb0522bcfb14150b61f5b11..4f2c56d44ba14de19f10ccc2dc16927a4b03a545 100644 (file)
@@ -160,8 +160,8 @@ static void mx31ads_expio_irq_handler(struct irq_desc *desc)
        u32 int_valid;
        u32 expio_irq;
 
-       imr_val = __raw_readw(PBC_INTMASK_SET_REG);
-       int_valid = __raw_readw(PBC_INTSTATUS_REG) & imr_val;
+       imr_val = imx_readw(PBC_INTMASK_SET_REG);
+       int_valid = imx_readw(PBC_INTSTATUS_REG) & imr_val;
 
        expio_irq = 0;
        for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
@@ -180,8 +180,8 @@ static void expio_mask_irq(struct irq_data *d)
 {
        u32 expio = d->hwirq;
        /* mask the interrupt */
-       __raw_writew(1 << expio, PBC_INTMASK_CLEAR_REG);
-       __raw_readw(PBC_INTMASK_CLEAR_REG);
+       imx_writew(1 << expio, PBC_INTMASK_CLEAR_REG);
+       imx_readw(PBC_INTMASK_CLEAR_REG);
 }
 
 /*
@@ -192,7 +192,7 @@ static void expio_ack_irq(struct irq_data *d)
 {
        u32 expio = d->hwirq;
        /* clear the interrupt status */
-       __raw_writew(1 << expio, PBC_INTSTATUS_REG);
+       imx_writew(1 << expio, PBC_INTSTATUS_REG);
 }
 
 /*
@@ -203,7 +203,7 @@ static void expio_unmask_irq(struct irq_data *d)
 {
        u32 expio = d->hwirq;
        /* unmask the interrupt */
-       __raw_writew(1 << expio, PBC_INTMASK_SET_REG);
+       imx_writew(1 << expio, PBC_INTMASK_SET_REG);
 }
 
 static struct irq_chip expio_irq_chip = {
@@ -226,8 +226,8 @@ static void __init mx31ads_init_expio(void)
        mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_GPIO1_4, IOMUX_CONFIG_GPIO), "expio");
 
        /* disable the interrupt and clear the status */
-       __raw_writew(0xFFFF, PBC_INTMASK_CLEAR_REG);
-       __raw_writew(0xFFFF, PBC_INTSTATUS_REG);
+       imx_writew(0xFFFF, PBC_INTMASK_CLEAR_REG);
+       imx_writew(0xFFFF, PBC_INTSTATUS_REG);
 
        irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
        WARN_ON(irq_base < 0);
index bb6f8a52a6b8b2ebaba9b8be1dc6f9478cfc3c2c..4f2d99888afd28833044022396a6ea9582a2c994 100644 (file)
@@ -509,7 +509,7 @@ static void mx31moboard_poweroff(void)
 
        mxc_iomux_mode(MX31_PIN_WATCHDOG_RST__WATCHDOG_RST);
 
-       __raw_writew(1 << 6 | 1 << 2, MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
+       imx_writew(1 << 6 | 1 << 2, MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
 }
 
 static int mx31moboard_baseboard;
index 5c27646047270fc4fbcab94e689f5e533d3d9d09..34df64f133ed2f360ee57354c75a10649815bf90 100644 (file)
@@ -190,9 +190,9 @@ static struct platform_device qong_nand_device = {
 static void __init qong_init_nand_mtd(void)
 {
        /* init CS */
-       __raw_writel(0x00004f00, MX31_IO_ADDRESS(MX31_WEIM_CSCRxU(3)));
-       __raw_writel(0x20013b31, MX31_IO_ADDRESS(MX31_WEIM_CSCRxL(3)));
-       __raw_writel(0x00020800, MX31_IO_ADDRESS(MX31_WEIM_CSCRxA(3)));
+       imx_writel(0x00004f00, MX31_IO_ADDRESS(MX31_WEIM_CSCRxU(3)));
+       imx_writel(0x20013b31, MX31_IO_ADDRESS(MX31_WEIM_CSCRxL(3)));
+       imx_writel(0x00020800, MX31_IO_ADDRESS(MX31_WEIM_CSCRxA(3)));
 
        mxc_iomux_set_gpr(MUX_SDCTL_CSD1_SEL, true);
 
index a5b1af6d7441e262a49bd19d2611adfdc3364b14..d327042567817adf27b95b9eac2a29ac4df23d98 100644 (file)
@@ -193,4 +193,9 @@ extern struct cpu_op *(*get_cpu_op)(int *op);
 #define cpu_is_mx3()   (cpu_is_mx31() || cpu_is_mx35())
 #define cpu_is_mx2()   (cpu_is_mx21() || cpu_is_mx27())
 
+#define imx_readl      readl_relaxed
+#define imx_readw      readw_relaxed
+#define imx_writel     writel_relaxed
+#define imx_writew     writew_relaxed
+
 #endif /*  __ASM_ARCH_MXC_H__ */
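
These four macros are what every imx_readl()/imx_writel() call in the hunks above resolves to. Unlike __raw_readl()/__raw_writel(), which perform native-endian accesses with no byte swapping and no barriers, the *_relaxed() accessors are defined as little-endian (so MMIO keeps working if the kernel is built big-endian, which the ARM_BE8() and CONFIG_ARCH_SUPPORTS_BIG_ENDIAN hunks elsewhere in this diff suggest is the goal) while still omitting the memory barriers of readl()/writel(). Roughly, on 32-bit ARM they amount to the following (simplified sketch, not the literal kernel definitions):

       #define readl_relaxed(c)        le32_to_cpu((__force __le32)__raw_readl(c))
       #define writel_relaxed(v, c)    __raw_writel((__force u32)cpu_to_le32(v), (c))
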
index 56d02d064fbf941c3b070cced2bf2f2e88a9056e..43096c8990d4cb913935b62c758107316e46a637 100644 (file)
@@ -19,9 +19,9 @@ static int mx27_suspend_enter(suspend_state_t state)
        switch (state) {
        case PM_SUSPEND_MEM:
                /* Clear MPEN and SPEN to disable MPLL/SPLL */
-               cscr = __raw_readl(MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR));
+               cscr = imx_readl(MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR));
                cscr &= 0xFFFFFFFC;
-               __raw_writel(cscr, MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR));
+               imx_writel(cscr, MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR));
                /* Executes WFI */
                cpu_do_idle();
                break;
index 6a07006ff0f48136591ff0909636cd73b774bf08..94c0898751d8538290a258fa1cb3d4d50ba8b9f0 100644 (file)
  */
 void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode)
 {
-       int reg = __raw_readl(mx3_ccm_base + MXC_CCM_CCMR);
+       int reg = imx_readl(mx3_ccm_base + MXC_CCM_CCMR);
        reg &= ~MXC_CCM_CCMR_LPM_MASK;
 
        switch (mode) {
        case MX3_WAIT:
                if (cpu_is_mx35())
                        reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
-               __raw_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
+               imx_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
                break;
        default:
                pr_err("Unknown cpu power mode: %d\n", mode);
index 532d4b08276dc84c149b525e6fcc0a85d30a543b..868781fd460c788950ac59e6ef197bede4ab6660 100644 (file)
@@ -153,15 +153,15 @@ static void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
        int stop_mode = 0;
 
        /* always allow platform to issue a deep sleep mode request */
-       plat_lpc = __raw_readl(cortex_base + MXC_CORTEXA8_PLAT_LPC) &
+       plat_lpc = imx_readl(cortex_base + MXC_CORTEXA8_PLAT_LPC) &
            ~(MXC_CORTEXA8_PLAT_LPC_DSM);
-       ccm_clpcr = __raw_readl(ccm_base + MXC_CCM_CLPCR) &
+       ccm_clpcr = imx_readl(ccm_base + MXC_CCM_CLPCR) &
                    ~(MXC_CCM_CLPCR_LPM_MASK);
-       arm_srpgcr = __raw_readl(gpc_base + MXC_SRPG_ARM_SRPGCR) &
+       arm_srpgcr = imx_readl(gpc_base + MXC_SRPG_ARM_SRPGCR) &
                     ~(MXC_SRPGCR_PCR);
-       empgc0 = __raw_readl(gpc_base + MXC_SRPG_EMPGC0_SRPGCR) &
+       empgc0 = imx_readl(gpc_base + MXC_SRPG_EMPGC0_SRPGCR) &
                 ~(MXC_SRPGCR_PCR);
-       empgc1 = __raw_readl(gpc_base + MXC_SRPG_EMPGC1_SRPGCR) &
+       empgc1 = imx_readl(gpc_base + MXC_SRPG_EMPGC1_SRPGCR) &
                 ~(MXC_SRPGCR_PCR);
 
        switch (mode) {
@@ -196,17 +196,17 @@ static void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
                return;
        }
 
-       __raw_writel(plat_lpc, cortex_base + MXC_CORTEXA8_PLAT_LPC);
-       __raw_writel(ccm_clpcr, ccm_base + MXC_CCM_CLPCR);
-       __raw_writel(arm_srpgcr, gpc_base + MXC_SRPG_ARM_SRPGCR);
-       __raw_writel(arm_srpgcr, gpc_base + MXC_SRPG_NEON_SRPGCR);
+       imx_writel(plat_lpc, cortex_base + MXC_CORTEXA8_PLAT_LPC);
+       imx_writel(ccm_clpcr, ccm_base + MXC_CCM_CLPCR);
+       imx_writel(arm_srpgcr, gpc_base + MXC_SRPG_ARM_SRPGCR);
+       imx_writel(arm_srpgcr, gpc_base + MXC_SRPG_NEON_SRPGCR);
 
        if (stop_mode) {
                empgc0 |= MXC_SRPGCR_PCR;
                empgc1 |= MXC_SRPGCR_PCR;
 
-               __raw_writel(empgc0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
-               __raw_writel(empgc1, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
+               imx_writel(empgc0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
+               imx_writel(empgc1, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
        }
 }
 
@@ -228,8 +228,8 @@ static int mx5_suspend_enter(suspend_state_t state)
                flush_cache_all();
 
                /*clear the EMPGC0/1 bits */
-               __raw_writel(0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
-               __raw_writel(0, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
+               imx_writel(0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
+               imx_writel(0, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
 
                if (imx5_suspend_in_ocram_fn)
                        imx5_suspend_in_ocram_fn(suspend_ocram_base);
index 4470376af5f815fd5f8f2d12c5e7d18306675bb4..58924b3844df551a270aac552b0803fb5eedf8dc 100644 (file)
@@ -561,13 +561,13 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
        goto put_node;
 
 pl310_cache_map_failed:
-       iounmap(&pm_info->gpc_base.vbase);
+       iounmap(pm_info->gpc_base.vbase);
 gpc_map_failed:
-       iounmap(&pm_info->iomuxc_base.vbase);
+       iounmap(pm_info->iomuxc_base.vbase);
 iomuxc_map_failed:
-       iounmap(&pm_info->src_base.vbase);
+       iounmap(pm_info->src_base.vbase);
 src_map_failed:
-       iounmap(&pm_info->mmdc_base.vbase);
+       iounmap(pm_info->mmdc_base.vbase);
 put_node:
        of_node_put(node);
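
The pm-imx6 hunk above is a straightforward bug fix in the error-unwind path: with the stray '&', each iounmap() was handed the address of the vbase pointer member inside pm_info rather than the mapping that pointer holds. A small sketch of the distinction (the struct and address are illustrative only):

       struct pm_map { void __iomem *vbase; };

       static void pm_map_example(void)
       {
               struct pm_map m;

               m.vbase = ioremap(0x020dc000, SZ_4K);    /* illustrative address */
               if (!m.vbase)
                       return;
               /* ... */
               iounmap(m.vbase);        /* correct: unmap what ioremap() returned */
               /* iounmap(&m.vbase) would pass the address of the pointer itself */
       }
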
 
index 51c35013b673b0d356c12ae8d0853557243c1f34..93d4a9a393531677dc0d2e5fbb833b69abe8e068 100644 (file)
@@ -54,7 +54,7 @@ void mxc_restart(enum reboot_mode mode, const char *cmd)
                wcr_enable = (1 << 2);
 
        /* Assert SRS signal */
-       __raw_writew(wcr_enable, wdog_base);
+       imx_writew(wcr_enable, wdog_base);
        /*
         * Due to imx6q errata ERR004346 (WDOG: WDOG SRS bit requires to be
         * written twice), we add another two writes to ensure there must be at
@@ -62,8 +62,8 @@ void mxc_restart(enum reboot_mode mode, const char *cmd)
         * the target check here, since the writes shouldn't be a huge burden
         * for other platforms.
         */
-       __raw_writew(wcr_enable, wdog_base);
-       __raw_writew(wcr_enable, wdog_base);
+       imx_writew(wcr_enable, wdog_base);
+       imx_writew(wcr_enable, wdog_base);
 
        /* wait for reset to assert... */
        mdelay(500);
index 4de65eeda1eb15666e9c82557f50477e37628ec8..ae23d50f7861b4d804e38025927922f0eb5f6e1f 100644 (file)
@@ -65,10 +65,10 @@ static int tzic_set_irq_fiq(unsigned int irq, unsigned int type)
                return -EINVAL;
        mask = 1U << (irq & 0x1F);
 
-       value = __raw_readl(tzic_base + TZIC_INTSEC0(index)) | mask;
+       value = imx_readl(tzic_base + TZIC_INTSEC0(index)) | mask;
        if (type)
                value &= ~mask;
-       __raw_writel(value, tzic_base + TZIC_INTSEC0(index));
+       imx_writel(value, tzic_base + TZIC_INTSEC0(index));
 
        return 0;
 }
@@ -82,15 +82,15 @@ static void tzic_irq_suspend(struct irq_data *d)
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        int idx = d->hwirq >> 5;
 
-       __raw_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx));
+       imx_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx));
 }
 
 static void tzic_irq_resume(struct irq_data *d)
 {
        int idx = d->hwirq >> 5;
 
-       __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(idx)),
-                    tzic_base + TZIC_WAKEUP0(idx));
+       imx_writel(imx_readl(tzic_base + TZIC_ENSET0(idx)),
+                  tzic_base + TZIC_WAKEUP0(idx));
 }
 
 #else
@@ -135,8 +135,8 @@ static void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs)
                handled = 0;
 
                for (i = 0; i < 4; i++) {
-                       stat = __raw_readl(tzic_base + TZIC_HIPND(i)) &
-                               __raw_readl(tzic_base + TZIC_INTSEC0(i));
+                       stat = imx_readl(tzic_base + TZIC_HIPND(i)) &
+                               imx_readl(tzic_base + TZIC_INTSEC0(i));
 
                        while (stat) {
                                handled = 1;
@@ -166,18 +166,18 @@ void __init tzic_init_irq(void)
        /* put the TZIC into the reset value with
         * all interrupts disabled
         */
-       i = __raw_readl(tzic_base + TZIC_INTCNTL);
+       i = imx_readl(tzic_base + TZIC_INTCNTL);
 
-       __raw_writel(0x80010001, tzic_base + TZIC_INTCNTL);
-       __raw_writel(0x1f, tzic_base + TZIC_PRIOMASK);
-       __raw_writel(0x02, tzic_base + TZIC_SYNCCTRL);
+       imx_writel(0x80010001, tzic_base + TZIC_INTCNTL);
+       imx_writel(0x1f, tzic_base + TZIC_PRIOMASK);
+       imx_writel(0x02, tzic_base + TZIC_SYNCCTRL);
 
        for (i = 0; i < 4; i++)
-               __raw_writel(0xFFFFFFFF, tzic_base + TZIC_INTSEC0(i));
+               imx_writel(0xFFFFFFFF, tzic_base + TZIC_INTSEC0(i));
 
        /* disable all interrupts */
        for (i = 0; i < 4; i++)
-               __raw_writel(0xFFFFFFFF, tzic_base + TZIC_ENCLEAR0(i));
+               imx_writel(0xFFFFFFFF, tzic_base + TZIC_ENCLEAR0(i));
 
        /* all IRQ no FIQ Warning :: No selection */
 
@@ -214,13 +214,13 @@ int tzic_enable_wake(void)
 {
        unsigned int i;
 
-       __raw_writel(1, tzic_base + TZIC_DSMINT);
-       if (unlikely(__raw_readl(tzic_base + TZIC_DSMINT) == 0))
+       imx_writel(1, tzic_base + TZIC_DSMINT);
+       if (unlikely(imx_readl(tzic_base + TZIC_DSMINT) == 0))
                return -EAGAIN;
 
        for (i = 0; i < 4; i++)
-               __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(i)),
-                            tzic_base + TZIC_WAKEUP0(i));
+               imx_writel(imx_readl(tzic_base + TZIC_ENSET0(i)),
+                          tzic_base + TZIC_WAKEUP0(i));
 
        return 0;
 }
index b01bdc9baf89520096ff348ba5d20e14c4aa0b87..b2a85ba13f088fb451377fa09d800df34e926846 100644 (file)
@@ -2,22 +2,16 @@ menuconfig ARCH_INTEGRATOR
        bool "ARM Ltd. Integrator family"
        depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6
        select ARM_AMBA
-       select ARM_PATCH_PHYS_VIRT if MMU
-       select AUTO_ZRELADDR
-       select COMMON_CLK
        select COMMON_CLK_VERSATILE
-       select GENERIC_CLOCKEVENTS
        select HAVE_TCM
        select ICST
        select MFD_SYSCON
-       select MULTI_IRQ_HANDLER
        select PLAT_VERSATILE
        select POWER_RESET
        select POWER_RESET_VERSATILE
        select POWER_SUPPLY
        select SOC_INTEGRATOR_CM
        select SPARSE_IRQ
-       select USE_OF
        select VERSATILE_FPGA_IRQ
        help
          Support for ARM's Integrator platform.
diff --git a/arch/arm/mach-integrator/Makefile.boot b/arch/arm/mach-integrator/Makefile.boot
deleted file mode 100644 (file)
index ff0a4b5..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
-
diff --git a/arch/arm/mach-keystone/Makefile.boot b/arch/arm/mach-keystone/Makefile.boot
deleted file mode 100644 (file)
index f3835c4..0000000
+++ /dev/null
@@ -1 +0,0 @@
-zreladdr-y     := 0x80008000
index c279293f084cb87c2e0cc4cbf4ada1e2984e83f2..e6b9cb1e6709753b6e25166d8b9cac395d9a2c42 100644 (file)
@@ -63,7 +63,7 @@ static void __init keystone_init(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static phys_addr_t keystone_virt_to_idmap(unsigned long x)
+static unsigned long keystone_virt_to_idmap(unsigned long x)
 {
        return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START;
 }
@@ -100,6 +100,7 @@ static const char *const keystone_match[] __initconst = {
        "ti,k2hk",
        "ti,k2e",
        "ti,k2l",
+       "ti,k2g",
        "ti,keystone",
        NULL,
 };
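
Besides adding the "ti,k2g" compatible, this hunk changes keystone_virt_to_idmap() to return unsigned long, matching the reworked arch_virt_to_idmap function pointer in the arch/arm/mm/idmap.c hunk near the end of this diff. The hook exists so that a platform whose kernel runs from a high physical alias (as Keystone 2 does with its LPAE memory map) can translate kernel virtual addresses into the low alias used for the identity map. A sketch of how such an override is wired up (names other than arch_virt_to_idmap are illustrative):

       extern unsigned long (*arch_virt_to_idmap)(unsigned long x);

       static unsigned long board_virt_to_idmap(unsigned long x)
       {
               /* BOARD_LOW_PHYS_START is assumed; keystone uses KEYSTONE_LOW_PHYS_START */
               return x - CONFIG_PAGE_OFFSET + BOARD_LOW_PHYS_START;
       }

       /* in the machine init code: arch_virt_to_idmap = board_virt_to_idmap; */
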
diff --git a/arch/arm/mach-mmp/Makefile.boot b/arch/arm/mach-mmp/Makefile.boot
deleted file mode 100644 (file)
index 5edf03e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-   zreladdr-y  += 0x00008000
index a32575fa3fba4d727e4584e7c169a5de09f46c48..c32f85559c6509e7f64116c351730141db16938a 100644 (file)
@@ -1,5 +1,6 @@
 menuconfig ARCH_MV78XX0
-       bool "Marvell MV78xx0" if ARCH_MULTI_V5
+       bool "Marvell MV78xx0"
+       depends on ARCH_MULTI_V5
        select ARCH_REQUIRE_GPIOLIB
        select CPU_FEROCEON
        select MVEBU_MBUS
diff --git a/arch/arm/mach-mv78xx0/Makefile.boot b/arch/arm/mach-mv78xx0/Makefile.boot
deleted file mode 100644 (file)
index 760a0ef..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
index f9597b701028a107d6acc11ce481fcaddc5e0411..46c742d3bd41e65c0f0fbaa2fc5cb8d3561b0c0a 100644 (file)
@@ -140,6 +140,7 @@ static void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
                panic("Cannot find 'marvell,bootrom' compatible node");
 
        err = of_address_to_resource(node, 0, &res);
+       of_node_put(node);
        if (err < 0)
                panic("Cannot get 'bootrom' node address");
 
index 3d90ef19be2b4cae09a187e5e9541154f714ed03..2da8e5dfcf24df4fca3ba9384b84726654dea4d8 100644 (file)
@@ -3,20 +3,17 @@ menu "NetX Implementations"
 
 config MACH_NXDKN
        bool "Enable Hilscher nxdkn Eval Board support"
-       depends on ARCH_NETX
        help
          Board support for the Hilscher NetX Eval Board
 
 config MACH_NXDB500
        bool "Enable Hilscher nxdb500 Eval Board support"
-       depends on ARCH_NETX
        select ARM_AMBA
        help
          Board support for the Hilscher nxdb500 Eval Board
 
 config MACH_NXEB500HMI
        bool "Enable Hilscher nxeb500hmi Eval Board support"
-       depends on ARCH_NETX
        select ARM_AMBA
        help
          Board support for the Hilscher nxeb500hmi Eval Board
diff --git a/arch/arm/mach-nspire/Makefile.boot b/arch/arm/mach-nspire/Makefile.boot
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/arch/arm/mach-omap2/Makefile.boot b/arch/arm/mach-omap2/Makefile.boot
deleted file mode 100644 (file)
index b03e562..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-  zreladdr-y           += 0x80008000
-params_phys-y          := 0x80000100
-initrd_phys-y          := 0x80800000
index 9cda974a3009f3295e7bd013709f1bf586752282..d7f1d69daf6d4d083005e15d11d556c412aacfbc 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/pinctrl/machine.h>
-#include <linux/platform_data/mailbox-omap.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -66,32 +65,6 @@ static int __init omap3_l3_init(void)
 }
 omap_postcore_initcall(omap3_l3_init);
 
-#if defined(CONFIG_OMAP2PLUS_MBOX) || defined(CONFIG_OMAP2PLUS_MBOX_MODULE)
-static inline void __init omap_init_mbox(void)
-{
-       struct omap_hwmod *oh;
-       struct platform_device *pdev;
-       struct omap_mbox_pdata *pdata;
-
-       oh = omap_hwmod_lookup("mailbox");
-       if (!oh) {
-               pr_err("%s: unable to find hwmod\n", __func__);
-               return;
-       }
-       if (!oh->dev_attr) {
-               pr_err("%s: hwmod doesn't have valid attrs\n", __func__);
-               return;
-       }
-
-       pdata = (struct omap_mbox_pdata *)oh->dev_attr;
-       pdev = omap_device_build("omap-mailbox", -1, oh, pdata, sizeof(*pdata));
-       WARN(IS_ERR(pdev), "%s: could not build device, err %ld\n",
-                                               __func__, PTR_ERR(pdev));
-}
-#else
-static inline void omap_init_mbox(void) { }
-#endif /* CONFIG_OMAP2PLUS_MBOX */
-
 static inline void omap_init_sti(void) {}
 
 #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
@@ -229,7 +202,6 @@ static int __init omap2_init_devices(void)
                 * please keep these calls, and their implementations above,
                 * in alphabetical order so they're easier to sort through.
                 */
-               omap_init_mbox();
                omap_init_mcspi();
                omap_init_sham();
                omap_init_aes();
index e781e4fae13a92fd68aa04ac891b9731bc8fa60e..a935d28443dab40e1a8005b7144aeb8de436c73c 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/platform_data/pinctrl-single.h>
 #include <linux/platform_data/iommu-omap.h>
 #include <linux/platform_data/wkup_m3.h>
+#include <linux/platform_data/pwm_omap_dmtimer.h>
+#include <plat/dmtimer.h>
 
 #include "common.h"
 #include "common-board-devices.h"
@@ -449,6 +451,24 @@ void omap_auxdata_legacy_init(struct device *dev)
        dev->platform_data = &twl_gpio_auxdata;
 }
 
+/* Dual mode timer PWM callbacks platdata */
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+struct pwm_omap_dmtimer_pdata pwm_dmtimer_pdata = {
+       .request_by_node = omap_dm_timer_request_by_node,
+       .free = omap_dm_timer_free,
+       .enable = omap_dm_timer_enable,
+       .disable = omap_dm_timer_disable,
+       .get_fclk = omap_dm_timer_get_fclk,
+       .start = omap_dm_timer_start,
+       .stop = omap_dm_timer_stop,
+       .set_load = omap_dm_timer_set_load,
+       .set_match = omap_dm_timer_set_match,
+       .set_pwm = omap_dm_timer_set_pwm,
+       .set_prescaler = omap_dm_timer_set_prescaler,
+       .write_counter = omap_dm_timer_write_counter,
+};
+#endif
+
 /*
  * Few boards still need auxdata populated before we populate
  * the dev entries in of_platform_populate().
@@ -502,6 +522,9 @@ static struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("ti,am4372-wkup-m3", 0x44d00000, "44d00000.wkup_m3",
                       &wkup_m3_data),
 #endif
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+       OF_DEV_AUXDATA("ti,omap-dmtimer-pwm", 0, NULL, &pwm_dmtimer_pdata),
+#endif
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
        OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu",
                       &omap4_iommu_pdata),
index eafd120b53f1bc15c82f2cc47dc8033e31ca566e..1b9f0520dea9154afa31f9668241e03f211fdc6a 100644 (file)
@@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
        stmfd   sp!, {lr}       @ save registers on stack
        /* Setup so that we will disable and enable l2 */
        mov     r1, #0x1
-       adrl    r2, l2dis_3630  @ may be too distant for plain adr
-       str     r1, [r2]
+       adrl    r3, l2dis_3630_offset   @ may be too distant for plain adr
+       ldr     r2, [r3]                @ value for offset
+       str     r1, [r2, r3]            @ write to l2dis_3630
        ldmfd   sp!, {pc}       @ restore regs and return
 ENDPROC(enable_omap3630_toggle_l2_on_restore)
 
-       .text
-/* Function to call rom code to save secure ram context */
+/*
+ * Function to call rom code to save secure ram context. This gets
+ * relocated to SRAM, so it can be all in .data section. Otherwise
+ * we need to initialize api_params separately.
+ */
+       .data
        .align  3
 ENTRY(save_secure_ram_context)
        stmfd   sp!, {r4 - r11, lr}     @ save registers on stack
@@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context)
 ENTRY(save_secure_ram_context_sz)
        .word   . - save_secure_ram_context
 
+       .text
+
 /*
  * ======================
  * == Idle entry point ==
@@ -289,12 +296,6 @@ wait_sdrc_ready:
        bic     r5, r5, #0x40
        str     r5, [r4]
 
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaing this code.
- */
-
 is_dll_in_lock_mode:
        /* Is dll in lock mode? */
        ldr     r4, sdrc_dlla_ctrl
@@ -302,11 +303,7 @@ is_dll_in_lock_mode:
        tst     r5, #0x4
        bne     exit_nonoff_modes       @ Return if locked
        /* wait till dll locks */
-       adr     r7, kick_counter
 wait_dll_lock_timed:
-       ldr     r4, wait_dll_lock_counter
-       add     r4, r4, #1
-       str     r4, [r7, #wait_dll_lock_counter - kick_counter]
        ldr     r4, sdrc_dlla_status
        /* Wait 20uS for lock */
        mov     r6, #8
@@ -330,9 +327,6 @@ kick_dll:
        orr     r6, r6, #(1<<3)         @ enable dll
        str     r6, [r4]
        dsb
-       ldr     r4, kick_counter
-       add     r4, r4, #1
-       str     r4, [r7]                @ kick_counter
        b       wait_dll_lock_timed
 
 exit_nonoff_modes:
@@ -360,15 +354,6 @@ sdrc_dlla_status:
        .word   SDRC_DLLA_STATUS_V
 sdrc_dlla_ctrl:
        .word   SDRC_DLLA_CTRL_V
-       /*
-        * When exporting to userspace while the counters are in SRAM,
-        * these 2 words need to be at the end to facilitate retrival!
-        */
-kick_counter:
-       .word   0
-wait_dll_lock_counter:
-       .word   0
-
 ENTRY(omap3_do_wfi_sz)
        .word   . - omap3_do_wfi
 
@@ -437,7 +422,9 @@ ENTRY(omap3_restore)
        cmp     r2, #0x0        @ Check if target power state was OFF or RET
        bne     logic_l1_restore
 
-       ldr     r0, l2dis_3630
+       adr     r1, l2dis_3630_offset   @ address for offset
+       ldr     r0, [r1]                @ value for offset
+       ldr     r0, [r1, r0]            @ value at l2dis_3630
        cmp     r0, #0x1        @ should we disable L2 on 3630?
        bne     skipl2dis
        mrc     p15, 0, r0, c1, c0, 1
@@ -449,12 +436,14 @@ skipl2dis:
        and     r1, #0x700
        cmp     r1, #0x300
        beq     l2_inv_gp
+       adr     r0, l2_inv_api_params_offset
+       ldr     r3, [r0]
+       add     r3, r3, r0              @ r3 points to dummy parameters
        mov     r0, #40                 @ set service ID for PPA
        mov     r12, r0                 @ copy secure Service ID in r12
        mov     r1, #0                  @ set task id for ROM code in r1
        mov     r2, #4                  @ set some flags in r2, r6
        mov     r6, #0xff
-       adr     r3, l2_inv_api_params   @ r3 points to dummy parameters
        dsb                             @ data write barrier
        dmb                             @ data memory barrier
        smc     #1                      @ call SMI monitor (smi #1)
@@ -488,8 +477,8 @@ skipl2dis:
        b       logic_l1_restore
 
        .align
-l2_inv_api_params:
-       .word   0x1, 0x00
+l2_inv_api_params_offset:
+       .long   l2_inv_api_params - .
 l2_inv_gp:
        /* Execute smi to invalidate L2 cache */
        mov r12, #0x1                   @ set up to invalidate L2
@@ -506,7 +495,9 @@ l2_inv_gp:
        mov     r12, #0x2
        smc     #0                      @ Call SMI monitor (smieq)
 logic_l1_restore:
-       ldr     r1, l2dis_3630
+       adr     r0, l2dis_3630_offset   @ address for offset
+       ldr     r1, [r0]                @ value for offset
+       ldr     r1, [r0, r1]            @ value at l2dis_3630
        cmp     r1, #0x1                @ Test if L2 re-enable needed on 3630
        bne     skipl2reen
        mrc     p15, 0, r1, c1, c0, 1
@@ -535,9 +526,17 @@ control_stat:
        .word   CONTROL_STAT
 control_mem_rta:
        .word   CONTROL_MEM_RTA_CTRL
+l2dis_3630_offset:
+       .long   l2dis_3630 - .
+
+       .data
 l2dis_3630:
        .word   0
 
+       .data
+l2_inv_api_params:
+       .word   0x1, 0x00
+
 /*
  * Internal functions
  */
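
The sleep34xx.S changes above move the runtime-written words (l2dis_3630, l2_inv_api_params) and the SRAM-relocated save_secure_ram_context routine into .data, and the remaining code reaches those words through self-relative offsets: a ".long sym - ." word is loaded with adr/ldr and added back to its own address. That stays correct no matter where the code ends up and avoids the PC-relative stores the removed comment warned about for Thumb-2. The same idea expressed in C, purely as an analogy:

       /* C analogy of the ".long sym - ." words above (sketch, not kernel code) */
       struct rel_ref {
               int offset;             /* holds target - &offset, fixed up when the blob is built */
       };

       static inline void *rel_deref(struct rel_ref *r)
       {
               /* add the stored distance back to the word's own address; the pair
                * remains valid wherever the containing blob is copied, as long as
                * code and data move together */
               return (char *)&r->offset + r->offset;
       }
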
index 9b09d85d811a1c52b0fa735a2e2743e05346843d..c7a3b4aab4b5441249ddd9a7fdc362ef9370d737 100644 (file)
        dsb
 .endm
 
-ppa_zero_params:
-       .word           0x0
-
-ppa_por_params:
-       .word           1, 0
-
 #ifdef CONFIG_ARCH_OMAP4
 
 /*
@@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume)
        beq     skip_ns_smp_enable
 ppa_actrl_retry:
        mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
-       adr     r3, ppa_zero_params             @ Pointer to parameters
+       adr     r1, ppa_zero_params_offset
+       ldr     r3, [r1]
+       add     r3, r3, r1                      @ Pointer to ppa_zero_params
        mov     r1, #0x0                        @ Process ID
        mov     r2, #0x4                        @ Flag
        mov     r6, #0xff
@@ -303,7 +299,9 @@ skip_ns_smp_enable:
        ldr     r0, =OMAP4_PPA_L2_POR_INDEX
        ldr     r1, =OMAP44XX_SAR_RAM_BASE
        ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
-       adr     r3, ppa_por_params
+       adr     r1, ppa_por_params_offset
+       ldr     r3, [r1]
+       add     r3, r3, r1                      @ Pointer to ppa_por_params
        str     r4, [r3, #0x04]
        mov     r1, #0x0                        @ Process ID
        mov     r2, #0x4                        @ Flag
@@ -328,6 +326,8 @@ skip_l2en:
 #endif
 
        b       cpu_resume                      @ Jump to generic resume
+ppa_por_params_offset:
+       .long   ppa_por_params - .
 ENDPROC(omap4_cpu_resume)
 #endif /* CONFIG_ARCH_OMAP4 */
 
@@ -380,4 +380,13 @@ ENTRY(omap_do_wfi)
        nop
 
        ldmfd   sp!, {pc}
+ppa_zero_params_offset:
+       .long   ppa_zero_params - .
 ENDPROC(omap_do_wfi)
+
+       .data
+ppa_zero_params:
+       .word           0
+
+ppa_por_params:
+       .word           1, 0
diff --git a/arch/arm/mach-orion5x/Makefile.boot b/arch/arm/mach-orion5x/Makefile.boot
deleted file mode 100644 (file)
index 760a0ef..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
diff --git a/arch/arm/mach-prima2/Makefile.boot b/arch/arm/mach-prima2/Makefile.boot
deleted file mode 100644 (file)
index c77a488..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-zreladdr-y             += 0x00008000
-params_phys-y          := 0x00000100
-initrd_phys-y          := 0x00800000
index 73494500b51cbb79b2a1aa025f5ad54f5cf60276..e165ae4600f5d5ef27bb54dbb522d57ac917584b 100644 (file)
@@ -11,16 +11,17 @@ menuconfig ARCH_QCOM
 
 if ARCH_QCOM
 
-config ARCH_MSM8X60
-       bool "Enable support for MSM8X60"
+config ARCH_QCOM_A_FAMILY
+       bool "Support a-family chipsets (msm8660, msm8960, apq8064)"
+       default y
        select CLKSRC_QCOM
+       help
+         Select this option if you want to support a-family platforms.
 
-config ARCH_MSM8960
-       bool "Enable support for MSM8960"
-       select CLKSRC_QCOM
+         A-family includes all Snapdragon S1/S2/S3/S4 chips before 2013,
+         up to the MSM8x60 and APQ8064 SoCs.
 
-config ARCH_MSM8974
-       bool "Enable support for MSM8974"
-       select HAVE_ARM_ARCH_TIMER
+         B-family includes all Snapdragon 2xx/4xx/6xx/8xx models starting
+         in 2013 with the MSM8x74 SoC.
 
 endif
index def40a0dd60cd290d91233cc47a1a8bad4ea6aa1..70ab4a25a5f853d4d8dade14490618d2523fcd13 100644 (file)
@@ -1,5 +1,6 @@
 menuconfig ARCH_REALVIEW
-       bool "ARM Ltd. RealView family" if ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7
+       bool "ARM Ltd. RealView family"
+       depends on ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7
        select ARM_AMBA
        select ARM_TIMER_SP804
        select COMMON_CLK_VERSATILE
diff --git a/arch/arm/mach-realview/Makefile.boot b/arch/arm/mach-realview/Makefile.boot
deleted file mode 100644 (file)
index d2c3d78..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-ifeq ($(CONFIG_REALVIEW_HIGH_PHYS_OFFSET),y)
-   zreladdr-y  += 0x70008000
-params_phys-y  := 0x70000100
-initrd_phys-y  := 0x70800000
-else
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
-endif
index 65585392655b3ac499dff837b2f6adfe02d5a708..6964e88760614ba1f8fa60c7f6fd12f3b07dfe20 100644 (file)
@@ -80,7 +80,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
                     virt_to_phys(versatile_secondary_startup));
 }
 
-struct smp_operations realview_dt_smp_ops __initdata = {
+static const struct smp_operations realview_dt_smp_ops __initconst = {
        .smp_prepare_cpus       = realview_smp_prepare_cpus,
        .smp_secondary_init     = versatile_secondary_init,
        .smp_boot_secondary     = versatile_boot_secondary,
index 7c0c420c3016acfb7dcaa74947da675ba3a1f0a7..e5c1888fc67bbfdc461c7c689f76ccf204043b25 100644 (file)
@@ -3,7 +3,8 @@
 #
 # Licensed under GPLv2
 menuconfig ARCH_S3C64XX
-       bool "Samsung S3C64XX" if ARCH_MULTI_V6
+       bool "Samsung S3C64XX"
+       depends on ARCH_MULTI_V6
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
        select ARM_VIC
diff --git a/arch/arm/mach-s3c64xx/Makefile.boot b/arch/arm/mach-s3c64xx/Makefile.boot
deleted file mode 100644 (file)
index c642333..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-   zreladdr-y  += 0x50008000
-params_phys-y  := 0x50000100
index 9cb11215cebaeb2e4e0a8355589ce6d7e5270b78..225c12bb3de91e7662c913ac1b8748dd56dbab8a 100644 (file)
@@ -12,7 +12,8 @@ extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
                              unsigned long arg);
 extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
 extern void shmobile_boot_scu(void);
-extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
+extern void shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
+                                         unsigned int max_cpus);
 extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
 extern int shmobile_smp_scu_cpu_kill(unsigned int cpu);
 extern struct platform_suspend_ops shmobile_suspend_ops;
@@ -31,8 +32,6 @@ int shmobile_cpufreq_init(void);
 static inline int shmobile_cpufreq_init(void) { return 0; }
 #endif
 
-extern void __iomem *shmobile_scu_base;
-
 static inline void __init shmobile_init_late(void)
 {
        shmobile_suspend_init();
index 57fbff024dcd5dd6ccf23afb94e09d6b0ae47796..634d701c56a7463d9b93b005498362d29e14b777 100644 (file)
@@ -10,6 +10,8 @@
 
 #include <linux/platform_device.h>
 
+#include "common.h"
+
 int __init shmobile_cpufreq_init(void)
 {
        platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
diff --git a/arch/arm/mach-shmobile/emev2.h b/arch/arm/mach-shmobile/emev2.h
new file mode 100644 (file)
index 0000000..916d25f
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __ASM_EMEV2_H__
+#define __ASM_EMEV2_H__
+
+extern const struct smp_operations emev2_smp_ops;
+
+#endif /* __ASM_EMEV2_H__ */
index fa5248c52399c9b5e78e3c1cd7c167523f306424..936d7011c3141b73228c923129f29f3635b0df13 100644 (file)
@@ -27,7 +27,7 @@
  */
 ENTRY(shmobile_boot_scu)
                                        @ r0 = SCU base address
-       mrc     p15, 0, r1, c0, c0, 5   @ read MIPDR
+       mrc     p15, 0, r1, c0, c0, 5   @ read MPIDR
        and     r1, r1, #3              @ mask out cpu ID
        lsl     r1, r1, #3              @ we will shift by cpu_id * 8 bits
        ldr     r2, [r0, #8]            @ SCU Power Status Register
@@ -38,9 +38,3 @@ ENTRY(shmobile_boot_scu)
 
        b       secondary_startup
 ENDPROC(shmobile_boot_scu)
-
-       .text
-       .align  2
-       .globl  shmobile_scu_base
-shmobile_scu_base:
-       .space  4
index 64663110ab6ca0e1d8f06cc04e69175165e2cace..4c7d2caf3730f644ca9cb39332e9295d5b10c600 100644 (file)
 #include <asm/smp_scu.h>
 #include "common.h"
 
+
+static phys_addr_t shmobile_scu_base_phys;
+static void __iomem *shmobile_scu_base;
+
 static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
 {
@@ -26,7 +30,7 @@ static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
                /* For this particular CPU register SCU SMP boot vector */
                shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
-                                 (unsigned long)shmobile_scu_base);
+                                 shmobile_scu_base_phys);
                break;
        };
 
@@ -37,13 +41,16 @@ static struct notifier_block shmobile_smp_scu_notifier = {
        .notifier_call = shmobile_smp_scu_notifier_call,
 };
 
-void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
+void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
+                                         unsigned int max_cpus)
 {
        /* install boot code shared by all CPUs */
        shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
        shmobile_boot_arg = MPIDR_HWID_BITMASK;
 
        /* enable SCU and cache coherency on booting CPU */
+       shmobile_scu_base_phys = scu_base_phys;
+       shmobile_scu_base = ioremap(scu_base_phys, PAGE_SIZE);
        scu_enable(shmobile_scu_base);
        scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
 
index 10b7cb5dcb3af1e3553761eb5d2f0dbbc33d8bf4..3c99aaf65325cd19860968c24e4cf83703e84970 100644 (file)
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
-#include "common.h"
-
-static struct map_desc emev2_io_desc[] __initdata = {
-#ifdef CONFIG_SMP
-       /* 2M mapping for SCU + L2 controller */
-       {
-               .virtual        = 0xf0000000,
-               .pfn            = __phys_to_pfn(0x1e000000),
-               .length         = SZ_2M,
-               .type           = MT_DEVICE
-       },
-#endif
-};
 
-static void __init emev2_map_io(void)
-{
-       iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc));
-}
+#include "common.h"
+#include "emev2.h"
 
 static const char *const emev2_boards_compat_dt[] __initconst = {
        "renesas,emev2",
        NULL,
 };
 
-extern const struct smp_operations emev2_smp_ops;
-
 DT_MACHINE_START(EMEV2_DT, "Generic Emma Mobile EV2 (Flattened Device Tree)")
        .smp            = smp_ops(emev2_smp_ops),
-       .map_io         = emev2_map_io,
        .init_early     = shmobile_init_delay,
        .init_late      = shmobile_init_late,
        .dt_compat      = emev2_boards_compat_dt,
index 0c8f80c5b04df34d61ed6a195769f3075fef3a56..db6dbfbaf9f10e804618556144034fa34083c7be 100644 (file)
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <asm/hardware/cache-l2x0.h>
 
 #include "common.h"
 
-static struct map_desc r8a7740_io_desc[] __initdata = {
-        /*
-         * for CPGA/INTC/PFC
-         * 0xe6000000-0xefffffff -> 0xe6000000-0xefffffff
-         */
-       {
-               .virtual        = 0xe6000000,
-               .pfn            = __phys_to_pfn(0xe6000000),
-               .length         = 160 << 20,
-               .type           = MT_DEVICE_NONSHARED
-       },
-#ifdef CONFIG_CACHE_L2X0
-       /*
-        * for l2x0_init()
-        * 0xf0100000-0xf0101000 -> 0xf0002000-0xf0003000
-        */
-       {
-               .virtual        = 0xf0002000,
-               .pfn            = __phys_to_pfn(0xf0100000),
-               .length         = PAGE_SIZE,
-               .type           = MT_DEVICE_NONSHARED
-       },
-#endif
-};
-
-static void __init r8a7740_map_io(void)
-{
-       debug_ll_io_init();
-       iotable_init(r8a7740_io_desc, ARRAY_SIZE(r8a7740_io_desc));
-}
-
 /*
  * r8a7740 chip has lasting errata on MERAM buffer.
  * this is work-around for it.
@@ -110,10 +78,6 @@ static void __init r8a7740_generic_init(void)
 {
        r8a7740_meram_workaround();
 
-#ifdef CONFIG_CACHE_L2X0
-       /* Shared attribute override enable, 32K*8way */
-       l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
-#endif
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -123,7 +87,8 @@ static const char *const r8a7740_boards_compat_dt[] __initconst = {
 };
 
 DT_MACHINE_START(R8A7740_DT, "Generic R8A7740 (Flattened Device Tree)")
-       .map_io         = r8a7740_map_io,
+       .l2c_aux_val    = 0,
+       .l2c_aux_mask   = ~0,
        .init_early     = shmobile_init_delay,
        .init_irq       = r8a7740_init_irq_of,
        .init_machine   = r8a7740_generic_init,
index 9eccde3c7b137151e3fcff5c882cb6bd62870195..6d0ebdfb03a292166dfa3b0979ce810993ecf658 100644 (file)
@@ -182,8 +182,6 @@ static int __init rcar_gen2_scan_mem(unsigned long node, const char *uname,
        return 0;
 }
 
-struct cma *rcar_gen2_dma_contiguous;
-
 void __init rcar_gen2_reserve(void)
 {
        struct memory_reserve_config mrc;
@@ -194,8 +192,11 @@ void __init rcar_gen2_reserve(void)
 
        of_scan_flat_dt(rcar_gen2_scan_mem, &mrc);
 #ifdef CONFIG_DMA_CMA
-       if (mrc.size && memblock_is_region_memory(mrc.base, mrc.size))
+       if (mrc.size && memblock_is_region_memory(mrc.base, mrc.size)) {
+               static struct cma *rcar_gen2_dma_contiguous;
+
                dma_contiguous_reserve_area(mrc.size, mrc.base, 0,
                                            &rcar_gen2_dma_contiguous, true);
+       }
 #endif
 }
index adbac6963f2b962e2350ecff5638271d55cf5046..3a732199cf5e98c0f5cc625c86c75fb182631513 100644 (file)
@@ -21,7 +21,9 @@
 #include <linux/delay.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
+
 #include "common.h"
+#include "emev2.h"
 
 #define EMEV2_SCU_BASE 0x1e000000
 #define EMEV2_SMU_BASE 0xe0110000
@@ -45,8 +47,7 @@ static void __init emev2_smp_prepare_cpus(unsigned int max_cpus)
        }
 
        /* setup EMEV2 specific SCU bits */
-       shmobile_scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE);
-       shmobile_smp_scu_prepare_cpus(max_cpus);
+       shmobile_smp_scu_prepare_cpus(EMEV2_SCU_BASE, max_cpus);
 }
 
 const struct smp_operations emev2_smp_ops __initconst = {
index b854fe2095ad14616b7c4aae209b47f7e4f7ded3..f5c31fbc10b2efbf3c341f9efbaa66f261c0f4d4 100644 (file)
@@ -92,12 +92,9 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
 {
        /* Map the reset vector (in headsmp-scu.S, headsmp.S) */
        __raw_writel(__pa(shmobile_boot_vector), AVECR);
-       shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
-       shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
        /* setup r8a7779 specific SCU bits */
-       shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
-       shmobile_smp_scu_prepare_cpus(max_cpus);
+       shmobile_smp_scu_prepare_cpus(R8A7779_SCU_BASE, max_cpus);
 
        r8a7779_pm_init();
 
index ee1a4b70604bd6eaca0245c10bb6f8e6ec6391d5..41137404382e2f94c61d610988c4e442dc094800 100644 (file)
@@ -52,8 +52,7 @@ static void __init sh73a0_smp_prepare_cpus(unsigned int max_cpus)
        __raw_writel(__pa(shmobile_boot_vector), SBAR);
 
        /* setup sh73a0 specific SCU bits */
-       shmobile_scu_base = IOMEM(SH73A0_SCU_BASE);
-       shmobile_smp_scu_prepare_cpus(max_cpus);
+       shmobile_smp_scu_prepare_cpus(SH73A0_SCU_BASE, max_cpus);
 }
 
 const struct smp_operations sh73a0_smp_ops __initconst = {
index 5d92b5dd486b0b9fd8d501aa3b69f56f96ae1ee2..74b30bade2c15c380c0d3fd721d5aa80a957ef79 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/io.h>
 #include <asm/system_misc.h>
 
+#include "common.h"
+
 static int shmobile_suspend_default_enter(suspend_state_t suspend_state)
 {
        cpu_do_idle();
index c17d4d3881ffc45e5e169af4e91cf46744ac006d..ad008e4b0c49a4aa1be23992be444198d67e41d5 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/delay.h>
 #include <linux/of_address.h>
 
+#include "common.h"
+
 static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
                                           unsigned int mult, unsigned int div)
 {
diff --git a/arch/arm/mach-spear/Makefile.boot b/arch/arm/mach-spear/Makefile.boot
deleted file mode 100644 (file)
index 4674a4c..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-zreladdr-y     += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
index c2be98f38e73b2006ea664b7089dc427a2575d8e..3c156190a1d44223027b4c9a37ab67550219854b 100644 (file)
@@ -69,6 +69,7 @@ MACHINE_END
 static const char * const sun8i_board_dt_compat[] = {
        "allwinner,sun8i-a23",
        "allwinner,sun8i-a33",
+       "allwinner,sun8i-a83t",
        "allwinner,sun8i-h3",
        NULL,
 };
index d6a3714b096e69801c381d320a0ccb1be3472585..ebe15b93bbe870804eb17759b05096a8f7298167 100644 (file)
@@ -1,5 +1,6 @@
 config ARCH_TANGO
-       bool "Sigma Designs Tango4 (SMP87xx)" if ARCH_MULTI_V7
+       bool "Sigma Designs Tango4 (SMP87xx)"
+       depends on ARCH_MULTI_V7
        # Cortex-A9 MPCore r3p0, PL310 r3p2
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARM_ERRATA_754322
index a18d5a34e2f5738e4f62bd24bf8a0b9edff7a3e8..a21f55e000d258c734535cbfc9df57744599887a 100644 (file)
@@ -9,7 +9,7 @@ static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle)
        return 0;
 }
 
-static struct smp_operations tango_smp_ops __initdata = {
+static const struct smp_operations tango_smp_ops __initconst = {
        .smp_boot_secondary     = tango_boot_secondary,
 };
 
diff --git a/arch/arm/mach-u300/Makefile.boot b/arch/arm/mach-u300/Makefile.boot
deleted file mode 100644 (file)
index 87811de..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-   zreladdr-y  += 0x48008000
-params_phys-y  := 0x48000100
-# This isn't used.
-#initrd_phys-y := 0x48800000
diff --git a/arch/arm/mach-ux500/Makefile.boot b/arch/arm/mach-ux500/Makefile.boot
deleted file mode 100644 (file)
index 760a0ef..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
diff --git a/arch/arm/mach-zynq/Makefile.boot b/arch/arm/mach-zynq/Makefile.boot
deleted file mode 100644 (file)
index 760a0ef..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
index 549f6d3aec5b66457a948f579f04b8b5dd1021cc..4daeda0a5b7faa837093e829cdd6aa63e29603ec 100644 (file)
@@ -1037,24 +1037,26 @@ config ARCH_SUPPORTS_BIG_ENDIAN
          This option specifies the architecture can support big endian
          operation.
 
-config ARM_KERNMEM_PERMS
-       bool "Restrict kernel memory permissions"
+config DEBUG_RODATA
+       bool "Make kernel text and rodata read-only"
        depends on MMU
+       default y if CPU_V7
        help
-         If this is set, kernel memory other than kernel text (and rodata)
-         will be made non-executable. The tradeoff is that each region is
-         padded to section-size (1MiB) boundaries (because their permissions
-         are different and splitting the 1M pages into 4K ones causes TLB
-         performance problems), wasting memory.
+         If this is set, kernel text and rodata memory will be made
+         read-only, and non-text kernel memory will be made non-executable.
+         The tradeoff is that each region is padded to section-size (1MiB)
+         boundaries (because their permissions are different and splitting
+         the 1M pages into 4K ones causes TLB performance problems), which
+         can waste memory.
 
-config DEBUG_RODATA
-       bool "Make kernel text and rodata read-only"
-       depends on ARM_KERNMEM_PERMS
+config DEBUG_ALIGN_RODATA
+       bool "Make rodata strictly non-executable"
+       depends on DEBUG_RODATA
        default y
        help
-         If this is set, kernel text and rodata will be made read-only. This
-         is to help catch accidental or malicious attempts to change the
-         kernel's executable code. Additionally splits rodata from kernel
-         text so it can be made explicitly non-executable. This creates
-         another section-size padded region, so it can waste more memory
-         space while gaining the read-only protections.
+         If this is set, rodata will be made explicitly non-executable. This
+         provides protection on the rare chance that attackers might find and
+         use ROP gadgets that exist in the rodata section. This adds an
+         additional section-aligned split of rodata from kernel text so it
+         can be made explicitly non-executable. This padding may waste memory
+         space to gain the additional protection.
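
In effect, ARM_KERNMEM_PERMS is folded into DEBUG_RODATA (now default y on CPU_V7 kernels), and the old "strictly non-executable rodata" behaviour moves under the new DEBUG_ALIGN_RODATA option; the arch/arm/mm/init.c hunk at the end of this diff switches its #ifdef guards to match. A simplified sketch of how the renamed options nest:

       #ifdef CONFIG_DEBUG_RODATA
       /* non-text kernel memory becomes NX; kernel text and rodata become read-only */
       # ifdef CONFIG_DEBUG_ALIGN_RODATA
       /* rodata additionally gets its own section-aligned, non-executable region */
       # endif
       #endif
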
index 1e373d268c04c3e3697ac40aaca1915f436a147b..88255bea65e41e61bd16ed2b12513ebcde7b04bd 100644 (file)
 #include <asm/cputype.h>
 #include <asm/hardware/cache-tauros2.h>
 
+/* CP15 PJ4 Control configuration register */
+#define CCR_L2C_PREFETCH_DISABLE       BIT(24)
+#define CCR_L2C_ECC_ENABLE             BIT(23)
+#define CCR_L2C_WAY7_4_DISABLE         BIT(21)
+#define CCR_L2C_BURST8_ENABLE          BIT(20)
 
 /*
  * When Tauros2 is used on a CPU that supports the v7 hierarchical
@@ -182,18 +187,18 @@ static void enable_extra_feature(unsigned int features)
        u = read_extra_features();
 
        if (features & CACHE_TAUROS2_PREFETCH_ON)
-               u &= ~0x01000000;
+               u &= ~CCR_L2C_PREFETCH_DISABLE;
        else
-               u |= 0x01000000;
+               u |= CCR_L2C_PREFETCH_DISABLE;
        pr_info("Tauros2: %s L2 prefetch.\n",
                        (features & CACHE_TAUROS2_PREFETCH_ON)
                        ? "Enabling" : "Disabling");
 
        if (features & CACHE_TAUROS2_LINEFILL_BURST8)
-               u |= 0x00100000;
+               u |= CCR_L2C_BURST8_ENABLE;
        else
-               u &= ~0x00100000;
-       pr_info("Tauros2: %s line fill burt8.\n",
+               u &= ~CCR_L2C_BURST8_ENABLE;
+       pr_info("Tauros2: %s burst8 line fill.\n",
                        (features & CACHE_TAUROS2_LINEFILL_BURST8)
                        ? "Enabling" : "Disabling");
 
@@ -287,16 +292,15 @@ void __init tauros2_init(unsigned int features)
        node = of_find_matching_node(NULL, tauros2_ids);
        if (!node) {
                pr_info("Not found marvell,tauros2-cache, disable it\n");
-               return;
+       } else {
+               ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
+               if (ret) {
+                       pr_info("Not found marvell,tauros-cache-features property, "
+                               "disable extra features\n");
+                       features = 0;
+               } else
+                       features = f;
        }
-
-       ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
-       if (ret) {
-               pr_info("Not found marvell,tauros-cache-features property, "
-                       "disable extra features\n");
-               features = 0;
-       } else
-               features = f;
 #endif
        tauros2_internal_init(features);
 }
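For reference, and not something this commit adds: the new CCR_L2C_* names expand to exactly the magic masks they replace, since BIT(n) is the kernel's (1UL << (n)) helper. A throwaway compile-time check along these lines (hypothetical, illustration only) shows the equivalence:

#include <linux/bitops.h>	/* BIT() */
#include <linux/bug.h>		/* BUILD_BUG_ON() */

static inline void tauros2_ccr_bits_sanity(void)	/* hypothetical helper */
{
	BUILD_BUG_ON(BIT(24) != 0x01000000);	/* CCR_L2C_PREFETCH_DISABLE */
	BUILD_BUG_ON(BIT(20) != 0x00100000);	/* CCR_L2C_BURST8_ENABLE */
}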
index d65909697165b20cc1936f50073bd3eea7521aec..bd274a05b8ffa9446160d1de69f777d9431c629b 100644 (file)
@@ -15,7 +15,7 @@
  * page tables.
  */
 pgd_t *idmap_pgd;
-phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
+unsigned long (*arch_virt_to_idmap)(unsigned long x);
 
 #ifdef CONFIG_ARM_LPAE
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
index 49bd08178008adc38cca29b5e50bf26aa14a07c9..53f42508025bf63495ed9ded82925f79d8925029 100644 (file)
@@ -572,8 +572,9 @@ void __init mem_init(void)
        }
 }
 
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
 struct section_perm {
+       const char *name;
        unsigned long start;
        unsigned long end;
        pmdval_t mask;
@@ -584,6 +585,7 @@ struct section_perm {
 static struct section_perm nx_perms[] = {
        /* Make pages tables, etc before _stext RW (set NX). */
        {
+               .name   = "pre-text NX",
                .start  = PAGE_OFFSET,
                .end    = (unsigned long)_stext,
                .mask   = ~PMD_SECT_XN,
@@ -591,14 +593,16 @@ static struct section_perm nx_perms[] = {
        },
        /* Make init RW (set NX). */
        {
+               .name   = "init NX",
                .start  = (unsigned long)__init_begin,
                .end    = (unsigned long)_sdata,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
-#ifdef CONFIG_DEBUG_RODATA
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
        /* Make rodata NX (set RO in ro_perms below). */
        {
+               .name   = "rodata NX",
                .start  = (unsigned long)__start_rodata,
                .end    = (unsigned long)__init_begin,
                .mask   = ~PMD_SECT_XN,
@@ -607,10 +611,10 @@ static struct section_perm nx_perms[] = {
 #endif
 };
 
-#ifdef CONFIG_DEBUG_RODATA
 static struct section_perm ro_perms[] = {
        /* Make kernel code and rodata RX (set RO). */
        {
+               .name   = "text/rodata RO",
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
@@ -623,7 +627,6 @@ static struct section_perm ro_perms[] = {
 #endif
        },
 };
-#endif
 
 /*
  * Updates section permissions only for the current mm (sections are
@@ -670,8 +673,8 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
        for (i = 0; i < n; i++) {
                if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
                    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
-                       pr_err("BUG: section %lx-%lx not aligned to %lx\n",
-                               perms[i].start, perms[i].end,
+                       pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
+                               perms[i].name, perms[i].start, perms[i].end,
                                SECTION_SIZE);
                        continue;
                }
@@ -712,7 +715,6 @@ void fix_kernmem_perms(void)
        stop_machine(__fix_kernmem_perms, NULL, NULL);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
 int __mark_rodata_ro(void *unused)
 {
        update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
@@ -735,11 +737,10 @@ void set_kernel_text_ro(void)
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                                current->active_mm);
 }
-#endif /* CONFIG_DEBUG_RODATA */
 
 #else
 static inline void fix_kernmem_perms(void) { }
-#endif /* CONFIG_ARM_KERNMEM_PERMS */
+#endif /* CONFIG_DEBUG_RODATA */
 
 void free_tcmmem(void)
 {
index 8085a8aac8124839d889849d4d5877be3fa22252..ffb93db68e9c80b3003e6d93c7e93d25eb76182e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/sched_clock.h>
 #include <plat/time.h>
+#include <asm/delay.h>
 
 /*
  * MBus bridge block registers.
@@ -188,6 +189,15 @@ orion_time_set_base(void __iomem *_timer_base)
        timer_base = _timer_base;
 }
 
+static unsigned long orion_delay_timer_read(void)
+{
+       return ~readl(timer_base + TIMER0_VAL_OFF);
+}
+
+static struct delay_timer orion_delay_timer = {
+       .read_current_timer = orion_delay_timer_read,
+};
+
 void __init
 orion_time_init(void __iomem *_bridge_base, u32 _bridge_timer1_clr_mask,
                unsigned int irq, unsigned int tclk)
@@ -202,6 +212,9 @@ orion_time_init(void __iomem *_bridge_base, u32 _bridge_timer1_clr_mask,
 
        ticks_per_jiffy = (tclk + HZ/2) / HZ;
 
+       orion_delay_timer.freq = tclk;
+       register_current_timer_delay(&orion_delay_timer);
+
        /*
         * Set scale and timer for sched_clock.
         */
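One note on the ~readl() in the new read hook (an observation, not text from the patch): the Orion TIMER0 value register counts down, while the ARM delay-timer core expects read_current_timer() to return a value that increases over time, so the hook hands back the bitwise complement. Elapsed ticks then fall out of ordinary unsigned subtraction; the helper below is hypothetical and only illustrates that arithmetic.

/* Illustration only: with both samples already complemented (as in
 * orion_delay_timer_read()), unsigned subtraction gives elapsed ticks,
 * counter wrap-around included. */
static inline unsigned long ticks_between(unsigned long first, unsigned long later)
{
	return later - first;
}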
index efa6e85619ad824c2d9ea9a2bdb0b2c0eb9a3077..daf3db9f0058f6d1387f9bf5e3522c6bac0c62f0 100644 (file)
@@ -422,8 +422,7 @@ static int s3c_adc_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int s3c_adc_suspend(struct device *dev)
 {
-       struct platform_device *pdev = container_of(dev,
-                       struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        struct adc_device *adc = platform_get_drvdata(pdev);
        unsigned long flags;
        u32 con;
@@ -444,8 +443,7 @@ static int s3c_adc_suspend(struct device *dev)
 
 static int s3c_adc_resume(struct device *dev)
 {
-       struct platform_device *pdev = container_of(dev,
-                       struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        struct adc_device *adc = platform_get_drvdata(pdev);
        enum s3c_cpu_type cpu = platform_get_device_id(pdev)->driver_data;
        int ret;
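For context (not introduced by this patch): to_platform_device() is the long-standing helper from <linux/platform_device.h>, and it wraps the same container_of() expression that the open-coded lines above removed, so the conversion is purely a readability cleanup. Its definition is essentially:

#define to_platform_device(x)	container_of((x), struct platform_device, dev)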
index b53d4ff3befb69a5e692640443674bca83129f83..84baa16f4c0b674dfe404b54bb9fe09b4fd682bf 100644 (file)
@@ -727,15 +727,6 @@ static int __init s3c_nand_copy_set(struct s3c2410_nand_set *set)
                        return -ENOMEM;
        }
 
-       if (set->ecc_layout) {
-               ptr = kmemdup(set->ecc_layout,
-                             sizeof(struct nand_ecclayout), GFP_KERNEL);
-               set->ecc_layout = ptr;
-
-               if (!ptr)
-                       return -ENOMEM;
-       }
-
        return 0;
 }
 
index f5cf2bd208e0c1b21179c68d0ff2bab1b39eb531..e5557693fa923705df5cd1017fe1cf2427e6c3a8 100644 (file)
@@ -18,7 +18,6 @@
 
 #define S5P_VA_DMC0            S3C_ADDR(0x02440000)
 #define S5P_VA_DMC1            S3C_ADDR(0x02480000)
-#define S5P_VA_SROMC           S3C_ADDR(0x024C0000)
 
 #define S5P_VA_COREPERI_BASE   S3C_ADDR(0x02800000)
 #define S5P_VA_COREPERI(x)     (S5P_VA_COREPERI_BASE + (x))
index 8cc62289a63ed9958c9623acaa5462048cc9287b..af7c045e812b7c7112a41ab5cf2325fb8f3c3c2f 100644 (file)
@@ -14,6 +14,7 @@ config ARM64
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select ARCH_WANT_FRAME_POINTERS
+       select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARM_AMBA
        select ARM_ARCH_TIMER
        select ARM_GIC
@@ -49,6 +50,7 @@ config ARM64
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
+       select HAVE_ARCH_HUGE_VMAP
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
        select HAVE_ARCH_KGDB
@@ -537,6 +539,9 @@ config HOTPLUG_CPU
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+       def_bool y
+
 config ARCH_HAS_HOLES_MEMORYMODEL
        def_bool y if SPARSEMEM
 
@@ -756,6 +761,15 @@ endmenu
 
 menu "Boot options"
 
+config ARM64_ACPI_PARKING_PROTOCOL
+       bool "Enable support for the ARM64 ACPI parking protocol"
+       depends on ACPI
+       help
+         Enable support for the ARM64 ACPI parking protocol. If disabled
+         the kernel will not allow booting through the ARM64 ACPI parking
+         protocol even if the corresponding data is present in the ACPI
+         MADT table.
+
 config CMDLINE
        string "Default kernel command string"
        default ""
index 21074f674bdeb707c137a9b87c2a65bde4bbb25a..8a09522752916c5951a41dec856a97b1744069a6 100644 (file)
@@ -60,6 +60,7 @@ config ARCH_ROCKCHIP
        select ARCH_REQUIRE_GPIOLIB
        select PINCTRL
        select PINCTRL_ROCKCHIP
+       select ROCKCHIP_TIMER
        help
          This enables support for the ARMv8 based Rockchip chipsets,
          like the RK3368.
index cd822d8454c0536165810004089c3a7ce5d0068d..307237cfe728b152e350b5fb4f1c1abc18d6fb4c 100644 (file)
@@ -27,6 +27,8 @@ $(warning LSE atomics not supported by binutils)
 endif
 
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr)
+KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS  += $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_AFLAGS  += $(lseinstr)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
index cfdf701e05dfd6f4baf5395289086f2e486337ce..ba84770f789f5cff754fbdf2ac3b7df54ebbadc7 100644 (file)
@@ -1,4 +1,6 @@
-dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb
+dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \
+                       amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
+                       husky.dtb
 
 always         := $(dtb-y)
 subdir-y       := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
new file mode 100644 (file)
index 0000000..8e3074a
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * DTS file for AMD Seattle Overdrive Development Board
+ * Note: For Seattle Rev.B0
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ */
+
+/dts-v1/;
+
+/include/ "amd-seattle-soc.dtsi"
+
+/ {
+       model = "AMD Seattle (Rev.B0) Development Board (Overdrive)";
+       compatible = "amd,seattle-overdrive", "amd,seattle";
+
+       chosen {
+               stdout-path = &serial0;
+       };
+
+       psci {
+               compatible   = "arm,psci-0.2";
+               method       = "smc";
+       };
+};
+
+&ccp0 {
+       status = "ok";
+       amd,zlib-support = <1>;
+};
+
+/**
+ * NOTE: In Rev.B, gpio0 is reserved.
+ */
+&gpio1 {
+       status = "ok";
+};
+
+&gpio2 {
+       status = "ok";
+};
+
+&gpio3 {
+       status = "ok";
+};
+
+&gpio4 {
+       status = "ok";
+};
+
+&i2c0 {
+       status = "ok";
+};
+
+&i2c1 {
+       status = "ok";
+};
+
+&pcie0 {
+       status = "ok";
+};
+
+&spi0 {
+       status = "ok";
+};
+
+&spi1 {
+       status = "ok";
+       sdcard0: sdcard@0 {
+               compatible = "mmc-spi-slot";
+               reg = <0>;
+               spi-max-frequency = <20000000>;
+               voltage-ranges = <3200 3400>;
+               pl022,hierarchy = <0>;
+               pl022,interface = <0>;
+               pl022,com-mode = <0x0>;
+               pl022,rx-level-trig = <0>;
+               pl022,tx-level-trig = <0>;
+       };
+};
+
+&ipmi_kcs {
+       status = "ok";
+};
+
+&smb0 {
+       /include/ "amd-seattle-xgbe-b.dtsi"
+};
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
new file mode 100644 (file)
index 0000000..ed5e043
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * DTS file for AMD Seattle Overdrive Development Board
+ * Note: For Seattle Rev.B1
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ */
+
+/dts-v1/;
+
+/include/ "amd-seattle-soc.dtsi"
+
+/ {
+       model = "AMD Seattle (Rev.B1) Development Board (Overdrive)";
+       compatible = "amd,seattle-overdrive", "amd,seattle";
+
+       chosen {
+               stdout-path = &serial0;
+       };
+
+       psci {
+               compatible   = "arm,psci-0.2";
+               method       = "smc";
+       };
+};
+
+&ccp0 {
+       status = "ok";
+       amd,zlib-support = <1>;
+};
+
+/**
+ * NOTE: In Rev.B, gpio0 is reserved.
+ */
+&gpio1 {
+       status = "ok";
+};
+
+&gpio2 {
+       status = "ok";
+};
+
+&gpio3 {
+       status = "ok";
+};
+
+&gpio4 {
+       status = "ok";
+};
+
+&i2c0 {
+       status = "ok";
+};
+
+&i2c1 {
+       status = "ok";
+};
+
+&pcie0 {
+       status = "ok";
+};
+
+&sata1 {
+       status = "ok";
+};
+
+&spi0 {
+       status = "ok";
+};
+
+&spi1 {
+       status = "ok";
+       sdcard0: sdcard@0 {
+               compatible = "mmc-spi-slot";
+               reg = <0>;
+               spi-max-frequency = <20000000>;
+               voltage-ranges = <3200 3400>;
+               pl022,hierarchy = <0>;
+               pl022,interface = <0>;
+               pl022,com-mode = <0x0>;
+               pl022,rx-level-trig = <0>;
+               pl022,tx-level-trig = <0>;
+       };
+};
+
+&ipmi_kcs {
+       status = "ok";
+};
+
+&smb0 {
+       /include/ "amd-seattle-xgbe-b.dtsi"
+};
index 2874d92881fda36c8a9fdb2a2eb2d6900225134e..a7fc059a7cd91987e84b59f43726a2e332425b7d 100644 (file)
@@ -18,8 +18,8 @@
                #size-cells = <2>;
                reg = <0x0 0xe1110000 0 0x1000>,
                      <0x0 0xe112f000 0 0x2000>,
-                     <0x0 0xe1140000 0 0x10000>,
-                     <0x0 0xe1160000 0 0x10000>;
+                     <0x0 0xe1140000 0 0x2000>,
+                     <0x0 0xe1160000 0 0x2000>;
                interrupts = <1 9 0xf04>;
                ranges = <0 0 0 0xe1100000 0 0x100000>;
                v2m0: v2m@e0080000 {
                #size-cells = <2>;
                ranges;
 
-               /* DDR range is 40-bit addressing */
-               dma-ranges = <0x80 0x0 0x80 0x0 0x7f 0xffffffff>;
+               /*
+                * dma-ranges is 40-bit address space containing:
+                * - GICv2m MSI register is at 0xe0080000
+                * - DRAM range [0x8000000000 to 0xffffffffff]
+                */
+               dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
 
                /include/ "amd-seattle-clks.dtsi"
 
                sata0: sata@e0300000 {
                        compatible = "snps,dwc-ahci";
-                       reg = <0 0xe0300000 0 0x800>;
+                       reg = <0 0xe0300000 0 0xf0000>;
                        interrupts = <0 355 4>;
                        clocks = <&sataclk_333mhz>;
                        dma-coherent;
                };
 
+               /* This is for Rev B only */
+               sata1: sata@e0d00000 {
+                       status = "disabled";
+                       compatible = "snps,dwc-ahci";
+                       reg = <0 0xe0d00000 0 0xf0000>;
+                       interrupts = <0 354 4>;
+                       clocks = <&sataclk_333mhz>;
+                       dma-coherent;
+               };
+
                i2c0: i2c@e1000000 {
                        status = "disabled";
                        compatible = "snps,designware-i2c";
                        reg = <0 0xe1000000 0 0x1000>;
                        interrupts = <0 357 4>;
-                       clocks = <&uartspiclk_100mhz>;
+                       clocks = <&miscclk_250mhz>;
+               };
+
+               i2c1: i2c@e0050000 {
+                       status = "disabled";
+                       compatible = "snps,designware-i2c";
+                       reg = <0 0xe0050000 0 0x1000>;
+                       interrupts = <0 340 4>;
+                       clocks = <&miscclk_250mhz>;
                };
 
                serial0: serial@e1010000 {
                spi0: ssp@e1020000 {
                        status = "disabled";
                        compatible = "arm,pl022", "arm,primecell";
-                       #gpio-cells = <2>;
                        reg = <0 0xe1020000 0 0x1000>;
                        spi-controller;
                        interrupts = <0 330 4>;
                spi1: ssp@e1030000 {
                        status = "disabled";
                        compatible = "arm,pl022", "arm,primecell";
-                       #gpio-cells = <2>;
                        reg = <0 0xe1030000 0 0x1000>;
                        spi-controller;
                        interrupts = <0 329 4>;
                        #size-cells = <0>;
                };
 
-               gpio0: gpio@e1040000 {
+               gpio0: gpio@e1040000 { /* Not available to OS for B0 */
                        status = "disabled";
                        compatible = "arm,pl061", "arm,primecell";
                        #gpio-cells = <2>;
                        interrupts = <0 359 4>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
-                       clocks = <&uartspiclk_100mhz>;
+                       clocks = <&miscclk_250mhz>;
                        clock-names = "apb_pclk";
                };
 
-               gpio1: gpio@e1050000 {
+               gpio1: gpio@e1050000 { /* [0:7] */
                        status = "disabled";
                        compatible = "arm,pl061", "arm,primecell";
                        #gpio-cells = <2>;
                        reg = <0 0xe1050000 0 0x1000>;
                        gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <0 358 4>;
-                       clocks = <&uartspiclk_100mhz>;
+                       clocks = <&miscclk_250mhz>;
+                       clock-names = "apb_pclk";
+               };
+
+               gpio2: gpio@e0020000 { /* [8:15] */
+                       status = "disabled";
+                       compatible = "arm,pl061", "arm,primecell";
+                       #gpio-cells = <2>;
+                       reg = <0 0xe0020000 0 0x1000>;
+                       gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupts = <0 366 4>;
+                       clocks = <&miscclk_250mhz>;
+                       clock-names = "apb_pclk";
+               };
+
+               gpio3: gpio@e0030000 { /* [16:23] */
+                       status = "disabled";
+                       compatible = "arm,pl061", "arm,primecell";
+                       #gpio-cells = <2>;
+                       reg = <0 0xe0030000 0 0x1000>;
+                       gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupts = <0 365 4>;
+                       clocks = <&miscclk_250mhz>;
+                       clock-names = "apb_pclk";
+               };
+
+               gpio4: gpio@e0080000 { /* [24] */
+                       status = "disabled";
+                       compatible = "arm,pl061", "arm,primecell";
+                       #gpio-cells = <2>;
+                       reg = <0 0xe0080000 0 0x1000>;
+                       gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupts = <0 361 4>;
+                       clocks = <&miscclk_250mhz>;
                        clock-names = "apb_pclk";
                };
 
                                <0x1000 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>;
 
                        dma-coherent;
-                       dma-ranges = <0x43000000 0x80 0x0 0x80 0x0 0x7f 0xffffffff>;
+                       dma-ranges = <0x43000000 0x0 0x0 0x0 0x0 0x100 0x0>;
                        ranges =
                                /* I/O Memory (size=64K) */
                                <0x01000000 0x00 0x00000000 0x00 0xefff0000 0x00 0x00010000>,
                                /* 64-bit MMIO (size= 124G) */
                                <0x03000000 0x01 0x00000000 0x01 0x00000000 0x7f 0x00000000>;
                };
+
+               /* Perf CCN504 PMU */
+               ccn: ccn@0xe8000000 {
+                       compatible = "arm,ccn-504";
+                       reg = <0x0 0xe8000000 0 0x1000000>;
+                       interrupts = <0 380 4>;
+               };
+
+               ipmi_kcs: kcs@e0010000 {
+                       status = "disabled";
+                       compatible = "ipmi-kcs";
+                       device_type = "ipmi";
+                       reg = <0x0 0xe0010000 0 0x8>;
+                       interrupts = <0 389 4>;
+                       interrupt-names = "ipmi_kcs";
+                       reg-size = <1>;
+                       reg-spacing = <4>;
+               };
        };
 };
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
new file mode 100644 (file)
index 0000000..8e86319
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * DTS file for AMD Seattle XGBE (RevB)
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ */
+
+       xgmacclk0_dma_250mhz: clk250mhz_0 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <250000000>;
+               clock-output-names = "xgmacclk0_dma_250mhz";
+       };
+
+       xgmacclk0_ptp_250mhz: clk250mhz_1 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <250000000>;
+               clock-output-names = "xgmacclk0_ptp_250mhz";
+       };
+
+       xgmacclk1_dma_250mhz: clk250mhz_2 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <250000000>;
+               clock-output-names = "xgmacclk1_dma_250mhz";
+       };
+
+       xgmacclk1_ptp_250mhz: clk250mhz_3 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <250000000>;
+               clock-output-names = "xgmacclk1_ptp_250mhz";
+       };
+
+       xgmac0: xgmac@e0700000 {
+               compatible = "amd,xgbe-seattle-v1a";
+               reg = <0 0xe0700000 0 0x80000>,
+                     <0 0xe0780000 0 0x80000>,
+                     <0 0xe1240800 0 0x00400>, /* SERDES RX/TX0 */
+                     <0 0xe1250000 0 0x00060>, /* SERDES IR 1/2 */
+                     <0 0xe12500f8 0 0x00004>; /* SERDES IR 2/2 */
+               interrupts = <0 325 4>,
+                            <0 346 1>, <0 347 1>, <0 348 1>, <0 349 1>,
+                            <0 323 4>;
+               amd,per-channel-interrupt;
+               amd,speed-set = <0>;
+               amd,serdes-blwc = <1>, <1>, <0>;
+               amd,serdes-cdr-rate = <2>, <2>, <7>;
+               amd,serdes-pq-skew = <10>, <10>, <18>;
+               amd,serdes-tx-amp = <0>, <0>, <0>;
+               amd,serdes-dfe-tap-config = <3>, <3>, <3>;
+               amd,serdes-dfe-tap-enable = <0>, <0>, <7>;
+               mac-address = [ 02 A1 A2 A3 A4 A5 ];
+               clocks = <&xgmacclk0_dma_250mhz>, <&xgmacclk0_ptp_250mhz>;
+               clock-names = "dma_clk", "ptp_clk";
+               phy-mode = "xgmii";
+               #stream-id-cells = <16>;
+               dma-coherent;
+       };
+
+       xgmac1: xgmac@e0900000 {
+               compatible = "amd,xgbe-seattle-v1a";
+               reg = <0 0xe0900000 0 0x80000>,
+                     <0 0xe0980000 0 0x80000>,
+                     <0 0xe1240c00 0 0x00400>, /* SERDES RX/TX1 */
+                     <0 0xe1250080 0 0x00060>, /* SERDES IR 1/2 */
+                     <0 0xe12500fc 0 0x00004>; /* SERDES IR 2/2 */
+               interrupts = <0 324 4>,
+                            <0 341 1>, <0 342 1>, <0 343 1>, <0 344 1>,
+                            <0 322 4>;
+               amd,per-channel-interrupt;
+               amd,speed-set = <0>;
+               amd,serdes-blwc = <1>, <1>, <0>;
+               amd,serdes-cdr-rate = <2>, <2>, <7>;
+               amd,serdes-pq-skew = <10>, <10>, <18>;
+               amd,serdes-tx-amp = <0>, <0>, <0>;
+               amd,serdes-dfe-tap-config = <3>, <3>, <3>;
+               amd,serdes-dfe-tap-enable = <0>, <0>, <7>;
+               mac-address = [ 02 B1 B2 B3 B4 B5 ];
+               clocks = <&xgmacclk1_dma_250mhz>, <&xgmacclk1_ptp_250mhz>;
+               clock-names = "dma_clk", "ptp_clk";
+               phy-mode = "xgmii";
+               #stream-id-cells = <16>;
+               dma-coherent;
+       };
+
+       xgmac0_smmu: smmu@e0600000 {
+                compatible = "arm,mmu-401";
+                reg = <0 0xe0600000 0 0x10000>;
+                #global-interrupts = <1>;
+                interrupts = /* Uses combined intr for both
+                              * global and context
+                              */
+                             <0 336 4>,
+                             <0 336 4>;
+
+                mmu-masters = <&xgmac0
+                         0  1  2  3  4  5  6  7
+                        16 17 18 19 20 21 22 23
+                >;
+        };
+
+        xgmac1_smmu: smmu@e0800000 {
+                compatible = "arm,mmu-401";
+                reg = <0 0xe0800000 0 0x10000>;
+                #global-interrupts = <1>;
+                interrupts = /* Uses combined intr for both
+                              * global and context
+                              */
+                             <0 335 4>,
+                             <0 335 4>;
+
+                mmu-masters = <&xgmac1
+                         0  1  2  3  4  5  6  7
+                        16 17 18 19 20 21 22 23
+                >;
+        };
diff --git a/arch/arm64/boot/dts/amd/husky.dts b/arch/arm64/boot/dts/amd/husky.dts
new file mode 100644 (file)
index 0000000..1381d4b
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * DTS file for AMD/Linaro 96Boards Enterprise Edition Server (Husky) Board
+ * Note: Based-on AMD Seattle Rev.B0
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ */
+
+/dts-v1/;
+
+/include/ "amd-seattle-soc.dtsi"
+
+/ {
+       model = "Linaro 96Boards Enterprise Edition Server (Husky) Board";
+       compatible = "amd,seattle-overdrive", "amd,seattle";
+
+       chosen {
+               stdout-path = &serial0;
+       };
+
+       psci {
+               compatible   = "arm,psci-0.2";
+               method       = "smc";
+       };
+};
+
+&ccp0 {
+       status = "ok";
+       amd,zlib-support = <1>;
+};
+
+/**
+ * NOTE: In Rev.B, gpio0 is reserved.
+ */
+&gpio1 {
+       status = "ok";
+};
+
+&gpio2 {
+       status = "ok";
+};
+
+&gpio3 {
+       status = "ok";
+};
+
+&gpio4 {
+       status = "ok";
+};
+
+&i2c0 {
+       status = "ok";
+};
+
+&i2c1 {
+       status = "ok";
+};
+
+&pcie0 {
+       status = "ok";
+};
+
+&spi0 {
+       status = "ok";
+};
+
+&spi1 {
+       status = "ok";
+       sdcard0: sdcard@0 {
+               compatible = "mmc-spi-slot";
+               reg = <0>;
+               spi-max-frequency = <20000000>;
+               voltage-ranges = <3200 3400>;
+               pl022,hierarchy = <0>;
+               pl022,interface = <0>;
+               pl022,com-mode = <0x0>;
+               pl022,rx-level-trig = <0>;
+               pl022,tx-level-trig = <0>;
+       };
+};
+
+&smb0 {
+       /include/ "amd-seattle-xgbe-b.dtsi"
+};
index dd5158eb5872396693bba678cda765a719e25e97..e5b59ca9debb1916764746bb168d969572e17771 100644 (file)
                             <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
index da7b6e61325758c547b521c3df675adeb9568d2b..933cba3599189c7f85c6a6adea77eb661be459f5 100644 (file)
@@ -23,9 +23,8 @@ soc0: soc@000000000 {
                };
        };
 
-       dsa: dsa@c7000000 {
+       dsaf0: dsa@c7000000 {
                compatible = "hisilicon,hns-dsaf-v1";
-               dsa_name = "dsaf0";
                mode = "6port-16rss";
                interrupt-parent = <&mbigen_dsa>;
 
@@ -127,7 +126,7 @@ soc0: soc@000000000 {
 
        eth0: ethernet@0{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <0>;
                local-mac-address = [00 00 00 01 00 58];
                status = "disabled";
@@ -135,14 +134,14 @@ soc0: soc@000000000 {
        };
        eth1: ethernet@1{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <1>;
                status = "disabled";
                dma-coherent;
        };
        eth2: ethernet@2{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <2>;
                local-mac-address = [00 00 00 01 00 5a];
                status = "disabled";
@@ -150,7 +149,7 @@ soc0: soc@000000000 {
        };
        eth3: ethernet@3{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <3>;
                local-mac-address = [00 00 00 01 00 5b];
                status = "disabled";
@@ -158,7 +157,7 @@ soc0: soc@000000000 {
        };
        eth4: ethernet@4{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <4>;
                local-mac-address = [00 00 00 01 00 5c];
                status = "disabled";
@@ -166,7 +165,7 @@ soc0: soc@000000000 {
        };
        eth5: ethernet@5{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <5>;
                local-mac-address = [00 00 00 01 00 5d];
                status = "disabled";
@@ -174,7 +173,7 @@ soc0: soc@000000000 {
        };
        eth6: ethernet@6{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <6>;
                local-mac-address = [00 00 00 01 00 5e];
                status = "disabled";
@@ -182,7 +181,7 @@ soc0: soc@000000000 {
        };
        eth7: ethernet@7{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <7>;
                local-mac-address = [00 00 00 01 00 5f];
                status = "disabled";
index 7dfe1c085966de45b27ee1b10f8073af9c5914dc..62f33fc84e3ea7afd11655069a16a45b7e120b90 100644 (file)
@@ -12,6 +12,8 @@
                rtc1 = "/rtc@0,7000e000";
        };
 
+       chosen { };
+
        memory {
                device_type = "memory";
                reg = <0x0 0x80000000 0x0 0x80000000>;
index e8bb46027bed690385f51905a1cab74cea40274d..be0e9d70e6c4a0eb64146e1a2a995a2086da786b 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performace. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
         */
        uarta: serial@0,70006000 {
index bc23f4dea002b51f089b6c42756af16f9c591cca..cd4f45ccd6a72f6fcb9f6c87dee6160fb3c8ceee 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performace. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
         */
        uarta: serial@0,70006000 {
index 8e94af64ee94516166dad513be94613f32a460aa..fa1f661ccccfb21bb71ab0d5918aee5c0742223e 100644 (file)
@@ -1,4 +1,5 @@
 dtb-$(CONFIG_ARCH_QCOM)        += apq8016-sbc.dtb msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM)        += msm8996-mtp.dtb
 
 always         := $(dtb-y)
 subdir-y       := $(dts-dirs)
index db17c5d5689c65ed83b35bc5d51ba4adc290d828..6b4289dd24951ac7f2d5d34466e8d1912424d413 100644 (file)
@@ -24,6 +24,8 @@
                i2c0    = &blsp_i2c2;
                i2c1    = &blsp_i2c6;
                i2c3    = &blsp_i2c4;
+               spi0    = &blsp_spi5;
+               spi1    = &blsp_spi3;
        };
 
        chosen {
                                default-state = "off";
                        };
                };
+
+               sdhci@07824000 {
+                       vmmc-supply = <&pm8916_l8>;
+                       vqmmc-supply = <&pm8916_l5>;
+
+                       pinctrl-names = "default", "sleep";
+                       pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+                       pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+                       status = "okay";
+               };
        };
 };
 
-&sdhc_1 {
-       status = "okay";
+&smd_rpm_regulators {
+       vdd_l1_l2_l3-supply = <&pm8916_s3>;
+       vdd_l5-supply = <&pm8916_s3>;
+       vdd_l4_l5_l6-supply = <&pm8916_s4>;
+       vdd_l7-supply = <&pm8916_s4>;
+
+       s1 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1562000>;
+       };
+
+       s3 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1562000>;
+       };
+
+       s4 {
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+
+               regulator-always-on;
+               regulator-boot-on;
+       };
+
+       l1 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1525000>;
+       };
+
+       l2 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1525000>;
+       };
+
+       l3 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1525000>;
+       };
+
+       l4 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l5 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l6 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l7 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l8 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l9 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l10 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l11 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l12 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l13 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l14 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       /**
+        * 1.8v required on LS expansion
+        * for mezzanine boards
+        */
+       l15 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+               regulator-always-on;
+       };
+
+       l16 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l17 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l18 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
 };
index 955c6f174d4cb16218625a6e590102a6e4a2bbf0..8d050059e9a8019cd35102399149b5010a194dc9 100644 (file)
@@ -81,8 +81,8 @@
                        bias-disable;
                };
                pinconf_cs {
-                       pins = "gpio2";
-                       drive-strength = <2>;
+                       pins = "gpio16";
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio6";
                };
                pinconf {
-                       pins = "gpio4", "gpio5", "gpio6", "gpio7";
+                       pins = "gpio4", "gpio5", "gpio7";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio6";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio10";
                };
                pinconf {
-                       pins = "gpio8", "gpio9", "gpio10", "gpio11";
+                       pins = "gpio8", "gpio9", "gpio11";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio10";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio14";
                };
                pinconf {
-                       pins = "gpio12", "gpio13", "gpio14", "gpio15";
+                       pins = "gpio12", "gpio13", "gpio15";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio14";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio18";
                };
                pinconf {
-                       pins = "gpio16", "gpio17", "gpio18", "gpio19";
+                       pins = "gpio16", "gpio17", "gpio19";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio18";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio22";
                };
                pinconf {
-                       pins = "gpio20", "gpio21", "gpio22", "gpio23";
+                       pins = "gpio20", "gpio21", "gpio23";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio22";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
index 915321479998d63ff70db4c08bc3d8e1cb2cdb50..7705207872a50ed3595e5707083e7a9dc78f2148 100644 (file)
                        device_type = "cpu";
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0>;
+                       next-level-cache = <&L2_0>;
                };
 
                CPU1: cpu@1 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x1>;
+                       next-level-cache = <&L2_0>;
                };
 
                CPU2: cpu@2 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x2>;
+                       next-level-cache = <&L2_0>;
                };
 
                CPU3: cpu@3 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x3>;
+                       next-level-cache = <&L2_0>;
+               };
+
+               L2_0: l2-cache {
+                     compatible = "cache";
+                     cache-level = <2>;
                };
        };
 
                        #interrupt-cells = <2>;
                };
 
-               gcc: qcom,gcc@1800000 {
+               gcc: clock-controller@1800000 {
                        compatible = "qcom,gcc-msm8916";
                        #clock-cells = <1>;
                        #reset-cells = <1>;
                                compatible = "qcom,rpm-msm8916";
                                qcom,smd-channels = "rpm_requests";
 
-                               pm8916-regulators {
+                               rpmcc: qcom,rpmcc {
+                                       compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc";
+                                       #clock-cells = <1>;
+                               };
+
+                               smd_rpm_regulators: pm8916-regulators {
                                        compatible = "qcom,rpm-pm8916-regulators";
 
                                        pm8916_s1: s1 {};
-                                       pm8916_s2: s2 {};
                                        pm8916_s3: s3 {};
                                        pm8916_s4: s4 {};
 
diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dts b/arch/arm64/boot/dts/qcom/msm8996-mtp.dts
new file mode 100644 (file)
index 0000000..619af44
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8996-mtp.dtsi"
+
+/ {
+       model = "Qualcomm Technologies, Inc. MSM 8996 MTP";
+       compatible = "qcom,msm8996-mtp";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi
new file mode 100644 (file)
index 0000000..9bab5c0
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+
+/ {
+       aliases {
+               serial0 = &blsp2_uart1;
+       };
+
+       chosen {
+               stdout-path = "serial0";
+       };
+
+       soc {
+               serial@75b0000 {
+                       status = "okay";
+               };
+       };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
new file mode 100644 (file)
index 0000000..2c2736d
--- /dev/null
@@ -0,0 +1,267 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+
+/ {
+       model = "Qualcomm Technologies, Inc. MSM8996";
+
+       interrupt-parent = <&intc>;
+
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       chosen { };
+
+       memory {
+               device_type = "memory";
+               /* We expect the bootloader to fill in the reg */
+               reg = <0 0 0 0>;
+       };
+
+       cpus {
+               #address-cells = <2>;
+               #size-cells = <0>;
+
+               CPU0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "qcom,kryo";
+                       reg = <0x0 0x0>;
+                       enable-method = "psci";
+                       next-level-cache = <&L2_0>;
+                       L2_0: l2-cache {
+                             compatible = "cache";
+                             cache-level = <2>;
+                       };
+               };
+
+               CPU1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "qcom,kryo";
+                       reg = <0x0 0x1>;
+                       enable-method = "psci";
+                       next-level-cache = <&L2_0>;
+               };
+
+               CPU2: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "qcom,kryo";
+                       reg = <0x0 0x100>;
+                       enable-method = "psci";
+                       next-level-cache = <&L2_1>;
+                       L2_1: l2-cache {
+                             compatible = "cache";
+                             cache-level = <2>;
+                       };
+               };
+
+               CPU3: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "qcom,kryo";
+                       reg = <0x0 0x101>;
+                       enable-method = "psci";
+                       next-level-cache = <&L2_1>;
+               };
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+
+                       cluster1 {
+                               core0 {
+                                       cpu = <&CPU2>;
+                               };
+
+                               core1 {
+                                       cpu = <&CPU3>;
+                               };
+                       };
+               };
+       };
+
+       timer {
+               compatible = "arm,armv8-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+       };
+
+       clocks {
+               xo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+                       clock-output-names = "xo_board";
+               };
+
+               sleep_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32764>;
+                       clock-output-names = "sleep_clk";
+               };
+       };
+
+       psci {
+               compatible = "arm,psci-1.0";
+               method = "smc";
+       };
+
+       soc: soc {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0 0 0 0xffffffff>;
+               compatible = "simple-bus";
+
+               intc: interrupt-controller@9bc0000 {
+                       compatible = "arm,gic-v3";
+                       #interrupt-cells = <3>;
+                       interrupt-controller;
+                       #redistributor-regions = <1>;
+                       redistributor-stride = <0x0 0x40000>;
+                       reg = <0x09bc0000 0x10000>,
+                             <0x09c00000 0x100000>;
+                       interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+               };
+
+               gcc: clock-controller@300000 {
+                       compatible = "qcom,gcc-msm8996";
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+                       reg = <0x300000 0x90000>;
+               };
+
+               blsp2_uart1: serial@75b0000 {
+                       compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+                       reg = <0x75b0000 0x1000>;
+                       interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+                                <&gcc GCC_BLSP2_AHB_CLK>;
+                       clock-names = "core", "iface";
+                       status = "disabled";
+               };
+
+               pinctrl@1010000 {
+                       compatible = "qcom,msm8996-pinctrl";
+                       reg = <0x01010000 0x300000>;
+                       interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               timer@09840000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges;
+                       compatible = "arm,armv7-timer-mem";
+                       reg = <0x09840000 0x1000>;
+                       clock-frequency = <19200000>;
+
+                       frame@9850000 {
+                               frame-number = <0>;
+                               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x09850000 0x1000>,
+                                     <0x09860000 0x1000>;
+                       };
+
+                       frame@9870000 {
+                               frame-number = <1>;
+                               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x09870000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@9880000 {
+                               frame-number = <2>;
+                               interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x09880000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@9890000 {
+                               frame-number = <3>;
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x09890000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@98a0000 {
+                               frame-number = <4>;
+                               interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x098a0000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@98b0000 {
+                               frame-number = <5>;
+                               interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x098b0000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@98c0000 {
+                               frame-number = <6>;
+                               interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x098c0000 0x1000>;
+                               status = "disabled";
+                       };
+               };
+
+               spmi_bus: qcom,spmi@400f000 {
+                       compatible = "qcom,spmi-pmic-arb";
+                       reg = <0x400f000 0x1000>,
+                             <0x4400000 0x800000>,
+                             <0x4c00000 0x800000>,
+                             <0x5800000 0x200000>,
+                             <0x400a000 0x002100>;
+                       reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+                       interrupt-names = "periph_irq";
+                       interrupts = <GIC_SPI 326 IRQ_TYPE_NONE>;
+                       qcom,ee = <0>;
+                       qcom,channel = <0>;
+                       #address-cells = <2>;
+                       #size-cells = <0>;
+                       interrupt-controller;
+                       #interrupt-cells = <4>;
+               };
+
+               mmcc: clock-controller@8c0000 {
+                       compatible = "qcom,mmcc-msm8996";
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+                       reg = <0x8c0000 0x40000>;
+                       assigned-clocks = <&mmcc MMPLL9_PLL>,
+                                         <&mmcc MMPLL1_PLL>,
+                                         <&mmcc MMPLL3_PLL>,
+                                         <&mmcc MMPLL4_PLL>,
+                                         <&mmcc MMPLL5_PLL>;
+                       assigned-clock-rates = <624000000>,
+                                              <810000000>,
+                                              <980000000>,
+                                              <960000000>,
+                                              <825000000>;
+               };
+       };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8004.dtsi b/arch/arm64/boot/dts/qcom/pm8004.dtsi
new file mode 100644 (file)
index 0000000..ef2207a
--- /dev/null
@@ -0,0 +1,19 @@
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+       pmic@4 {
+               compatible = "qcom,pm8004", "qcom,spmi-pmic";
+               reg = <0x4 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+
+       pmic@5 {
+               compatible = "qcom,pm8004", "qcom,spmi-pmic";
+               reg = <0x5 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
new file mode 100644 (file)
index 0000000..1732f1d
--- /dev/null
@@ -0,0 +1,62 @@
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+       pmic@0 {
+               compatible = "qcom,pm8994", "qcom,spmi-pmic";
+               reg = <0x0 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               pm8994_gpios: gpios@c000 {
+                       compatible = "qcom,pm8994-gpio";
+                       reg = <0xc000 0x1500>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupts = <0 0xc0 0 IRQ_TYPE_NONE>,
+                                    <0 0xc1 0 IRQ_TYPE_NONE>,
+                                    <0 0xc2 0 IRQ_TYPE_NONE>,
+                                    <0 0xc3 0 IRQ_TYPE_NONE>,
+                                    <0 0xc4 0 IRQ_TYPE_NONE>,
+                                    <0 0xc5 0 IRQ_TYPE_NONE>,
+                                    <0 0xc6 0 IRQ_TYPE_NONE>,
+                                    <0 0xc7 0 IRQ_TYPE_NONE>,
+                                    <0 0xc8 0 IRQ_TYPE_NONE>,
+                                    <0 0xc9 0 IRQ_TYPE_NONE>,
+                                    <0 0xca 0 IRQ_TYPE_NONE>,
+                                    <0 0xcb 0 IRQ_TYPE_NONE>,
+                                    <0 0xcc 0 IRQ_TYPE_NONE>,
+                                    <0 0xcd 0 IRQ_TYPE_NONE>,
+                                    <0 0xce 0 IRQ_TYPE_NONE>,
+                                    <0 0xd0 0 IRQ_TYPE_NONE>,
+                                    <0 0xd1 0 IRQ_TYPE_NONE>,
+                                    <0 0xd2 0 IRQ_TYPE_NONE>,
+                                    <0 0xd3 0 IRQ_TYPE_NONE>,
+                                    <0 0xd4 0 IRQ_TYPE_NONE>,
+                                    <0 0xd5 0 IRQ_TYPE_NONE>;
+               };
+
+               pm8994_mpps: mpps@a000 {
+                       compatible = "qcom,pm8994-mpp";
+                       reg = <0xa000 0x700>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupts = <0 0xa0 0 IRQ_TYPE_NONE>,
+                                    <0 0xa1 0 IRQ_TYPE_NONE>,
+                                    <0 0xa2 0 IRQ_TYPE_NONE>,
+                                    <0 0xa3 0 IRQ_TYPE_NONE>,
+                                    <0 0xa4 0 IRQ_TYPE_NONE>,
+                                    <0 0xa5 0 IRQ_TYPE_NONE>,
+                                    <0 0xa6 0 IRQ_TYPE_NONE>,
+                                    <0 0xa7 0 IRQ_TYPE_NONE>;
+               };
+       };
+
+       pmic@1 {
+               compatible = "qcom,pm8994", "qcom,spmi-pmic";
+               reg = <0x1 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
new file mode 100644 (file)
index 0000000..d3879a4
--- /dev/null
@@ -0,0 +1,19 @@
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+       pmic@2 {
+               compatible = "qcom,pmi8994", "qcom,spmi-pmic";
+               reg = <0x2 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+
+       pmic@3 {
+               compatible = "qcom,pmi8994", "qcom,spmi-pmic";
+               reg = <0x3 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+};
index 265d12ff6022208f94891149c065951668fd2b15..31ace9c1f79dc70ceb8bfda66e1b83ecad399527 100644 (file)
@@ -93,6 +93,9 @@
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        scif1_pins: scif1 {
                renesas,groups = "scif1_data_a", "scif1_ctrl";
                renesas,function = "scif1";
                renesas,groups = "scif2_data_a";
                renesas,function = "scif2";
        };
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk_a";
+               renesas,function = "scif_clk";
+       };
 
        i2c2_pins: i2c2 {
                renesas,groups = "i2c2_a";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &i2c2 {
        pinctrl-0 = <&i2c2_pins>;
        pinctrl-names = "default";
                interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
        };
 };
+
+&xhci0 {
+       status = "okay";
+};
index bb353cde125333b9f0df4dadcd817316d4ee3f08..b5e46e4ff72ad003708c1189f94b1e3f1c1539cd 100644 (file)
@@ -39,6 +39,7 @@
                        compatible = "arm,cortex-a57", "arm,armv8";
                        reg = <0x0>;
                        device_type = "cpu";
+                       next-level-cache = <&L2_CA57>;
                        enable-method = "psci";
                };
 
                        compatible = "arm,cortex-a57","arm,armv8";
                        reg = <0x1>;
                        device_type = "cpu";
+                       next-level-cache = <&L2_CA57>;
                        enable-method = "psci";
                };
                a57_2: cpu@2 {
                        compatible = "arm,cortex-a57","arm,armv8";
                        reg = <0x2>;
                        device_type = "cpu";
+                       next-level-cache = <&L2_CA57>;
                        enable-method = "psci";
                };
                a57_3: cpu@3 {
                        compatible = "arm,cortex-a57","arm,armv8";
                        reg = <0x3>;
                        device_type = "cpu";
+                       next-level-cache = <&L2_CA57>;
                        enable-method = "psci";
                };
        };
 
+       L2_CA57: cache-controller@0 {
+               compatible = "cache";
+       };
+
        extal_clk: extal {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <0>;
        };
 
+       /* External SCIF clock - to be overridden by boards that provide it */
+       scif_clk: scif {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <0>;
+               status = "disabled";
+       };
+
        soc {
                compatible = "simple-bus";
                interrupt-parent = <&gic>;
                        power-domains = <&cpg>;
                };
 
-               pmu {
-                       compatible = "arm,armv8-pmuv3";
+               pmu_a57 {
+                       compatible = "arm,cortex-a57-pmu";
                        interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
                };
 
                dmac0: dma-controller@e6700000 {
-                       /* Empty node for now */
+                       compatible = "renesas,dmac-r8a7795",
+                                    "renesas,rcar-dmac";
+                       reg = <0 0xe6700000 0 0x10000>;
+                       interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "error",
+                                       "ch0", "ch1", "ch2", "ch3",
+                                       "ch4", "ch5", "ch6", "ch7",
+                                       "ch8", "ch9", "ch10", "ch11",
+                                       "ch12", "ch13", "ch14", "ch15";
+                       clocks = <&cpg CPG_MOD 219>;
+                       clock-names = "fck";
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <16>;
                };
 
                dmac1: dma-controller@e7300000 {
-                       /* Empty node for now */
+                       compatible = "renesas,dmac-r8a7795",
+                                    "renesas,rcar-dmac";
+                       reg = <0 0xe7300000 0 0x10000>;
+                       interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "error",
+                                       "ch0", "ch1", "ch2", "ch3",
+                                       "ch4", "ch5", "ch6", "ch7",
+                                       "ch8", "ch9", "ch10", "ch11",
+                                       "ch12", "ch13", "ch14", "ch15";
+                       clocks = <&cpg CPG_MOD 218>;
+                       clock-names = "fck";
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <16>;
                };
 
                dmac2: dma-controller@e7310000 {
-                       /* Empty node for now */
+                       compatible = "renesas,dmac-r8a7795",
+                                    "renesas,rcar-dmac";
+                       reg = <0 0xe7310000 0 0x10000>;
+                       interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "error",
+                                       "ch0", "ch1", "ch2", "ch3",
+                                       "ch4", "ch5", "ch6", "ch7",
+                                       "ch8", "ch9", "ch10", "ch11",
+                                       "ch12", "ch13", "ch14", "ch15";
+                       clocks = <&cpg CPG_MOD 217>;
+                       clock-names = "fck";
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <16>;
                };
 
                avb: ethernet@e6800000 {
                };
 
                hscif0: serial@e6540000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe6540000 0 96>;
                        interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 520>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 520>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x31>, <&dmac1 0x30>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                hscif1: serial@e6550000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe6550000 0 96>;
                        interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 519>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 519>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x33>, <&dmac1 0x32>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                hscif2: serial@e6560000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe6560000 0 96>;
                        interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 518>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 518>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x35>, <&dmac1 0x34>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                hscif3: serial@e66a0000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe66a0000 0 96>;
                        interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 517>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 517>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac0 0x37>, <&dmac0 0x36>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                hscif4: serial@e66b0000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe66b0000 0 96>;
                        interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 516>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 516>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac0 0x39>, <&dmac0 0x38>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif0: serial@e6e60000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6e60000 0 64>;
                        interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 207>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 207>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x51>, <&dmac1 0x50>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif1: serial@e6e68000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6e68000 0 64>;
                        interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 206>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 206>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x53>, <&dmac1 0x52>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif2: serial@e6e88000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6e88000 0 64>;
                        interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 310>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 310>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x13>, <&dmac1 0x12>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif3: serial@e6c50000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6c50000 0 64>;
                        interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 204>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 204>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac0 0x57>, <&dmac0 0x56>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif4: serial@e6c40000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6c40000 0 64>;
                        interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 203>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 203>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac0 0x59>, <&dmac0 0x58>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif5: serial@e6f30000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6f30000 0 64>;
                        interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 202>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 202>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x5b>, <&dmac1 0x5a>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                        clocks = <&cpg CPG_MOD 815>;
                        status = "disabled";
                };
+
+               xhci0: usb@ee000000 {
+                       compatible = "renesas,xhci-r8a7795";
+                       reg = <0 0xee000000 0 0xc00>;
+                       interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&cpg CPG_MOD 328>;
+                       power-domains = <&cpg>;
+                       status = "disabled";
+               };
+
+               xhci1: usb@ee040000 {
+                       compatible = "renesas,xhci-r8a7795";
+                       reg = <0 0xee040000 0 0xc00>;
+                       interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&cpg CPG_MOD 327>;
+                       power-domains = <&cpg>;
+                       status = "disabled";
+               };
+
+               usb_dmac0: dma-controller@e65a0000 {
+                       compatible = "renesas,r8a7795-usb-dmac",
+                                    "renesas,usb-dmac";
+                       reg = <0 0xe65a0000 0 0x100>;
+                       interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "ch0", "ch1";
+                       clocks = <&cpg CPG_MOD 330>;
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <2>;
+               };
+
+               usb_dmac1: dma-controller@e65b0000 {
+                       compatible = "renesas,r8a7795-usb-dmac",
+                                    "renesas,usb-dmac";
+                       reg = <0 0xe65b0000 0 0x100>;
+                       interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "ch0", "ch1";
+                       clocks = <&cpg CPG_MOD 331>;
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <2>;
+               };
        };
 };
index 8c219ccf67a3b4bccba2281e398d77cd450de375..6e27b22704df5630b8a3315ac14cc7032f660695 100644 (file)
                pinctrl-0 = <&pwr_key>;
 
                button@0 {
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
                        label = "GPIO Power";
                        linux,code = <116>;
index 104cbee762bb116d37d06e39ec2b2e644e3adfeb..1f2b642e794ae98219ceb49a58bc177028982dc7 100644 (file)
@@ -71,7 +71,7 @@
                pinctrl-0 = <&pwr_key>;
 
                button@0 {
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
                        label = "GPIO Power";
                        linux,code = <116>;
index 122777b1441e8b7f4f3f129659bd42ce1d0f94b9..49d119103e31fff10ce4c94698e4cc35e60d9863 100644 (file)
                compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
                reg = <0x0 0xff0c0000 0x0 0x4000>;
                clock-freq-min-max = <400000 150000000>;
-               clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>;
-               clock-names = "biu", "ciu";
+               clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+                        <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
                compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
                reg = <0x0 0xff0f0000 0x0 0x4000>;
                clock-freq-min-max = <400000 150000000>;
-               clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>;
-               clock-names = "biu", "ciu";
+               clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+                        <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
index 18ca9fb9e65f667c0679b3dbdb80166442723bb7..91ae2634cae9b9102567e2dfec92622457e454e4 100644 (file)
@@ -16,7 +16,6 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
 CONFIG_CGROUP_HUGETLB=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
@@ -37,15 +36,13 @@ CONFIG_ARCH_EXYNOS7=y
 CONFIG_ARCH_LAYERSCAPE=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_R8A7795=y
 CONFIG_ARCH_STRATIX10=y
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_132_SOC=y
-CONFIG_ARCH_TEGRA_210_SOC=y
-CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SPRD=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_UNIPHIER=y
@@ -54,14 +51,19 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_RCAR_GEN2_PCIE=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
-CONFIG_SMP=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_XEN=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -100,7 +102,11 @@ CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
 CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
 CONFIG_SKY2=y
 CONFIG_RAVB=y
 CONFIG_SMC91X=y
@@ -117,39 +123,41 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
-CONFIG_SERIAL_SAMSUNG_UARTS_4=y
-CONFIG_SERIAL_SAMSUNG_UARTS=4
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=11
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
 CONFIG_I2C_QUP=y
+CONFIG_I2C_UNIPHIER_F=y
 CONFIG_I2C_RCAR=y
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPMI=y
 CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_PL061=y
 CONFIG_GPIO_RCAR=y
 CONFIG_GPIO_XGENE=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
+CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
 CONFIG_FB=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -162,13 +170,21 @@ CONFIG_SND_SOC=y
 CONFIG_SND_SOC_RCAR=y
 CONFIG_SND_SOC_AK4613=y
 CONFIG_USB=y
+CONFIG_USB_OTG=y
 CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
 CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_USB_MSM_OTG=y
 CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
@@ -176,8 +192,6 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SPI=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
-CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -187,28 +201,34 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_DMADEVICES=y
-CONFIG_RCAR_DMAC=y
 CONFIG_QCOM_BAM_DMA=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
 CONFIG_COMMON_CLK_CS2000_CP=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8916=y
 CONFIG_HWSPINLOCK_QCOM=y
-# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ARM_SMMU=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SMD_RPM=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_HISILICON_IRQ_MBIGEN=y
+CONFIG_EXTCON_USB_GPIO=y
 CONFIG_PHY_XGENE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
@@ -239,6 +259,7 @@ CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_FTRACE is not set
 CONFIG_MEMTEST=y
 CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
index 70fd9ffb58cfc08e82f3a978e6630c70eaeb1840..cff532a6744e937015371c80e5533a0fe660b611 100644 (file)
@@ -1,5 +1,3 @@
-
-
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += checksum.h
@@ -31,7 +29,6 @@ generic-y += msgbuf.h
 generic-y += msi.h
 generic-y += mutex.h
 generic-y += pci.h
-generic-y += pci-bridge.h
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
index caafd63b8092d8102401112d811b1055f4cc3524..aee323b13802ad143e9774d2076a4b1c63731ece 100644 (file)
@@ -87,9 +87,26 @@ void __init acpi_init_cpus(void);
 static inline void acpi_init_cpus(void) { }
 #endif /* CONFIG_ACPI */
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+bool acpi_parking_protocol_valid(int cpu);
+void __init
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
+#else
+static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
+static inline void
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
+{}
+#endif
+
 static inline const char *acpi_get_enable_method(int cpu)
 {
-       return acpi_psci_present() ? "psci" : NULL;
+       if (acpi_psci_present())
+               return "psci";
+
+       if (acpi_parking_protocol_valid(cpu))
+               return "parking-protocol";
+
+       return NULL;
 }
 
 #ifdef CONFIG_ACPI_APEI
index 81151b67b26bf61fd756540f5643d8b5ab6882c7..ebf2481889c34848be0b34158647a4f60b770090 100644 (file)
 #define MIN_FDT_ALIGN          8
 #define MAX_FDT_SIZE           SZ_2M
 
+/*
+ * arm64 requires the kernel image to be placed
+ * TEXT_OFFSET bytes beyond a 2 MB aligned base
+ */
+#define MIN_KIMG_ALIGN         SZ_2M
+
 #endif
index 8f271b83f9106c7c9753ce2601d3b59e1ffbdfc5..8d56bd8550dc8e60c139092d8ef97fe4c4fcfc2e 100644 (file)
@@ -30,8 +30,9 @@
 #define ARM64_HAS_LSE_ATOMICS                  5
 #define ARM64_WORKAROUND_CAVIUM_23154          6
 #define ARM64_WORKAROUND_834220                        7
+#define ARM64_HAS_NO_HW_PREFETCH               8
 
-#define ARM64_NCAPS                            8
+#define ARM64_NCAPS                            9
 
 #ifndef __ASSEMBLY__
 
index 1a5949364ed0f43eee2be4b61c3497fe4fdbbb7b..7540284a17fe7d2569602e883c8f9443340ce378 100644 (file)
 #define MIDR_IMPLEMENTOR(midr) \
        (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
-#define MIDR_CPU_PART(imp, partnum) \
+#define MIDR_CPU_MODEL(imp, partnum) \
        (((imp)                 << MIDR_IMPLEMENTOR_SHIFT) | \
        (0xf                    << MIDR_ARCHITECTURE_SHIFT) | \
        ((partnum)              << MIDR_PARTNUM_SHIFT))
 
+#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+                            MIDR_ARCHITECTURE_MASK)
+
+#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max)           \
+({                                                                     \
+       u32 _model = (midr) & MIDR_CPU_MODEL_MASK;                      \
+       u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);     \
+                                                                       \
+       _model == (model) && rv >= (rv_min) && rv <= (rv_max);          \
+ })
+
 #define ARM_CPU_IMP_ARM                        0x41
 #define ARM_CPU_IMP_APM                        0x50
 #define ARM_CPU_IMP_CAVIUM             0x43
 
 #define CAVIUM_CPU_PART_THUNDERX       0x0A1
 
+#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_THUNDERX  MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+
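A usage sketch, not part of the patch (the r0p0-r1p2 bounds are illustrative, not taken from a real erratum): MIDR_IS_CPU_MODEL_RANGE() compares the model fields of a MIDR value against one of the MIDR_* constants above and range-checks the remaining variant+revision bits.

/* Sketch: does this CPU identify as Cortex-A57, r0p0 through r1p2? */
static inline bool cpu_is_cortex_a57_r0p0_to_r1p2(void)
{
	u32 midr = read_cpuid_id();

	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_CORTEX_A57,
				       0x00,
				       (1 << MIDR_VARIANT_SHIFT) | 2);
}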
 #ifndef __ASSEMBLY__
 
 /*
index 309704544d22763d6348095814fbdce935172c1b..1a617d46fce93247cf42fd0cda36a1355fc89aa9 100644 (file)
@@ -62,6 +62,16 @@ enum fixed_addresses {
 
        FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+
+       /*
+        * Used for kernel page table creation, so unmapped memory may be used
+        * for tables.
+        */
+       FIX_PTE,
+       FIX_PMD,
+       FIX_PUD,
+       FIX_PGD,
+
        __end_of_fixed_addresses
 };
 
index 007a69fc4f408d5f2f7e58f4070b6cb354a5e022..5f3ab8c1db55cca8dbf4c9e1fc315e90335b6d60 100644 (file)
@@ -121,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "      prfm    pstl1strm, %2\n"
 "1:    ldxr    %w1, %2\n"
 "      sub     %w3, %w1, %w4\n"
@@ -137,6 +138,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "      .align  3\n"
 "      .quad   1b, 4b, 2b, 4b\n"
 "      .popsection\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
        : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
        : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
        : "memory");
index a57601f9d17cdffb1122e2864ae5353273eb59ee..8740297dac775dac5bac2bb9260fca62df7d0fb9 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 5
+#define NR_IPI 6
 
 typedef struct {
        unsigned int __softirq_pending;
index 2774fa384c47f27b4e936644cef0e980f7fbcfe7..71ad0f93eb7153226a43d78e909d3e7c3690bf92 100644 (file)
@@ -7,13 +7,14 @@
 
 #include <linux/linkage.h>
 #include <asm/memory.h>
+#include <asm/pgtable-types.h>
 
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
  * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
  */
 #define KASAN_SHADOW_START      (VA_START)
-#define KASAN_SHADOW_END        (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
+#define KASAN_SHADOW_END        (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
 
 /*
  * This value is used to map an address to the corresponding shadow
 #define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << (64 - 3)))
 
 void kasan_init(void);
+void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
 
 #else
 static inline void kasan_init(void) { }
+static inline void kasan_copy_shadow(pgd_t *pgdir) { }
 #endif
 
 #endif
index a459714ee29e38fbf81f2061026933736ed1cbfc..5c6375d8528bb8ddd313bfa2911f7a0d77819028 100644 (file)
 #define SWAPPER_MM_MMUFLAGS    (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
 #endif
 
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define ARM64_MEMSTART_ALIGN   SZ_512M
+#else
+#define ARM64_MEMSTART_ALIGN   SZ_1G
+#endif
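Worked through per granule, for illustration: with 4 KiB pages PUD_SIZE is 1 GiB, and with 16 KiB pages 32 * PMD_SIZE = 32 * 32 MiB is likewise 1 GiB, so both take the SZ_1G default; with 64 KiB pages PMD_SIZE is 512 MiB, hence the SZ_512M case.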
 
 #endif /* __ASM_KERNEL_PGTABLE_H */
index 738a95f93e493e3002ac8749857f8599c5ae3404..bef6e9243c636b2aab05688f79de4f37a00154b3 100644 (file)
 #define CPTR_EL2_TCPAC (1 << 31)
 #define CPTR_EL2_TTA   (1 << 20)
 #define CPTR_EL2_TFP   (1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_DEFAULT       0x000033ff
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA          (1 << 11)
index 52b777b7d407cfd9c2fe5036c1c7ae96bf025c0b..054ac25e7c2e7c7d9b9f2c3ef32286c35ed9c254 100644 (file)
@@ -26,6 +26,8 @@
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT    0
 #define KVM_ARM64_DEBUG_DIRTY          (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
 
+#define kvm_ksym_ref(sym)              phys_to_virt((u64)&sym - kimage_voffset)
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
index 3066328cd86b69a91274e0cb841059b428666140..779a5872a2c5fb5f9aa9b49af6f77391aefc2336 100644 (file)
@@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
 
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
-       u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+       u32 mode;
 
-       if (vcpu_mode_is_32bit(vcpu))
+       if (vcpu_mode_is_32bit(vcpu)) {
+               mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
                return mode > COMPAT_PSR_MODE_USR;
+       }
+
+       mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
        return mode != PSR_MODE_EL0t;
 }
index 689d4c95e12fbd0dd7c1cf7ca9521918f37aaab0..e3d67ff8798bec6f5c773ad34c5f1edc67898d8c 100644 (file)
@@ -307,7 +307,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
-u64 kvm_call_hyp(void *hypfn, ...);
+u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
@@ -328,8 +328,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
         * Call initialization code, and switch to the full blown
         * HYP code.
         */
-       kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
-                    hyp_stack_ptr, vector_ptr);
+       __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+                      hyp_stack_ptr, vector_ptr);
 }
 
 static inline void kvm_arch_hardware_disable(void) {}
@@ -343,4 +343,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
 
+#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
 #endif /* __ARM64_KVM_HOST_H__ */
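For illustration, combined with kvm_ksym_ref() from kvm_asm.h above, a call such as kvm_call_hyp(__kvm_flush_vm_context) now expands to __kvm_call_hyp(phys_to_virt((u64)&__kvm_flush_vm_context - kimage_voffset)), i.e. the HYP entry point is passed via its linear-map alias rather than its kernel-image address (__kvm_flush_vm_context is merely one existing hyp entry point, chosen here as an example).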
index 853953cd1f0813fd562b68b7cdddd95582f1e392..61005e7dd6cb17ea8a950d389015593b1a124f94 100644 (file)
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
- * The module space lives between the addresses given by TASK_SIZE
- * and PAGE_OFFSET - it must be within 128MB of the kernel text.
  */
 #define VA_BITS                        (CONFIG_ARM64_VA_BITS)
 #define VA_START               (UL(0xffffffffffffffff) << VA_BITS)
 #define PAGE_OFFSET            (UL(0xffffffffffffffff) << (VA_BITS - 1))
-#define MODULES_END            (PAGE_OFFSET)
-#define MODULES_VADDR          (MODULES_END - SZ_64M)
-#define PCI_IO_END             (MODULES_VADDR - SZ_2M)
+#define KIMAGE_VADDR           (MODULES_END)
+#define MODULES_END            (MODULES_VADDR + MODULES_VSIZE)
+#define MODULES_VADDR          (VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VSIZE          (SZ_64M)
+#define PCI_IO_END             (PAGE_OFFSET - SZ_2M)
 #define PCI_IO_START           (PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP            (PCI_IO_START - SZ_2M)
 #define TASK_SIZE_64           (UL(1) << VA_BITS)
 
 #define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 4))
 
+/*
+ * The size of the KASAN shadow region. This should be 1/8th of the
+ * size of the entire kernel virtual address space.
+ */
+#ifdef CONFIG_KASAN
+#define KASAN_SHADOW_SIZE      (UL(1) << (VA_BITS - 3))
+#else
+#define KASAN_SHADOW_SIZE      (0)
+#endif
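For scale: with VA_BITS = 48 and CONFIG_KASAN=y this evaluates to 1UL << 45, i.e. 32 TiB, so MODULES_VADDR above begins 32 TiB beyond VA_START; without KASAN the 64 MiB module area starts directly at VA_START.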
+
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
-#define __virt_to_phys(x)      (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
+#define __virt_to_phys(x) ({                                           \
+       phys_addr_t __x = (phys_addr_t)(x);                             \
+       __x >= PAGE_OFFSET ? (__x - PAGE_OFFSET + PHYS_OFFSET) :        \
+                            (__x - kimage_voffset); })
+
 #define __phys_to_virt(x)      ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
+#define __phys_to_kimg(x)      ((unsigned long)((x) + kimage_voffset))
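The rewritten __virt_to_phys() thus handles two kinds of kernel virtual address: linear-map aliases at or above PAGE_OFFSET go through PHYS_OFFSET, while kernel-image addresses below PAGE_OFFSET go through kimage_voffset. A small sketch, with a made-up physical address purely for illustration:

/* Sketch only: exercise both translation paths of __virt_to_phys(). */
static void virt_to_phys_paths_sketch(void)
{
	/* Linear-map alias (>= PAGE_OFFSET): translated via PHYS_OFFSET. */
	phys_addr_t pa_linear = __virt_to_phys((unsigned long)__va(0x48000000));

	/* Kernel-image symbol (below PAGE_OFFSET): translated via kimage_voffset. */
	phys_addr_t pa_kimg = __virt_to_phys((unsigned long)&memstart_addr);

	(void)pa_linear;
	(void)pa_kimg;
}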
 
 /*
  * Convert a page to/from a physical address
 #define MT_S2_NORMAL           0xf
 #define MT_S2_DEVICE_nGnRE     0x1
 
+#ifdef CONFIG_ARM64_4K_PAGES
+#define IOREMAP_MAX_ORDER      (PUD_SHIFT)
+#else
+#define IOREMAP_MAX_ORDER      (PMD_SHIFT)
+#endif
+
 #ifndef __ASSEMBLY__
 
 extern phys_addr_t             memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET            ({ memstart_addr; })
 
+/* the offset between the kernel virtual and physical mappings */
+extern u64                     kimage_voffset;
+
 /*
- * The maximum physical address that the linear direct mapping
- * of system RAM can cover. (PAGE_OFFSET can be interpreted as
- * a 2's complement signed quantity and negated to derive the
- * maximum size of the linear mapping.)
+ * Allow all memory at the discovery stage. We will clip it later.
  */
-#define MAX_MEMBLOCK_ADDR      ({ memstart_addr - PAGE_OFFSET - 1; })
+#define MIN_MEMBLOCK_ADDR      0
+#define MAX_MEMBLOCK_ADDR      U64_MAX
 
 /*
  * PFNs are used to describe any physical page; this means
index 24165784b8038b732ea568d1e74fd8c0a699b914..a00f7cf35bbd4d80ce045bfeb0cbb6bd061aeaaa 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 static inline void contextidr_thread_switch(struct task_struct *next)
@@ -48,7 +49,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-       unsigned long ttbr = page_to_phys(empty_zero_page);
+       unsigned long ttbr = virt_to_phys(empty_zero_page);
 
        asm(
        "       msr     ttbr0_el1, %0                   // set TTBR0\n"
@@ -73,7 +74,7 @@ static inline bool __cpu_uses_extended_idmap(void)
 /*
  * Set TCR.T0SZ to its default value (based on VA_BITS)
  */
-static inline void cpu_set_default_tcr_t0sz(void)
+static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 {
        unsigned long tcr;
 
@@ -86,7 +87,62 @@ static inline void cpu_set_default_tcr_t0sz(void)
        "       msr     tcr_el1, %0     ;"
        "       isb"
        : "=&r" (tcr)
-       : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+       : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+#define cpu_set_default_tcr_t0sz()     __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_idmap_tcr_t0sz()       __cpu_set_tcr_t0sz(idmap_t0sz)
+
+/*
+ * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
+ *
+ * The idmap lives in the same VA range as userspace, but uses global entries
+ * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
+ * speculative TLB fetches, we must temporarily install the reserved page
+ * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
+ *
+ * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
+ * which should not be installed in TTBR0_EL1. In this case we can leave the
+ * reserved page tables in place.
+ */
+static inline void cpu_uninstall_idmap(void)
+{
+       struct mm_struct *mm = current->active_mm;
+
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       cpu_set_default_tcr_t0sz();
+
+       if (mm != &init_mm)
+               cpu_switch_mm(mm->pgd, mm);
+}
+
+static inline void cpu_install_idmap(void)
+{
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       cpu_set_idmap_tcr_t0sz();
+
+       cpu_switch_mm(idmap_pg_dir, &init_mm);
+}
+
+/*
+ * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
+ * avoiding the possibility of conflicting TLB entries being allocated.
+ */
+static inline void cpu_replace_ttbr1(pgd_t *pgd)
+{
+       typedef void (ttbr_replace_func)(phys_addr_t);
+       extern ttbr_replace_func idmap_cpu_replace_ttbr1;
+       ttbr_replace_func *replace_phys;
+
+       phys_addr_t pgd_phys = virt_to_phys(pgd);
+
+       replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+
+       cpu_install_idmap();
+       replace_phys(pgd_phys);
+       cpu_uninstall_idmap();
 }
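A hypothetical caller (tmp_pg_dir and its population are assumptions of this sketch, not part of the patch) would hand over a freshly built set of kernel page tables like this:

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init switch_to_tmp_pg_dir(void)
{
	/* ... populate tmp_pg_dir with the desired kernel mappings ... */
	cpu_replace_ttbr1(tmp_pg_dir);	/* the TTBR1 swap runs from the idmap */
}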
 
 /*
index 9b2f5a9d019df493fa6021ee3ca6b4779401d8c4..ae615b9d9a551bab47bb4900e867c8d15db02bb1 100644 (file)
@@ -39,6 +39,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
 #include <asm/pgtable-types.h>
 
 extern void __cpu_clear_user_page(void *p, unsigned long user);
index b008a72f8bc02733347c782f5b9c286cff16f7a2..f75b04e8d732be53e955c342d325481451950b0c 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/io.h>
-#include <asm-generic/pci-bridge.h>
 #include <asm-generic/pci-dma-compat.h>
 
 #define PCIBIOS_MIN_IO         0x1000
index c15053902942e0a3a34ba4882545c5b6d163c23c..ff98585d085aa5737c9c17478555f22b11261f2f 100644 (file)
@@ -42,11 +42,20 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        free_page((unsigned long)pmd);
 }
 
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
 {
-       set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+       set_pud(pud, __pud(pmd | prot));
 }
 
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+}
+#else
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+{
+       BUILD_BUG();
+}
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -62,11 +71,20 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
        free_page((unsigned long)pud);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
 {
-       set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
+       set_pgd(pgdp, __pgd(pud | prot));
 }
 
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+       __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+}
+#else
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+{
+       BUILD_BUG();
+}
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
index 2d545d7aa80ba715f65cc82b671213fa5505b495..a440f5a85d08b44894e7e398a2c35ce5eb3f4e03 100644 (file)
  *
  * VMEMMAP_SIZE: allows the whole VA space to be covered by a struct page array
  *     (rounded up to PUD_SIZE).
- * VMALLOC_START: beginning of the kernel VA space
+ * VMALLOC_START: beginning of the kernel vmalloc space
  * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
  *     fixed mappings and modules
  */
 #define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
-#ifndef CONFIG_KASAN
-#define VMALLOC_START          (VA_START)
-#else
-#include <asm/kasan.h>
-#define VMALLOC_START          (KASAN_SHADOW_END + SZ_64K)
-#endif
-
+#define VMALLOC_START          (MODULES_END)
 #define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap                        ((struct page *)(VMALLOC_END + SZ_64K))
@@ -57,6 +51,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/fixmap.h>
 #include <linux/mmdebug.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
@@ -67,11 +62,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PROT_DEFAULT           (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT      (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL       (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -81,7 +76,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL            __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO         __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX        __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX                __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT  __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
@@ -121,8 +116,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr)       (empty_zero_page)
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr)       virt_to_page(empty_zero_page)
 
 #define pte_ERROR(pte)         __pte_error(__FILE__, __LINE__, pte_val(pte))
 
@@ -134,16 +129,6 @@ extern struct page *empty_zero_page;
 #define pte_clear(mm,addr,ptep)        set_pte(ptep, __pte(0))
 #define pte_page(pte)          (pfn_to_page(pte_pfn(pte)))
 
-/* Find an entry in the third-level page table. */
-#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(dir,addr)    (pmd_page_vaddr(*(dir)) + pte_index(addr))
-
-#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte)                 do { } while (0)
-#define pte_unmap_nested(pte)          do { } while (0)
-
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
@@ -153,6 +138,7 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)         (!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)          (!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)          (!!(pte_val(pte) & PTE_CONT))
+#define pte_user(pte)          (!!(pte_val(pte) & PTE_USER))
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)      (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -163,8 +149,6 @@ extern struct page *empty_zero_page;
 #define pte_dirty(pte)         (pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)         (!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
@@ -278,13 +262,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
 {
-       if (pte_valid_user(pte)) {
-               if (!pte_special(pte) && pte_exec(pte))
-                       __sync_icache_dcache(pte, addr);
+       if (pte_valid(pte)) {
                if (pte_sw_dirty(pte) && pte_write(pte))
                        pte_val(pte) &= ~PTE_RDONLY;
                else
                        pte_val(pte) |= PTE_RDONLY;
+               if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
+                       __sync_icache_dcache(pte, addr);
        }
 
        /*
@@ -433,13 +417,31 @@ static inline void pmd_clear(pmd_t *pmdp)
        set_pmd(pmdp, __pmd(0));
 }
 
-static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 {
-       return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+       return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
+/* Find an entry in the third-level page table. */
+#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_phys(dir,addr)      (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_kernel(dir,addr)    ((pte_t *)__va(pte_offset_phys((dir), (addr))))
+
+#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)                 do { } while (0)
+#define pte_unmap_nested(pte)          do { } while (0)
+
+#define pte_set_fixmap(addr)           ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+#define pte_set_fixmap_offset(pmd, addr)       pte_set_fixmap(pte_offset_phys(pmd, addr))
+#define pte_clear_fixmap()             clear_fixmap(FIX_PTE)
+
 #define pmd_page(pmd)          pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pte_offset_kimg(dir,addr)      ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
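The *_set_fixmap helpers exist so that early page-table code can edit a table known only by physical address, by mapping it briefly through the FIX_PTE/FIX_PMD/FIX_PUD/FIX_PGD slots added to fixmap.h above. A minimal sketch of the pattern for a pte table (the function name is hypothetical):

static void __init early_set_one_pte(pmd_t *pmdp, unsigned long addr, pte_t pte)
{
	pte_t *ptep = pte_set_fixmap_offset(pmdp, addr);	/* map via FIX_PTE */

	set_pte(ptep, pte);
	pte_clear_fixmap();					/* unmap again */
}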
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -466,21 +468,37 @@ static inline void pud_clear(pud_t *pudp)
        set_pud(pudp, __pud(0));
 }
 
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline phys_addr_t pud_page_paddr(pud_t pud)
 {
-       return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+       return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)                (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-       return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
+#define pmd_offset_phys(dir, addr)     (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset(dir, addr)          ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
+
+#define pmd_set_fixmap(addr)           ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
+#define pmd_set_fixmap_offset(pud, addr)       pmd_set_fixmap(pmd_offset_phys(pud, addr))
+#define pmd_clear_fixmap()             clear_fixmap(FIX_PMD)
 
 #define pud_page(pud)          pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pmd_offset_kimg(dir,addr)      ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
+
+#else
+
+#define pud_page_paddr(pud)    ({ BUILD_BUG(); 0; })
+
+/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
+#define pmd_set_fixmap(addr)           NULL
+#define pmd_set_fixmap_offset(pudp, addr)      ((pmd_t *)pudp)
+#define pmd_clear_fixmap()
+
+#define pmd_offset_kimg(dir,addr)      ((pmd_t *)dir)
+
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -502,21 +520,37 @@ static inline void pgd_clear(pgd_t *pgdp)
        set_pgd(pgdp, __pgd(0));
 }
 
-static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 {
-       return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+       return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
 /* Find an entry in the first-level page table. */
 #define pud_index(addr)                (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
-{
-       return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
-}
+#define pud_offset_phys(dir, addr)     (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset(dir, addr)          ((pud_t *)__va(pud_offset_phys((dir), (addr))))
+
+#define pud_set_fixmap(addr)           ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
+#define pud_set_fixmap_offset(pgd, addr)       pud_set_fixmap(pud_offset_phys(pgd, addr))
+#define pud_clear_fixmap()             clear_fixmap(FIX_PUD)
 
 #define pgd_page(pgd)          pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pud_offset_kimg(dir,addr)      ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
+
+#else
+
+#define pgd_page_paddr(pgd)    ({ BUILD_BUG(); 0; })
+
+/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
+#define pud_set_fixmap(addr)           NULL
+#define pud_set_fixmap_offset(pgdp, addr)      ((pud_t *)pgdp)
+#define pud_clear_fixmap()
+
+#define pud_offset_kimg(dir,addr)      ((pud_t *)dir)
+
 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 
 #define pgd_ERROR(pgd)         __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -524,11 +558,16 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)                (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 
-#define pgd_offset(mm, addr)   ((mm)->pgd+pgd_index(addr))
+#define pgd_offset_raw(pgd, addr)      ((pgd) + pgd_index(addr))
+
+#define pgd_offset(mm, addr)   (pgd_offset_raw((mm)->pgd, (addr)))
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 
+#define pgd_set_fixmap(addr)   ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
+#define pgd_clear_fixmap()     clear_fixmap(FIX_PGD)
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
index 4acb7ca94fcd9c05569f3103ab09097c19ea72d5..5bb1d763d17addb2beb0d0f462869cfc183b0595 100644 (file)
 
 #include <linux/string.h>
 
+#include <asm/alternative.h>
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/lse.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
@@ -177,9 +179,11 @@ static inline void prefetchw(const void *ptr)
 }
 
 #define ARCH_HAS_SPINLOCK_PREFETCH
-static inline void spin_lock_prefetch(const void *x)
+static inline void spin_lock_prefetch(const void *ptr)
 {
-       prefetchw(x);
+       asm volatile(ARM64_LSE_ATOMIC_INSN(
+                    "prfm pstl1strm, %a0",
+                    "nop") : : "p" (ptr));
 }
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
index d9c3d6a6100ac5d68e9b412113daccd1e43d8371..2013a4dc5124a55c5c304306b41908f16a0e5d64 100644 (file)
@@ -64,6 +64,15 @@ extern void secondary_entry(void);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+#else
+static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+       BUILD_BUG();
+}
+#endif
+
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
index 83cd7e68e83b2e59740b8e56268b7f0a2604aa63..8a9c65ccb6369cfcb50c5ee98f8356c8d5c369ba 100644 (file)
@@ -41,6 +41,7 @@ arm64-obj-$(CONFIG_EFI)                       += efi.o efi-entry.stub.o
 arm64-obj-$(CONFIG_PCI)                        += pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)   += armv8_deprecated.o
 arm64-obj-$(CONFIG_ACPI)               += acpi.o
+arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)        += acpi_parking_protocol.o
 arm64-obj-$(CONFIG_PARAVIRT)           += paravirt.o
 
 obj-y                                  += $(arm64-obj-y) vdso/
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
new file mode 100644 (file)
index 0000000..4b1e5a7
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * ARM64 ACPI Parking Protocol implementation
+ *
+ * Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *         Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+#include <asm/cpu_ops.h>
+
+struct cpu_mailbox_entry {
+       phys_addr_t mailbox_addr;
+       u8 version;
+       u8 gic_cpu_id;
+};
+
+static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS];
+
+void __init acpi_set_mailbox_entry(int cpu,
+                                  struct acpi_madt_generic_interrupt *p)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+       cpu_entry->mailbox_addr = p->parked_address;
+       cpu_entry->version = p->parking_version;
+       cpu_entry->gic_cpu_id = p->cpu_interface_number;
+}
+
+bool acpi_parking_protocol_valid(int cpu)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+       return cpu_entry->mailbox_addr && cpu_entry->version;
+}
+
+static int acpi_parking_protocol_cpu_init(unsigned int cpu)
+{
+       pr_debug("%s: ACPI parked addr=%llx\n", __func__,
+                 cpu_mailbox_entries[cpu].mailbox_addr);
+
+       return 0;
+}
+
+static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
+{
+       return 0;
+}
+
+struct parking_protocol_mailbox {
+       __le32 cpu_id;
+       __le32 reserved;
+       __le64 entry_point;
+};
+
+static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+       struct parking_protocol_mailbox __iomem *mailbox;
+       __le32 cpu_id;
+
+       /*
+        * Map mailbox memory with attribute device nGnRE (ie ioremap -
+        * this deviates from the parking protocol specifications since
+        * the mailboxes are required to be mapped nGnRnE; the attribute
+        * discrepancy is harmless insofar as the protocol specification
+        * is concerned).
+        * If the mailbox is mistakenly allocated in the linear mapping
+        * by FW, ioremap will fail since the mapping will be prevented
+        * by the kernel (it clashes with the linear mapping attributes
+        * specifications).
+        */
+       mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+       if (!mailbox)
+               return -EIO;
+
+       cpu_id = readl_relaxed(&mailbox->cpu_id);
+       /*
+        * Check if firmware has set-up the mailbox entry properly
+        * before kickstarting the respective cpu.
+        */
+       if (cpu_id != ~0U) {
+               iounmap(mailbox);
+               return -ENXIO;
+       }
+
+       /*
+        * We write the entry point and cpu id as LE regardless of the
+        * native endianness of the kernel. Therefore, any boot-loaders
+        * that read this address need to convert this address to the
+        * Boot-Loader's endianness before jumping.
+        */
+       writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+       writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
+
+       arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+       iounmap(mailbox);
+
+       return 0;
+}
+
+static void acpi_parking_protocol_cpu_postboot(void)
+{
+       int cpu = smp_processor_id();
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+       struct parking_protocol_mailbox __iomem *mailbox;
+       __le64 entry_point;
+
+       /*
+        * Map mailbox memory with attribute device nGnRE (ie ioremap -
+        * this deviates from the parking protocol specifications since
+        * the mailboxes are required to be mapped nGnRnE; the attribute
+        * discrepancy is harmless insofar as the protocol specification
+        * is concerned).
+        * If the mailbox is mistakenly allocated in the linear mapping
+        * by FW, ioremap will fail since the mapping will be prevented
+        * by the kernel (it clashes with the linear mapping attributes
+        * specifications).
+        */
+       mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+       if (!mailbox)
+               return;
+
+       entry_point = readl_relaxed(&mailbox->entry_point);
+       /*
+        * Check if firmware has cleared the entry_point as expected
+        * by the protocol specification.
+        */
+       WARN_ON(entry_point);
+
+       iounmap(mailbox);
+}
+
+const struct cpu_operations acpi_parking_protocol_ops = {
+       .name           = "parking-protocol",
+       .cpu_init       = acpi_parking_protocol_cpu_init,
+       .cpu_prepare    = acpi_parking_protocol_cpu_prepare,
+       .cpu_boot       = acpi_parking_protocol_cpu_boot,
+       .cpu_postboot   = acpi_parking_protocol_cpu_postboot
+};
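As a rough illustration of the handshake implemented above, here is a small user-space model of the mailbox protocol, assuming the struct parking_protocol_mailbox layout from this file; the polling loop stands in for the parked firmware code, and the GIC CPU id and entry-point values are made up.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's parking_protocol_mailbox layout (all fields little-endian). */
struct mailbox {
        uint32_t cpu_id;        /* ~0 while the CPU is parked */
        uint32_t reserved;
        uint64_t entry_point;   /* written by the OS before the wake-up IPI */
};

/* OS side, roughly what acpi_parking_protocol_cpu_boot() does. */
static int os_boot_cpu(struct mailbox *mb, uint32_t gic_cpu_id, uint64_t entry)
{
        if (mb->cpu_id != ~0U)          /* firmware did not park this CPU properly */
                return -1;
        mb->entry_point = entry;        /* entry point first ... */
        mb->cpu_id = gic_cpu_id;        /* ... then the id that releases the CPU */
        /* a real OS would now send the wake-up IPI */
        return 0;
}

/* Parked-CPU side (conceptual): firmware spins until its id shows up. */
static uint64_t parked_cpu_wait(struct mailbox *mb, uint32_t my_gic_id)
{
        while (mb->cpu_id != my_gic_id)
                ;                       /* real firmware would WFI and recheck on each IPI */
        return mb->entry_point;         /* jump target; cleared again once the CPU is up */
}

int main(void)
{
        struct mailbox mb = { .cpu_id = ~0U };

        os_boot_cpu(&mb, 1, 0x40080000ULL);
        printf("parked CPU released to 0x%llx\n",
               (unsigned long long)parked_cpu_wait(&mb, 1));
        return 0;
}

Note that the kernel code above accesses the real mailbox through ioremap() and the *_relaxed MMIO accessors, since the mailbox memory lives outside the linear map.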
index feb6b4efa6414846d5598ccb0913a544ba0cf441..e6bc988e8dbf0f69fc4b1a48f9a7b4a89ee713f3 100644 (file)
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
 
-#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
-#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-#define MIDR_THUNDERX  MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
-
-#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
-                       MIDR_ARCHITECTURE_MASK)
-
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
 {
-       u32 midr = read_cpuid_id();
-
-       if ((midr & CPU_MODEL_MASK) != entry->midr_model)
-               return false;
-
-       midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-
-       return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
+       return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
+                                      entry->midr_range_min,
+                                      entry->midr_range_max);
 }
 
 #define MIDR_RANGE(model, min, max) \
index b6bd7d4477683393fb34dc6b055b07382e8ab050..c7cfb8fe06f94c7f5113abf0cb624980e4227127 100644 (file)
 #include <asm/smp_plat.h>
 
 extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations acpi_parking_protocol_ops;
 extern const struct cpu_operations cpu_psci_ops;
 
 const struct cpu_operations *cpu_ops[NR_CPUS];
 
-static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
        &smp_spin_table_ops,
        &cpu_psci_ops,
        NULL,
 };
 
+static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+       &acpi_parking_protocol_ops,
+#endif
+       &cpu_psci_ops,
+       NULL,
+};
+
 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-       const struct cpu_operations **ops = supported_cpu_ops;
+       const struct cpu_operations **ops;
+
+       ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
 
        while (*ops) {
                if (!strcmp(name, (*ops)->name))
@@ -75,8 +86,16 @@ static const char *__init cpu_read_enable_method(int cpu)
                }
        } else {
                enable_method = acpi_get_enable_method(cpu);
-               if (!enable_method)
-                       pr_err("Unsupported ACPI enable-method\n");
+               if (!enable_method) {
+                       /*
+                        * In ACPI systems the boot CPU does not require
+                        * checking the enable method since for some
+                        * boot protocols (ie the parking protocol) it need not
+                        * be initialized. Don't warn spuriously.
+                        */
+                       if (cpu != 0)
+                               pr_err("Unsupported ACPI enable-method\n");
+               }
        }
 
        return enable_method;
index 5c90aa490a2bee2368ae45bba2628603afe1c659..3615d7d7c9af65520c5b835d4e58d130ae6c061e 100644 (file)
@@ -621,6 +621,18 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
        return has_sre;
 }
 
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
+{
+       u32 midr = read_cpuid_id();
+       u32 rv_min, rv_max;
+
+       /* Cavium ThunderX pass 1.x and 2.x */
+       rv_min = 0;
+       rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
+
+       return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
+}
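The range check above keys off the MIDR variant and revision fields. A user-space sketch of the comparison, assuming the architectural MIDR layout (revision in bits [3:0], variant in bits [23:20]) and ignoring the implementer/part-number match that MIDR_IS_CPU_MODEL_RANGE also performs; "pass N.x" corresponds to variant N-1.

#include <stdint.h>
#include <stdio.h>

#define MIDR_REVISION_MASK     0xfU
#define MIDR_VARIANT_SHIFT     20
#define MIDR_VARIANT_MASK      (0xfU << MIDR_VARIANT_SHIFT)

/* Keep only variant+revision and compare against an inclusive range. */
static int midr_in_rv_range(uint32_t midr, uint32_t rv_min, uint32_t rv_max)
{
        uint32_t rv = midr & (MIDR_VARIANT_MASK | MIDR_REVISION_MASK);

        return rv >= rv_min && rv <= rv_max;
}

int main(void)
{
        /* variant 1, revision 0xf: the last "pass 2.x" part */
        uint32_t rv_max = (1U << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;

        printf("pass 1.0 matches: %d\n", midr_in_rv_range(0, 0, rv_max));
        printf("pass 2.1 matches: %d\n",
               midr_in_rv_range((1U << MIDR_VARIANT_SHIFT) | 1, 0, rv_max));
        printf("pass 3.0 matches: %d\n",
               midr_in_rv_range(2U << MIDR_VARIANT_SHIFT, 0, rv_max));
        return 0;
}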
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -651,6 +663,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .min_field_value = 2,
        },
 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+       {
+               .desc = "Software prefetching using PRFM",
+               .capability = ARM64_HAS_NO_HW_PREFETCH,
+               .matches = has_no_hw_prefetch,
+       },
        {},
 };
 
index 8aee3aeec3e687edde6f5be67233299e7a4f7d4f..c1492ba1f6d14e71c263fa26904fab9980439b4f 100644 (file)
@@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
 
 /* EL1 Single Step Handler hooks */
 static LIST_HEAD(step_hook);
-static DEFINE_RWLOCK(step_hook_lock);
+static DEFINE_SPINLOCK(step_hook_lock);
 
 void register_step_hook(struct step_hook *hook)
 {
-       write_lock(&step_hook_lock);
-       list_add(&hook->node, &step_hook);
-       write_unlock(&step_hook_lock);
+       spin_lock(&step_hook_lock);
+       list_add_rcu(&hook->node, &step_hook);
+       spin_unlock(&step_hook_lock);
 }
 
 void unregister_step_hook(struct step_hook *hook)
 {
-       write_lock(&step_hook_lock);
-       list_del(&hook->node);
-       write_unlock(&step_hook_lock);
+       spin_lock(&step_hook_lock);
+       list_del_rcu(&hook->node);
+       spin_unlock(&step_hook_lock);
+       synchronize_rcu();
 }
 
 /*
@@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
        struct step_hook *hook;
        int retval = DBG_HOOK_ERROR;
 
-       read_lock(&step_hook_lock);
+       rcu_read_lock();
 
-       list_for_each_entry(hook, &step_hook, node)     {
+       list_for_each_entry_rcu(hook, &step_hook, node) {
                retval = hook->fn(regs, esr);
                if (retval == DBG_HOOK_HANDLED)
                        break;
        }
 
-       read_unlock(&step_hook_lock);
+       rcu_read_unlock();
 
        return retval;
 }
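With this change, readers walk the hook list under rcu_read_lock() while registration and unregistration serialise on a plain spinlock; the synchronize_rcu() in unregister_step_hook() is what makes it safe for the caller to free the hook afterwards. A minimal client sketch, assuming struct step_hook is the usual { struct list_head node; int (*fn)(struct pt_regs *, unsigned int); } declared in <asm/debug-monitors.h>; illustrative only.

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/debug-monitors.h>

static int my_step_fn(struct pt_regs *regs, unsigned int esr)
{
        /* claim the single-step exception here if it was ours */
        return DBG_HOOK_ERROR;          /* let other hooks have a look */
}

static struct step_hook *my_hook;

static int __init my_init(void)
{
        my_hook = kzalloc(sizeof(*my_hook), GFP_KERNEL);
        if (!my_hook)
                return -ENOMEM;
        my_hook->fn = my_step_fn;
        register_step_hook(my_hook);    /* list_add_rcu() under the spinlock */
        return 0;
}

static void __exit my_exit(void)
{
        unregister_step_hook(my_hook);  /* returns only after all RCU readers are done */
        kfree(my_hook);                 /* safe: call_step_hook() can no longer see it */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");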
index ffe9c2b6431bd5c0cb1e9982afe84e13a82e2043..05b98289093e79c2651559f309a56b765290c6c4 100644 (file)
@@ -389,7 +389,7 @@ __create_page_tables:
         * Map the kernel image (starting with PHYS_OFFSET).
         */
        mov     x0, x26                         // swapper_pg_dir
-       mov     x5, #PAGE_OFFSET
+       ldr     x5, =KIMAGE_VADDR
        create_pgd_entry x0, x5, x3, x6
        ldr     x6, =KERNEL_END                 // __va(KERNEL_END)
        mov     x3, x24                         // phys offset
@@ -421,13 +421,18 @@ __mmap_switched:
        adr_l   x2, __bss_stop
        sub     x2, x2, x0
        bl      __pi_memset
+       dsb     ishst                           // Make zero page visible to PTW
 
        adr_l   sp, initial_sp, x4
        mov     x4, sp
        and     x4, x4, #~(THREAD_SIZE - 1)
        msr     sp_el0, x4                      // Save thread_info
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
-       str_l   x24, memstart_addr, x6          // Save PHYS_OFFSET
+
+       ldr     x4, =KIMAGE_VADDR               // Save the offset between
+       sub     x4, x4, x24                     // the kernel virtual and
+       str_l   x4, kimage_voffset, x5          // physical mappings
+
        mov     x29, #0
 #ifdef CONFIG_KASAN
        bl      kasan_early_init
@@ -514,9 +519,14 @@ CPU_LE(    movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
 #endif
 
        /* EL2 debug */
+       mrs     x0, id_aa64dfr0_el1             // Check ID_AA64DFR0_EL1 PMUVer
+       sbfx    x0, x0, #8, #4
+       cmp     x0, #1
+       b.lt    4f                              // Skip if no PMU present
        mrs     x0, pmcr_el0                    // Disable debug access traps
        ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
        msr     mdcr_el2, x0                    // all PMU counters from EL1
+4:
 
        /* Stage-2 translation */
        msr     vttbr_el2, xzr
index bc2abb8b1599576ae2dec02bce0c46c48fc707dd..c9c62cab25a4a6bd62370149033a0074122d66f8 100644 (file)
 #endif
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-#define __HEAD_FLAG_BE 1
+#define __HEAD_FLAG_BE         1
 #else
-#define __HEAD_FLAG_BE 0
+#define __HEAD_FLAG_BE         0
 #endif
 
-#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
+#define __HEAD_FLAG_PAGE_SIZE  ((PAGE_SHIFT - 10) / 2)
 
-#define __HEAD_FLAGS   ((__HEAD_FLAG_BE << 0) |        \
-                        (__HEAD_FLAG_PAGE_SIZE << 1))
+#define __HEAD_FLAG_PHYS_BASE  1
+
+#define __HEAD_FLAGS           ((__HEAD_FLAG_BE << 0) |        \
+                                (__HEAD_FLAG_PAGE_SIZE << 1) | \
+                                (__HEAD_FLAG_PHYS_BASE << 3))
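With the new bit 3 flag, a quick sanity check of the packed value for one configuration, assuming a little-endian kernel with 4 KB pages (both assumptions are just for the example):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed: 4 KB pages */

int main(void)
{
        unsigned long flag_be        = 0;                       /* little-endian */
        unsigned long flag_page_size = (PAGE_SHIFT - 10) / 2;   /* 4K=1, 16K=2, 64K=3 */
        unsigned long flag_phys_base = 1;

        unsigned long flags = (flag_be << 0) |
                              (flag_page_size << 1) |
                              (flag_phys_base << 3);

        printf("Image header flags: 0x%lx\n", flags);           /* prints 0xa */
        return 0;
}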
 
 /*
  * These will output as part of the Image header, which should be little-endian
 
 #ifdef CONFIG_EFI
 
+/*
+ * Prevent the symbol aliases below from being emitted into the kallsyms
+ * table, by forcing them to be absolute symbols (which are conveniently
+ * ignored by scripts/kallsyms) rather than section relative symbols.
+ * The distinction is only relevant for partial linking, and only for symbols
+ * that are defined within a section declaration (which is not the case for
+ * the definitions below) so the resulting values will be identical.
+ */
+#define KALLSYMS_HIDE(sym)     ABSOLUTE(sym)
+
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
  * isolate it from the kernel proper. The following symbols are legally
  * linked at. The routines below are all implemented in assembler in a
  * position independent manner
  */
-__efistub_memcmp               = __pi_memcmp;
-__efistub_memchr               = __pi_memchr;
-__efistub_memcpy               = __pi_memcpy;
-__efistub_memmove              = __pi_memmove;
-__efistub_memset               = __pi_memset;
-__efistub_strlen               = __pi_strlen;
-__efistub_strcmp               = __pi_strcmp;
-__efistub_strncmp              = __pi_strncmp;
-__efistub___flush_dcache_area  = __pi___flush_dcache_area;
+__efistub_memcmp               = KALLSYMS_HIDE(__pi_memcmp);
+__efistub_memchr               = KALLSYMS_HIDE(__pi_memchr);
+__efistub_memcpy               = KALLSYMS_HIDE(__pi_memcpy);
+__efistub_memmove              = KALLSYMS_HIDE(__pi_memmove);
+__efistub_memset               = KALLSYMS_HIDE(__pi_memset);
+__efistub_strlen               = KALLSYMS_HIDE(__pi_strlen);
+__efistub_strcmp               = KALLSYMS_HIDE(__pi_strcmp);
+__efistub_strncmp              = KALLSYMS_HIDE(__pi_strncmp);
+__efistub___flush_dcache_area  = KALLSYMS_HIDE(__pi___flush_dcache_area);
 
 #ifdef CONFIG_KASAN
-__efistub___memcpy             = __pi_memcpy;
-__efistub___memmove            = __pi_memmove;
-__efistub___memset             = __pi_memset;
+__efistub___memcpy             = KALLSYMS_HIDE(__pi_memcpy);
+__efistub___memmove            = KALLSYMS_HIDE(__pi_memmove);
+__efistub___memset             = KALLSYMS_HIDE(__pi_memset);
 #endif
 
-__efistub__text                        = _text;
-__efistub__end                 = _end;
-__efistub__edata               = _edata;
+__efistub__text                        = KALLSYMS_HIDE(_text);
+__efistub__end                 = KALLSYMS_HIDE(_end);
+__efistub__edata               = KALLSYMS_HIDE(_edata);
 
 #endif
 
index b3d098bd34aa3d2a57c3c9b2b3c7b63a6001be9d..c72de668e1d4d0fa549ef3b9f79d3c696ad40c07 100644 (file)
@@ -19,8 +19,6 @@
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 
-#include <asm/pci-bridge.h>
-
 /*
  * Called after each bus is probed, but before its children are examined
  */
index f67f35b6edb12e4d34e1db17750b07a0bec72e39..42816bebb1e0f732d788780fde431028f202c31a 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/smp.h>
 #include <linux/delay.h>
 #include <linux/psci.h>
-#include <linux/slab.h>
 
 #include <uapi/linux/psci.h>
 
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
 #include <asm/smp_plat.h>
-#include <asm/suspend.h>
-
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
-{
-       int i, ret, count = 0;
-       u32 *psci_states;
-       struct device_node *state_node, *cpu_node;
-
-       cpu_node = of_get_cpu_node(cpu, NULL);
-       if (!cpu_node)
-               return -ENODEV;
-
-       /*
-        * If the PSCI cpu_suspend function hook has not been initialized
-        * idle states must not be enabled, so bail out
-        */
-       if (!psci_ops.cpu_suspend)
-               return -EOPNOTSUPP;
-
-       /* Count idle states */
-       while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-                                             count))) {
-               count++;
-               of_node_put(state_node);
-       }
-
-       if (!count)
-               return -ENODEV;
-
-       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-       if (!psci_states)
-               return -ENOMEM;
-
-       for (i = 0; i < count; i++) {
-               u32 state;
-
-               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-
-               ret = of_property_read_u32(state_node,
-                                          "arm,psci-suspend-param",
-                                          &state);
-               if (ret) {
-                       pr_warn(" * %s missing arm,psci-suspend-param property\n",
-                               state_node->full_name);
-                       of_node_put(state_node);
-                       goto free_mem;
-               }
-
-               of_node_put(state_node);
-               pr_debug("psci-power-state %#x index %d\n", state, i);
-               if (!psci_power_state_is_valid(state)) {
-                       pr_warn("Invalid PSCI power state %#x\n", state);
-                       ret = -EINVAL;
-                       goto free_mem;
-               }
-               psci_states[i] = state;
-       }
-       /* Idle states parsed correctly, initialize per-cpu pointer */
-       per_cpu(psci_power_state, cpu) = psci_states;
-       return 0;
-
-free_mem:
-       kfree(psci_states);
-       return ret;
-}
 
 static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
@@ -178,38 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 }
 #endif
 
-static int psci_suspend_finisher(unsigned long index)
-{
-       u32 *state = __this_cpu_read(psci_power_state);
-
-       return psci_ops.cpu_suspend(state[index - 1],
-                                   virt_to_phys(cpu_resume));
-}
-
-static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
-{
-       int ret;
-       u32 *state = __this_cpu_read(psci_power_state);
-       /*
-        * idle state index 0 corresponds to wfi, should never be called
-        * from the cpu_suspend operations
-        */
-       if (WARN_ON_ONCE(!index))
-               return -EINVAL;
-
-       if (!psci_power_state_loses_context(state[index - 1]))
-               ret = psci_ops.cpu_suspend(state[index - 1], 0);
-       else
-               ret = cpu_suspend(index, psci_suspend_finisher);
-
-       return ret;
-}
-
 const struct cpu_operations cpu_psci_ops = {
        .name           = "psci",
 #ifdef CONFIG_CPU_IDLE
-       .cpu_init_idle  = cpu_psci_cpu_init_idle,
-       .cpu_suspend    = cpu_psci_cpu_suspend,
+       .cpu_init_idle  = psci_cpu_init_idle,
+       .cpu_suspend    = psci_cpu_suspend_enter,
 #endif
        .cpu_init       = cpu_psci_cpu_init,
        .cpu_prepare    = cpu_psci_cpu_prepare,
index 8119479147db147c33800f76aa0d07c6072e8559..cfed56f0ad26d64714b137f124ac8112b12331b1 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/memblock.h>
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
 
 phys_addr_t __fdt_pointer __initdata;
 
@@ -313,6 +314,12 @@ void __init setup_arch(char **cmdline_p)
         */
        local_async_enable();
 
+       /*
+        * TTBR0 is only used for the identity mapping at this stage. Make it
+        * point to zero page to avoid speculatively fetching new entries.
+        */
+       cpu_uninstall_idmap();
+
        efi_init();
        arm64_memblock_init();
 
index b1adc51b2c2e7682212554ba8276b5e7c25fbff5..24cb4f800033bc2b9d5ad49144f915ca4506e6dc 100644 (file)
@@ -70,6 +70,7 @@ enum ipi_msg_type {
        IPI_CPU_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
+       IPI_WAKEUP
 };
 
 /*
@@ -149,9 +150,7 @@ asmlinkage void secondary_start_kernel(void)
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
-       cpu_set_reserved_ttbr0();
-       local_flush_tlb_all();
-       cpu_set_default_tcr_t0sz();
+       cpu_uninstall_idmap();
 
        preempt_disable();
        trace_hardirqs_off();
@@ -445,6 +444,17 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
        /* map the logical cpu id to cpu MPIDR */
        cpu_logical_map(cpu_count) = hwid;
 
+       /*
+        * Set-up the ACPI parking protocol cpu entries
+        * while initializing the cpu_logical_map to
+        * avoid parsing MADT entries multiple times for
+        * nothing (ie a valid cpu_logical_map entry should
+        * contain a valid parking protocol data set to
+        * initialize the cpu if the parking protocol is
+        * the only available enable method).
+        */
+       acpi_set_mailbox_entry(cpu_count, processor);
+
        cpu_count++;
 }
 
@@ -627,6 +637,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
+       S(IPI_WAKEUP, "CPU wake-up interrupts"),
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
@@ -670,6 +681,13 @@ void arch_send_call_function_single_ipi(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+       smp_cross_call(mask, IPI_WAKEUP);
+}
+#endif
+
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -747,6 +765,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                break;
 #endif
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+       case IPI_WAKEUP:
+               WARN_ONCE(!acpi_parking_protocol_valid(cpu),
+                         "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
+                         cpu);
+               break;
+#endif
+
        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
index 1095aa483a1c28e5387b23895c14d7a1746268a3..66055392f445ef47a7fb3749ca6024df1ea185c9 100644 (file)
@@ -60,7 +60,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
  */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-       struct mm_struct *mm = current->active_mm;
        int ret;
        unsigned long flags;
 
@@ -87,22 +86,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        ret = __cpu_suspend_enter(arg, fn);
        if (ret == 0) {
                /*
-                * We are resuming from reset with TTBR0_EL1 set to the
-                * idmap to enable the MMU; set the TTBR0 to the reserved
-                * page tables to prevent speculative TLB allocations, flush
-                * the local tlb and set the default tcr_el1.t0sz so that
-                * the TTBR0 address space set-up is properly restored.
-                * If the current active_mm != &init_mm we entered cpu_suspend
-                * with mappings in TTBR0 that must be restored, so we switch
-                * them back to complete the address space configuration
-                * restoration before returning.
+                * We are resuming from reset with the idmap active in TTBR0_EL1.
+                * We must uninstall the idmap and restore the expected MMU
+                * state before we can possibly return to userspace.
                 */
-               cpu_set_reserved_ttbr0();
-               local_flush_tlb_all();
-               cpu_set_default_tcr_t0sz();
-
-               if (mm != &init_mm)
-                       cpu_switch_mm(mm->pgd, mm);
+               cpu_uninstall_idmap();
 
                /*
                 * Restore per-cpu offset before any kernel
index e3928f578891fdd0a5d3535975d350b6489f8df1..282e3e64a17e424f1806433d7b2648db34600837 100644 (file)
@@ -89,13 +89,13 @@ SECTIONS
                *(.discard.*)
        }
 
-       . = PAGE_OFFSET + TEXT_OFFSET;
+       . = KIMAGE_VADDR + TEXT_OFFSET;
 
        .head.text : {
                _text = .;
                HEAD_TEXT
        }
-       ALIGN_DEBUG_RO
+       ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                        __exception_text_start = .;
@@ -116,10 +116,9 @@ SECTIONS
        RO_DATA(PAGE_SIZE)
        EXCEPTION_TABLE(8)
        NOTES
-       ALIGN_DEBUG_RO
-       _etext = .;                     /* End of text and rodata section */
 
        ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+       _etext = .;                     /* End of text and rodata section */
        __init_begin = .;
 
        INIT_TEXT_SECTION(8)
@@ -187,4 +186,4 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
  */
-ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
index 0ccdcbbef3c20ce0b4afa9874eb128cf55beb683..870578f84b1ca8940d47a9ea2a90b1600a1148e1 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/assembler.h>
 
 /*
- * u64 kvm_call_hyp(void *hypfn, ...);
+ * u64 __kvm_call_hyp(void *hypfn, ...);
  *
  * This is not really a variadic function in the classic C-way and care must
  * be taken when calling this to ensure parameters are passed in registers
@@ -37,7 +37,7 @@
  * used to implement __hyp_get_vectors in the same way as in
  * arch/arm64/kernel/hyp_stub.S.
  */
-ENTRY(kvm_call_hyp)
+ENTRY(__kvm_call_hyp)
        hvc     #0
        ret
-ENDPROC(kvm_call_hyp)
+ENDPROC(__kvm_call_hyp)
index ca8f5a5e2f965748fca28ced482c9c195270e5ab..f0e7bdfae134a727ec7c0ac76466020fdb65e0fb 100644 (file)
@@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
        write_sysreg(val, hcr_el2);
        /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
        write_sysreg(1 << 15, hstr_el2);
-       write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+
+       val = CPTR_EL2_DEFAULT;
+       val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+       write_sysreg(val, cptr_el2);
+
        write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
@@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
        write_sysreg(HCR_RW, hcr_el2);
        write_sysreg(0, hstr_el2);
        write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-       write_sysreg(0, cptr_el2);
+       write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
index 648112e90ed546d2d052ccf7d9f66866d2390d06..4d1ac81870d27e6f272abde088e0f5e8290c80d1 100644 (file)
 
 #define PSTATE_FAULT_BITS_64   (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
                                 PSR_I_BIT | PSR_D_BIT)
-#define EL1_EXCEPT_SYNC_OFFSET 0x200
+
+#define CURRENT_EL_SP_EL0_VECTOR       0x0
+#define CURRENT_EL_SP_ELx_VECTOR       0x200
+#define LOWER_EL_AArch64_VECTOR                0x400
+#define LOWER_EL_AArch32_VECTOR                0x600
 
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
@@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
                *fsr = 0x14;
 }
 
+enum exception_type {
+       except_type_sync        = 0,
+       except_type_irq         = 0x80,
+       except_type_fiq         = 0x100,
+       except_type_serror      = 0x180,
+};
+
+static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+{
+       u64 exc_offset;
+
+       switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
+       case PSR_MODE_EL1t:
+               exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+               break;
+       case PSR_MODE_EL1h:
+               exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+               break;
+       case PSR_MODE_EL0t:
+               exc_offset = LOWER_EL_AArch64_VECTOR;
+               break;
+       default:
+               exc_offset = LOWER_EL_AArch32_VECTOR;
+       }
+
+       return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+}
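The helper above composes the vector address from three parts: VBAR_EL1, a base offset chosen from the mode the guest was in (0x0 for EL1t, 0x200 for EL1h, 0x400 for AArch64 EL0, 0x600 for AArch32), and the per-type offset (sync 0x0, IRQ 0x80, FIQ 0x100, SError 0x180). A small stand-alone check of that arithmetic; the mode enum and the VBAR value are placeholders, not the real PSR encodings.

#include <stdint.h>
#include <stdio.h>

enum guest_mode { EL1T, EL1H, EL0T_A64, EL0_A32 };      /* simplified stand-ins */

static uint64_t except_vector(uint64_t vbar, enum guest_mode m, uint64_t type)
{
        static const uint64_t base[] = {
                [EL1T]     = 0x0,       /* CURRENT_EL_SP_EL0_VECTOR */
                [EL1H]     = 0x200,     /* CURRENT_EL_SP_ELx_VECTOR */
                [EL0T_A64] = 0x400,     /* LOWER_EL_AArch64_VECTOR  */
                [EL0_A32]  = 0x600,     /* LOWER_EL_AArch32_VECTOR  */
        };

        return vbar + base[m] + type;
}

int main(void)
{
        uint64_t vbar = 0xffff000008081000ULL;          /* example VBAR_EL1 */

        /* a synchronous abort injected while the guest ran at EL1 using SP_EL1 */
        printf("vector: 0x%llx\n",
               (unsigned long long)except_vector(vbar, EL1H, 0x0 /* sync */));
        return 0;
}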
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
        unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
        *vcpu_spsr(vcpu) = cpsr;
        *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+       *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
        *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-       *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
        vcpu_sys_reg(vcpu, FAR_EL1) = addr;
 
@@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
        *vcpu_spsr(vcpu) = cpsr;
        *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+       *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
        *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-       *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
        /*
         * Build an unknown exception, depending on the instruction
index eec3598b4184077b83b5a1f24321891cb110f5bb..2e90371cfb378b0e064667506a2b74c9275cacfb 100644 (file)
@@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+                       /* Handled */
+                       return 0;
                }
-
-               /* Handled */
-               return 0;
        }
 
        /* Not handled */
@@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 }
 
 /**
- * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
@@ -1095,7 +1094,7 @@ out:
 }
 
 /**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
index 512b9a7b980e98bbed9a699107e936e4b1913dca..4c1e700840b6ced5a0b2f868bfb4f37dddc8abc0 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/const.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
 
 /*
  * Copy a page from src to dest (both are page aligned)
  *     x1 - src
  */
 ENTRY(copy_page)
-       /* Assume cache line size is 64 bytes. */
-       prfm    pldl1strm, [x1, #64]
-1:     ldp     x2, x3, [x1]
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+       nop
+       nop
+alternative_else
+       # Prefetch two cache lines ahead.
+       prfm    pldl1strm, [x1, #128]
+       prfm    pldl1strm, [x1, #256]
+alternative_endif
+
+       ldp     x2, x3, [x1]
        ldp     x4, x5, [x1, #16]
        ldp     x6, x7, [x1, #32]
        ldp     x8, x9, [x1, #48]
-       add     x1, x1, #64
-       prfm    pldl1strm, [x1, #64]
+       ldp     x10, x11, [x1, #64]
+       ldp     x12, x13, [x1, #80]
+       ldp     x14, x15, [x1, #96]
+       ldp     x16, x17, [x1, #112]
+
+       mov     x18, #(PAGE_SIZE - 128)
+       add     x1, x1, #128
+1:
+       subs    x18, x18, #128
+
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+       nop
+alternative_else
+       prfm    pldl1strm, [x1, #384]
+alternative_endif
+
        stnp    x2, x3, [x0]
+       ldp     x2, x3, [x1]
        stnp    x4, x5, [x0, #16]
+       ldp     x4, x5, [x1, #16]
        stnp    x6, x7, [x0, #32]
+       ldp     x6, x7, [x1, #32]
        stnp    x8, x9, [x0, #48]
-       add     x0, x0, #64
-       tst     x1, #(PAGE_SIZE - 1)
-       b.ne    1b
+       ldp     x8, x9, [x1, #48]
+       stnp    x10, x11, [x0, #64]
+       ldp     x10, x11, [x1, #64]
+       stnp    x12, x13, [x0, #80]
+       ldp     x12, x13, [x1, #80]
+       stnp    x14, x15, [x0, #96]
+       ldp     x14, x15, [x1, #96]
+       stnp    x16, x17, [x0, #112]
+       ldp     x16, x17, [x1, #112]
+
+       add     x0, x0, #128
+       add     x1, x1, #128
+
+       b.gt    1b
+
+       stnp    x2, x3, [x0]
+       stnp    x4, x5, [x0, #16]
+       stnp    x6, x7, [x0, #32]
+       stnp    x8, x9, [x0, #48]
+       stnp    x10, x11, [x0, #64]
+       stnp    x12, x13, [x0, #80]
+       stnp    x14, x15, [x0, #96]
+       stnp    x16, x17, [x0, #112]
+
        ret
 ENDPROC(copy_page)
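In outline, the rewritten routine copies 128 bytes per iteration, interleaves the loads for the next block with the non-temporal stores of the current one, and only emits the explicit PRFM prefetches when the ARM64_HAS_NO_HW_PREFETCH capability is set (ThunderX, per the cpufeature change earlier in this diff). Roughly equivalent user-space C, purely as a sketch of the structure; the assembly keeps the whole 128-byte block in registers and uses STNP stores.

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Copy one page in 128-byte chunks, prefetching ~384 bytes ahead of the reads. */
static void copy_page_sketch(void *dst, const void *src)
{
        const unsigned char *s = src;
        unsigned char *d = dst;
        size_t off;

        for (off = 0; off < PAGE_SIZE; off += 128) {
                if (off + 384 < PAGE_SIZE)
                        __builtin_prefetch(s + off + 384);
                memcpy(d + off, s + off, 128);
        }
}

int main(void)
{
        static unsigned char src[PAGE_SIZE], dst[PAGE_SIZE];

        copy_page_sketch(dst, src);
        return (int)dst[0];
}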
index 5a22a119a74c87b4b5b54e114701b3c6eed233e6..6be918478f855021fee88a50fefe1b6642252c81 100644 (file)
@@ -35,7 +35,9 @@ struct addr_marker {
 };
 
 enum address_markers_idx {
-       VMALLOC_START_NR = 0,
+       MODULES_START_NR = 0,
+       MODULES_END_NR,
+       VMALLOC_START_NR,
        VMALLOC_END_NR,
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
        VMEMMAP_START_NR,
@@ -45,12 +47,12 @@ enum address_markers_idx {
        FIXADDR_END_NR,
        PCI_START_NR,
        PCI_END_NR,
-       MODULES_START_NR,
-       MODUELS_END_NR,
        KERNEL_SPACE_NR,
 };
 
 static struct addr_marker address_markers[] = {
+       { MODULES_VADDR,        "Modules start" },
+       { MODULES_END,          "Modules end" },
        { VMALLOC_START,        "vmalloc() Area" },
        { VMALLOC_END,          "vmalloc() End" },
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -61,9 +63,7 @@ static struct addr_marker address_markers[] = {
        { FIXADDR_TOP,          "Fixmap end" },
        { PCI_IO_START,         "PCI I/O start" },
        { PCI_IO_END,           "PCI I/O end" },
-       { MODULES_VADDR,        "Modules start" },
-       { MODULES_END,          "Modules end" },
-       { PAGE_OFFSET,          "Kernel Mapping" },
+       { PAGE_OFFSET,          "Linear Mapping" },
        { -1,                   NULL },
 };
 
@@ -90,6 +90,11 @@ struct prot_bits {
 
 static const struct prot_bits pte_bits[] = {
        {
+               .mask   = PTE_VALID,
+               .val    = PTE_VALID,
+               .set    = " ",
+               .clear  = "F",
+       }, {
                .mask   = PTE_USER,
                .val    = PTE_USER,
                .set    = "USR",
index f3b061e67bfe0f4565d5df29322eeaef38a9bcc3..a1df35c85395e2de76b245abf8de0dac6329a823 100644 (file)
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
 
+#include <asm/boot.h>
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
@@ -159,7 +162,33 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-       memblock_enforce_memory_limit(memory_limit);
+       const s64 linear_region_size = -(s64)PAGE_OFFSET;
+
+       /*
+        * Select a suitable value for the base of physical memory.
+        */
+       memstart_addr = round_down(memblock_start_of_DRAM(),
+                                  ARM64_MEMSTART_ALIGN);
+
+       /*
+        * Remove the memory that we will not be able to cover with the
+        * linear mapping. Take care not to clip the kernel which may be
+        * high in memory.
+        */
+       memblock_remove(max(memstart_addr + linear_region_size, __pa(_end)),
+                       ULLONG_MAX);
+       if (memblock_end_of_DRAM() > linear_region_size)
+               memblock_remove(0, memblock_end_of_DRAM() - linear_region_size);
+
+       /*
+        * Apply the memory limit if it was set. Since the kernel may be loaded
+        * high up in memory, add back the kernel region that must be accessible
+        * via the linear mapping.
+        */
+       if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+               memblock_enforce_memory_limit(memory_limit);
+               memblock_add(__pa(_text), (u64)(_end - _text));
+       }
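linear_region_size above is simply the distance from PAGE_OFFSET to the end of the address space, i.e. how much physical memory the linear map can cover starting at memstart_addr. A quick check of the number for the common 48-bit VA configuration; the PAGE_OFFSET value is an assumption made for the example.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t page_offset = 0xffff800000000000ULL;   /* assumed 48-bit VA layout */
        int64_t linear_region_size = -(int64_t)page_offset;

        printf("linear region: %lld bytes = %lld TiB\n",
               (long long)linear_region_size,
               (long long)(linear_region_size >> 40));  /* 2^47 bytes, 128 TiB */
        return 0;
}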
 
        /*
         * Register the kernel text, kernel data, initrd, and initial
@@ -302,22 +331,26 @@ void __init mem_init(void)
 #ifdef CONFIG_KASAN
                  "    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n"
 #endif
+                 "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
                  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
+                 "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+                 "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+                 "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n"
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
                  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
 #endif
                  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
                  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+                 "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 #ifdef CONFIG_KASAN
                  MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
 #endif
+                 MLM(MODULES_VADDR, MODULES_END),
                  MLG(VMALLOC_START, VMALLOC_END),
+                 MLK_ROUNDUP(__init_begin, __init_end),
+                 MLK_ROUNDUP(_text, _etext),
+                 MLK_ROUNDUP(_sdata, _edata),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  MLG((unsigned long)vmemmap,
                      (unsigned long)vmemmap + VMEMMAP_SIZE),
@@ -326,11 +359,7 @@ void __init mem_init(void)
 #endif
                  MLK(FIXADDR_START, FIXADDR_TOP),
                  MLM(PCI_IO_START, PCI_IO_END),
-                 MLM(MODULES_VADDR, MODULES_END),
-                 MLM(PAGE_OFFSET, (unsigned long)high_memory),
-                 MLK_ROUNDUP(__init_begin, __init_end),
-                 MLK_ROUNDUP(_text, _etext),
-                 MLK_ROUNDUP(_sdata, _edata));
+                 MLM(PAGE_OFFSET, (unsigned long)high_memory));
 
 #undef MLK
 #undef MLM
@@ -358,8 +387,8 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-       fixup_init();
        free_initmem_default(0);
+       fixup_init();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -380,3 +409,28 @@ static int __init keepinitrd_setup(char *__unused)
 
 __setup("keepinitrd", keepinitrd_setup);
 #endif
+
+/*
+ * Dump out memory limit information on panic.
+ */
+static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
+{
+       if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+               pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
+       } else {
+               pr_emerg("Memory Limit: none\n");
+       }
+       return 0;
+}
+
+static struct notifier_block mem_limit_notifier = {
+       .notifier_call = dump_mem_limit,
+};
+
+static int __init register_mem_limit_dumper(void)
+{
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &mem_limit_notifier);
+       return 0;
+}
+__initcall(register_mem_limit_dumper);
index cf038c7d9fa994c7d86e05920ffa8961aecc4ad8..66c246871d2e360f36748375d1a0b9e59cd3024e 100644 (file)
 #include <linux/memblock.h>
 #include <linux/start_kernel.h>
 
+#include <asm/mmu_context.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
@@ -32,7 +35,7 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
        if (pmd_none(*pmd))
                pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
 
-       pte = pte_offset_kernel(pmd, addr);
+       pte = pte_offset_kimg(pmd, addr);
        do {
                next = addr + PAGE_SIZE;
                set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
@@ -50,7 +53,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
        if (pud_none(*pud))
                pud_populate(&init_mm, pud, kasan_zero_pmd);
 
-       pmd = pmd_offset(pud, addr);
+       pmd = pmd_offset_kimg(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                kasan_early_pte_populate(pmd, addr, next);
@@ -67,7 +70,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
        if (pgd_none(*pgd))
                pgd_populate(&init_mm, pgd, kasan_zero_pud);
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset_kimg(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                kasan_early_pmd_populate(pud, addr, next);
@@ -96,6 +99,21 @@ asmlinkage void __init kasan_early_init(void)
        kasan_map_early_shadow();
 }
 
+/*
+ * Copy the current shadow region into a new pgdir.
+ */
+void __init kasan_copy_shadow(pgd_t *pgdir)
+{
+       pgd_t *pgd, *pgd_new, *pgd_end;
+
+       pgd = pgd_offset_k(KASAN_SHADOW_START);
+       pgd_end = pgd_offset_k(KASAN_SHADOW_END);
+       pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+       do {
+               set_pgd(pgd_new, *pgd);
+       } while (pgd++, pgd_new++, pgd != pgd_end);
+}
+
 static void __init clear_pgds(unsigned long start,
                        unsigned long end)
 {
@@ -108,18 +126,14 @@ static void __init clear_pgds(unsigned long start,
                set_pgd(pgd_offset_k(start), __pgd(0));
 }
 
-static void __init cpu_set_ttbr1(unsigned long ttbr1)
-{
-       asm(
-       "       msr     ttbr1_el1, %0\n"
-       "       isb"
-       :
-       : "r" (ttbr1));
-}
-
 void __init kasan_init(void)
 {
+       u64 kimg_shadow_start, kimg_shadow_end;
        struct memblock_region *reg;
+       int i;
+
+       kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
+       kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
 
        /*
         * We are going to perform proper setup of shadow memory.
@@ -129,13 +143,30 @@ void __init kasan_init(void)
         * setup will be finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
-       cpu_set_ttbr1(__pa(tmp_pg_dir));
-       flush_tlb_all();
+       dsb(ishst);
+       cpu_replace_ttbr1(tmp_pg_dir);
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
+       vmemmap_populate(kimg_shadow_start, kimg_shadow_end, NUMA_NO_NODE);
+
+       /*
+        * vmemmap_populate() has populated the shadow region that covers the
+        * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
+        * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
+        * kasan_populate_zero_shadow() from replacing the PMD block mappings
+        * with PMD table mappings at the edges of the shadow region for the
+        * kernel image.
+        */
+       if (ARM64_SWAPPER_USES_SECTION_MAPS) {
+               kimg_shadow_start = round_down(kimg_shadow_start,
+                                              SWAPPER_BLOCK_SIZE);
+               kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+       }
        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
-                       kasan_mem_to_shadow((void *)MODULES_VADDR));
+                                  (void *)kimg_shadow_start);
+       kasan_populate_zero_shadow((void *)kimg_shadow_end,
+                                  kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
        for_each_memblock(memory, reg) {
                void *start = (void *)__phys_to_virt(reg->base);
@@ -155,9 +186,16 @@ void __init kasan_init(void)
                                pfn_to_nid(virt_to_pfn(start)));
        }
 
+       /*
+        * KAsan may reuse the contents of kasan_zero_pte directly, so we
+        * should make sure that it maps the zero page read-only.
+        */
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               set_pte(&kasan_zero_pte[i],
+                       pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+
        memset(kasan_zero_page, 0, PAGE_SIZE);
-       cpu_set_ttbr1(__pa(swapper_pg_dir));
-       flush_tlb_all();
+       cpu_replace_ttbr1(swapper_pg_dir);
 
        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
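The SWAPPER_BLOCK_SIZE rounding earlier in this hunk exists because vmemmap_populate() maps the image shadow with SWAPPER_BLOCK_SIZE blocks, so the zero-shadow population on either side must not start or end in the middle of such a block. A small model of the rounding, assuming 4 KB pages (where SWAPPER_BLOCK_SIZE is 2 MiB); the shadow addresses are placeholders.

#include <stdint.h>
#include <stdio.h>

#define SWAPPER_BLOCK_SIZE  (2ULL << 20)        /* assumed: 4 KB pages, 2 MiB blocks */
#define round_down(x, a)    ((x) & ~((a) - 1))
#define round_up(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* placeholder shadow addresses for the start/end of the kernel image */
        uint64_t kimg_shadow_start = 0xffffff9000123456ULL;
        uint64_t kimg_shadow_end   = 0xffffff9000f87654ULL;

        printf("start: 0x%llx -> 0x%llx\n",
               (unsigned long long)kimg_shadow_start,
               (unsigned long long)round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE));
        printf("end:   0x%llx -> 0x%llx\n",
               (unsigned long long)kimg_shadow_end,
               (unsigned long long)round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE));
        return 0;
}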
index 58faeaa7fbdc6c73a73319e15c18bbd2a0948f06..c3e5df62671e6c4eb8c4a4502dd16f0162224256 100644 (file)
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 
+#include <asm/barrier.h>
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 
+u64 kimage_voffset __read_mostly;
+EXPORT_SYMBOL(kimage_voffset);
+
 /*
  * Empty_zero_page is a special page that is used for zero-initialized data
  * and COW.
  */
-struct page *empty_zero_page;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
 {
@@ -62,16 +71,30 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 }
 EXPORT_SYMBOL(phys_mem_access_prot);
 
-static void __init *early_alloc(unsigned long sz)
+static phys_addr_t __init early_pgtable_alloc(void)
 {
        phys_addr_t phys;
        void *ptr;
 
-       phys = memblock_alloc(sz, sz);
+       phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        BUG_ON(!phys);
-       ptr = __va(phys);
-       memset(ptr, 0, sz);
-       return ptr;
+
+       /*
+        * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
+        * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
+        * any level of table.
+        */
+       ptr = pte_set_fixmap(phys);
+
+       memset(ptr, 0, PAGE_SIZE);
+
+       /*
+        * Implicit barriers also ensure the zeroed page is visible to the page
+        * table walker
+        */
+       pte_clear_fixmap();
+
+       return phys;
 }
 
 /*
@@ -95,24 +118,30 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pte_t *pte;
 
        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-               pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+               phys_addr_t pte_phys;
+               BUG_ON(!pgtable_alloc);
+               pte_phys = pgtable_alloc();
+               pte = pte_set_fixmap(pte_phys);
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
-               __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+               __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
                flush_tlb_all();
+               pte_clear_fixmap();
        }
        BUG_ON(pmd_bad(*pmd));
 
-       pte = pte_offset_kernel(pmd, addr);
+       pte = pte_set_fixmap_offset(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       pte_clear_fixmap();
 }
 
 static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -127,10 +156,29 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
        } while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
-static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
-                                 unsigned long addr, unsigned long end,
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+
+       /*
+        * If debug_page_alloc is enabled we must map the linear map
+        * using pages. However, other mappings created by
+        * create_mapping_noalloc must use sections in some cases. Allow
+        * sections to be used in those cases, where no pgtable_alloc
+        * function is provided.
+        */
+       return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+#else
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+       return true;
+}
+#endif
+
+static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pmd_t *pmd;
        unsigned long next;
@@ -139,7 +187,10 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
-               pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+               phys_addr_t pmd_phys;
+               BUG_ON(!pgtable_alloc);
+               pmd_phys = pgtable_alloc();
+               pmd = pmd_set_fixmap(pmd_phys);
                if (pud_sect(*pud)) {
                        /*
                         * need to have the 1G of mappings continue to be
@@ -147,16 +198,18 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                         */
                        split_pud(pud, pmd);
                }
-               pud_populate(mm, pud, pmd);
+               __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
                flush_tlb_all();
+               pmd_clear_fixmap();
        }
        BUG_ON(pud_bad(*pud));
 
-       pmd = pmd_offset(pud, addr);
+       pmd = pmd_set_fixmap_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
-               if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+               if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+                     block_mappings_allowed(pgtable_alloc)) {
                        pmd_t old_pmd =*pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
@@ -167,17 +220,19 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
-                                       phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+                                       phys_addr_t table = pmd_page_paddr(old_pmd);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-                                      prot, alloc);
+                                      prot, pgtable_alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
+
+       pmd_clear_fixmap();
 }
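The "try section mapping first" test above ORs the virtual start, virtual end and physical start together so a single mask check proves the whole 2MiB candidate is section-aligned at both ends. A self-contained demo of that check; the SECTION_SHIFT of 21 (2MiB sections with a 4K granule) is an assumption made for the demo:

    #include <stdio.h>

    #define SECTION_SHIFT 21ULL                 /* 2MiB sections, 4K granule assumed */
    #define SECTION_SIZE  (1ULL << SECTION_SHIFT)
    #define SECTION_MASK  (~(SECTION_SIZE - 1))

    static int can_use_section(unsigned long long addr, unsigned long long next,
                               unsigned long long phys)
    {
        return ((addr | next | phys) & ~SECTION_MASK) == 0;
    }

    int main(void)
    {
        /* VA range and PA both 2MiB-aligned: a block mapping is possible */
        printf("%d\n", can_use_section(0xffff000000200000ULL,
                                       0xffff000000400000ULL, 0x80200000ULL));
        /* PA only 4K-aligned: alloc_init_pte() has to build a PTE table instead */
        printf("%d\n", can_use_section(0xffff000000200000ULL,
                                       0xffff000000400000ULL, 0x80201000ULL));
        return 0;
    }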
 
 static inline bool use_1G_block(unsigned long addr, unsigned long next,
@@ -192,28 +247,30 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
        return true;
 }
 
-static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
-                                 unsigned long addr, unsigned long end,
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pud_t *pud;
        unsigned long next;
 
        if (pgd_none(*pgd)) {
-               pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
-               pgd_populate(mm, pgd, pud);
+               phys_addr_t pud_phys;
+               BUG_ON(!pgtable_alloc);
+               pud_phys = pgtable_alloc();
+               __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
        }
        BUG_ON(pgd_bad(*pgd));
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_set_fixmap_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
 
                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
-               if (use_1G_block(addr, next, phys)) {
+               if (use_1G_block(addr, next, phys) &&
+                   block_mappings_allowed(pgtable_alloc)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));
@@ -228,26 +285,28 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
-                                       phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+                                       phys_addr_t table = pud_page_paddr(old_pud);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
-                       alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
+                       alloc_init_pmd(pud, addr, next, phys, prot,
+                                      pgtable_alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
+
+       pud_clear_fixmap();
 }
 
 /*
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void  __create_mapping(struct mm_struct *mm, pgd_t *pgd,
-                                   phys_addr_t phys, unsigned long virt,
+static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
-                                   void *(*alloc)(unsigned long size))
+                                   phys_addr_t (*pgtable_alloc)(void))
 {
        unsigned long addr, length, end, next;
 
@@ -265,22 +324,35 @@ static void  __create_mapping(struct mm_struct *mm, pgd_t *pgd,
        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
-               alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
+               alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
 }
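init_pgd() above and the alloc_init_* helpers share one walking idiom: clamp each step to the next table boundary and advance the virtual and physical cursors together. A standalone sketch of that loop; the 2MiB boundary is invented for the demo, and the real *_addr_end() macros additionally guard against wrap-around at the top of the address space:

    #include <stdio.h>

    #define BOUNDARY (2ULL << 20)   /* pretend every table entry spans 2MiB */

    static unsigned long long addr_end(unsigned long long addr, unsigned long long end)
    {
        unsigned long long next = (addr + BOUNDARY) & ~(BOUNDARY - 1);

        return next < end ? next : end;
    }

    int main(void)
    {
        unsigned long long addr = 0x1ff000, end = 0x600000, phys = 0x80000000ULL, next;

        do {
            next = addr_end(addr, end);
            printf("entry for [%#llx, %#llx) -> PA %#llx\n", addr, next, phys);
            phys += next - addr;
        } while (addr = next, addr != end);
        return 0;
    }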
 
-static void *late_alloc(unsigned long size)
+static phys_addr_t late_pgtable_alloc(void)
 {
-       void *ptr;
-
-       BUG_ON(size > PAGE_SIZE);
-       ptr = (void *)__get_free_page(PGALLOC_GFP);
+       void *ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);
-       return ptr;
+
+       /* Ensure the zeroed page is visible to the page table walker */
+       dsb(ishst);
+       return __pa(ptr);
+}
+
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+                                unsigned long virt, phys_addr_t size,
+                                pgprot_t prot,
+                                phys_addr_t (*alloc)(void))
+{
+       init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+/*
+ * This function can only be used to modify existing table entries,
+ * without allocating new levels of table. Note that this permits the
+ * creation of new section or page entries.
+ */
+static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
 {
        if (virt < VMALLOC_START) {
@@ -288,16 +360,16 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                        &phys, virt);
                return;
        }
-       __create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
-                        size, prot, early_alloc);
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+                            NULL);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
 {
-       __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
-                               late_alloc);
+       __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
+                            late_pgtable_alloc);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -309,69 +381,48 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                return;
        }
 
-       return __create_mapping(&init_mm, pgd_offset_k(virt),
-                               phys, virt, size, prot, late_alloc);
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+                            late_pgtable_alloc);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
+
+       unsigned long kernel_start = __pa(_stext);
+       unsigned long kernel_end = __pa(_etext);
+
        /*
-        * Set up the executable regions using the existing section mappings
-        * for now. This will get more fine grained later once all memory
-        * is mapped
+        * Take care not to create a writable alias for the
+        * read-only text and rodata sections of the kernel image.
         */
-       unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
-
-       if (end < kernel_x_start) {
-               create_mapping(start, __phys_to_virt(start),
-                       end - start, PAGE_KERNEL);
-       } else if (start >= kernel_x_end) {
-               create_mapping(start, __phys_to_virt(start),
-                       end - start, PAGE_KERNEL);
-       } else {
-               if (start < kernel_x_start)
-                       create_mapping(start, __phys_to_virt(start),
-                               kernel_x_start - start,
-                               PAGE_KERNEL);
-               create_mapping(kernel_x_start,
-                               __phys_to_virt(kernel_x_start),
-                               kernel_x_end - kernel_x_start,
-                               PAGE_KERNEL_EXEC);
-               if (kernel_x_end < end)
-                       create_mapping(kernel_x_end,
-                               __phys_to_virt(kernel_x_end),
-                               end - kernel_x_end,
-                               PAGE_KERNEL);
+
+       /* No overlap with the kernel text */
+       if (end < kernel_start || start >= kernel_end) {
+               __create_pgd_mapping(pgd, start, __phys_to_virt(start),
+                                    end - start, PAGE_KERNEL,
+                                    early_pgtable_alloc);
+               return;
        }
 
+       /*
+        * This block overlaps the kernel text mapping. Map the portion(s) which
+        * don't overlap.
+        */
+       if (start < kernel_start)
+               __create_pgd_mapping(pgd, start,
+                                    __phys_to_virt(start),
+                                    kernel_start - start, PAGE_KERNEL,
+                                    early_pgtable_alloc);
+       if (kernel_end < end)
+               __create_pgd_mapping(pgd, kernel_end,
+                                    __phys_to_virt(kernel_end),
+                                    end - kernel_end, PAGE_KERNEL,
+                                    early_pgtable_alloc);
 }
-#else
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
-{
-       create_mapping(start, __phys_to_virt(start), end - start,
-                       PAGE_KERNEL_EXEC);
-}
-#endif
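The rewritten __map_memblock() is a plain interval split: map whatever part of the bank falls outside [_stext, _etext) and leave the kernel text to the separate kernel mapping, so the linear map never gains a writable alias of it. The same logic as a runnable model, with invented addresses:

    #include <stdio.h>

    static void map_linear(unsigned long start, unsigned long end)
    {
        printf("PAGE_KERNEL mapping for [%#lx, %#lx)\n", start, end);
    }

    static void map_block(unsigned long start, unsigned long end,
                          unsigned long kernel_start, unsigned long kernel_end)
    {
        if (end < kernel_start || start >= kernel_end) {
            map_linear(start, end);         /* no overlap with the kernel text */
            return;
        }
        if (start < kernel_start)           /* portion below _stext */
            map_linear(start, kernel_start);
        if (kernel_end < end)               /* portion above _etext */
            map_linear(kernel_end, end);
    }

    int main(void)
    {
        /* one memory bank, with the kernel image sitting inside it */
        map_block(0x40000000UL, 0x80000000UL, 0x40080000UL, 0x40a00000UL);
        return 0;
    }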
 
-static void __init map_mem(void)
+static void __init map_mem(pgd_t *pgd)
 {
        struct memblock_region *reg;
-       phys_addr_t limit;
-
-       /*
-        * Temporarily limit the memblock range. We need to do this as
-        * create_mapping requires puds, pmds and ptes to be allocated from
-        * memory addressable from the initial direct kernel mapping.
-        *
-        * The initial direct kernel mapping, located at swapper_pg_dir, gives
-        * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps,
-        * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
-        * per Documentation/arm64/booting.txt).
-        */
-       limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
-       memblock_set_current_limit(limit);
 
        /* map all the memory banks */
        for_each_memblock(memory, reg) {
@@ -383,69 +434,87 @@ static void __init map_mem(void)
                if (memblock_is_nomap(reg))
                        continue;
 
-               if (ARM64_SWAPPER_USES_SECTION_MAPS) {
-                       /*
-                        * For the first memory bank align the start address and
-                        * current memblock limit to prevent create_mapping() from
-                        * allocating pte page tables from unmapped memory. With
-                        * the section maps, if the first block doesn't end on section
-                        * size boundary, create_mapping() will try to allocate a pte
-                        * page, which may be returned from an unmapped area.
-                        * When section maps are not used, the pte page table for the
-                        * current limit is already present in swapper_pg_dir.
-                        */
-                       if (start < limit)
-                               start = ALIGN(start, SECTION_SIZE);
-                       if (end < limit) {
-                               limit = end & SECTION_MASK;
-                               memblock_set_current_limit(limit);
-                       }
-               }
-               __map_memblock(start, end);
+               __map_memblock(pgd, start, end);
        }
-
-       /* Limit no longer required. */
-       memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
-static void __init fixup_executable(void)
+void mark_rodata_ro(void)
 {
-#ifdef CONFIG_DEBUG_RODATA
-       /* now that we are actually fully mapped, make the start/end more fine grained */
-       if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
-               unsigned long aligned_start = round_down(__pa(_stext),
-                                                        SWAPPER_BLOCK_SIZE);
+       if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
+               return;
 
-               create_mapping(aligned_start, __phys_to_virt(aligned_start),
-                               __pa(_stext) - aligned_start,
-                               PAGE_KERNEL);
-       }
+       create_mapping_late(__pa(_stext), (unsigned long)_stext,
+                               (unsigned long)_etext - (unsigned long)_stext,
+                               PAGE_KERNEL_ROX);
+}
 
-       if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
-               unsigned long aligned_end = round_up(__pa(__init_end),
-                                                         SWAPPER_BLOCK_SIZE);
-               create_mapping(__pa(__init_end), (unsigned long)__init_end,
-                               aligned_end - __pa(__init_end),
-                               PAGE_KERNEL);
-       }
-#endif
+void fixup_init(void)
+{
+       /*
+        * Unmap the __init region but leave the VM area in place. This
+        * prevents the region from being reused for kernel modules, which
+        * is not supported by kallsyms.
+        */
+       unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
 }
 
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void)
+static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
+                                   pgprot_t prot, struct vm_struct *vma)
 {
-       create_mapping_late(__pa(_stext), (unsigned long)_stext,
-                               (unsigned long)_etext - (unsigned long)_stext,
-                               PAGE_KERNEL_ROX);
+       phys_addr_t pa_start = __pa(va_start);
+       unsigned long size = va_end - va_start;
+
+       BUG_ON(!PAGE_ALIGNED(pa_start));
+       BUG_ON(!PAGE_ALIGNED(size));
 
+       __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
+                            early_pgtable_alloc);
+
+       vma->addr       = va_start;
+       vma->phys_addr  = pa_start;
+       vma->size       = size;
+       vma->flags      = VM_MAP;
+       vma->caller     = map_kernel_chunk;
+
+       vm_area_add_early(vma);
 }
-#endif
 
-void fixup_init(void)
+/*
+ * Create fine-grained mappings for the kernel.
+ */
+static void __init map_kernel(pgd_t *pgd)
 {
-       create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
-                       (unsigned long)__init_end - (unsigned long)__init_begin,
-                       PAGE_KERNEL);
+       static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;
+
+       map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+       map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
+                        &vmlinux_init);
+       map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+
+       if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+               /*
+                * The fixmap falls in a separate pgd to the kernel, and doesn't
+                * live in the carveout for the swapper_pg_dir. We can simply
+                * re-use the existing dir for the fixmap.
+                */
+               set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
+                       *pgd_offset_k(FIXADDR_START));
+       } else if (CONFIG_PGTABLE_LEVELS > 3) {
+               /*
+                * The fixmap shares its top level pgd entry with the kernel
+                * mapping. This can really only occur when we are running
+                * with 16k/4 levels, so we can simply reuse the pud level
+                * entry instead.
+                */
+               BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+               set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
+                       __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+               pud_clear_fixmap();
+       } else {
+               BUG();
+       }
+
+       kasan_copy_shadow(pgd);
 }
 
 /*
@@ -454,28 +523,35 @@ void fixup_init(void)
  */
 void __init paging_init(void)
 {
-       void *zero_page;
-
-       map_mem();
-       fixup_executable();
+       phys_addr_t pgd_phys = early_pgtable_alloc();
+       pgd_t *pgd = pgd_set_fixmap(pgd_phys);
 
-       /* allocate the zero page. */
-       zero_page = early_alloc(PAGE_SIZE);
+       map_kernel(pgd);
+       map_mem(pgd);
 
-       bootmem_init();
-
-       empty_zero_page = virt_to_page(zero_page);
+       /*
+        * We want to reuse the original swapper_pg_dir so we don't have to
+        * communicate the new address to non-coherent secondaries in
+        * secondary_entry, and so cpu_switch_mm can generate the address with
+        * adrp+add rather than a load from some global variable.
+        *
+        * To do this we need to go via a temporary pgd.
+        */
+       cpu_replace_ttbr1(__va(pgd_phys));
+       memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
+       cpu_replace_ttbr1(swapper_pg_dir);
 
-       /* Ensure the zero page is visible to the page table walker */
-       dsb(ishst);
+       pgd_clear_fixmap();
+       memblock_free(pgd_phys, PAGE_SIZE);
 
        /*
-        * TTBR0 is only used for the identity mapping at this stage. Make it
-        * point to zero page to avoid speculatively fetching new entries.
+        * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
+        * allocated with it.
         */
-       cpu_set_reserved_ttbr0();
-       local_flush_tlb_all();
-       cpu_set_default_tcr_t0sz();
+       memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+                     SWAPPER_DIR_SIZE - PAGE_SIZE);
+
+       bootmem_init();
 }
 
 /*
@@ -562,21 +638,13 @@ void vmemmap_free(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_PGTABLE_LEVELS > 2
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_PGTABLE_LEVELS > 3
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
 static inline pud_t * fixmap_pud(unsigned long addr)
 {
        pgd_t *pgd = pgd_offset_k(addr);
 
        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
 
-       return pud_offset(pgd, addr);
+       return pud_offset_kimg(pgd, addr);
 }
 
 static inline pmd_t * fixmap_pmd(unsigned long addr)
@@ -585,16 +653,12 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
 
        BUG_ON(pud_none(*pud) || pud_bad(*pud));
 
-       return pmd_offset(pud, addr);
+       return pmd_offset_kimg(pud, addr);
 }
 
 static inline pte_t * fixmap_pte(unsigned long addr)
 {
-       pmd_t *pmd = fixmap_pmd(addr);
-
-       BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
-
-       return pte_offset_kernel(pmd, addr);
+       return &bm_pte[pte_index(addr)];
 }
 
 void __init early_fixmap_init(void)
@@ -605,15 +669,25 @@ void __init early_fixmap_init(void)
        unsigned long addr = FIXADDR_START;
 
        pgd = pgd_offset_k(addr);
-       pgd_populate(&init_mm, pgd, bm_pud);
-       pud = pud_offset(pgd, addr);
+       if (CONFIG_PGTABLE_LEVELS > 3 && !pgd_none(*pgd)) {
+               /*
+                * We only end up here if the kernel mapping and the fixmap
+                * share the top level pgd entry, which should only happen on
+                * 16k/4 levels configurations.
+                */
+               BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+               pud = pud_offset_kimg(pgd, addr);
+       } else {
+               pgd_populate(&init_mm, pgd, bm_pud);
+               pud = fixmap_pud(addr);
+       }
        pud_populate(&init_mm, pud, bm_pmd);
-       pmd = pmd_offset(pud, addr);
+       pmd = fixmap_pmd(addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);
 
        /*
         * The boot-ioremap range spans multiple pmds, for which
-        * we are not preparted:
+        * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
@@ -673,7 +747,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
-        * to call create_mapping() this early.
+        * to call create_mapping_noalloc() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
@@ -689,8 +763,8 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
        dt_virt = (void *)dt_virt_base + offset;
 
        /* map the first chunk so we can read the size from the header */
-       create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-                      SWAPPER_BLOCK_SIZE, prot);
+       create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+                       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
 
        if (fdt_check_header(dt_virt) != 0)
                return NULL;
@@ -700,10 +774,51 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
                return NULL;
 
        if (offset + size > SWAPPER_BLOCK_SIZE)
-               create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+               create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
 
        memblock_reserve(dt_phys, size);
 
        return dt_virt;
 }
+
+int __init arch_ioremap_pud_supported(void)
+{
+       /* only 4k granule supports level 1 block mappings */
+       return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+       return 1;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+       BUG_ON(phys & ~PUD_MASK);
+       set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+       return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+       BUG_ON(phys & ~PMD_MASK);
+       set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+       return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+       if (!pud_sect(*pud))
+               return 0;
+       pud_clear(pud);
+       return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+       if (!pmd_sect(*pmd))
+               return 0;
+       pmd_clear(pmd);
+       return 1;
+}
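These helpers let the generic ioremap path install 2MiB blocks, and 1GiB blocks on the 4K granule, instead of PTEs. Roughly how a caller would pick the block size is sketched below; the real decision lives in lib/ioremap.c and also honours its own enable switches, so this is an approximation with invented addresses and 4K-granule sizes:

    #include <stdio.h>

    #define PMD_SIZE (1ULL << 21)
    #define PUD_SIZE (1ULL << 30)

    static const char *pick_mapping(unsigned long long addr, unsigned long long phys,
                                    unsigned long long size, int pud_blocks_ok)
    {
        if (pud_blocks_ok && size >= PUD_SIZE && !((addr | phys) & (PUD_SIZE - 1)))
            return "1GiB PUD block (pud_set_huge)";
        if (size >= PMD_SIZE && !((addr | phys) & (PMD_SIZE - 1)))
            return "2MiB PMD block (pmd_set_huge)";
        return "4KiB PTEs";
    }

    int main(void)
    {
        printf("%s\n", pick_mapping(0xffff7e0040000000ULL, 0x40000000ULL, 1ULL << 30, 1));
        printf("%s\n", pick_mapping(0xffff7e0040200000ULL, 0x40200000ULL, 1ULL << 22, 1));
        printf("%s\n", pick_mapping(0xffff7e0040201000ULL, 0x40201000ULL, 1ULL << 22, 1));
        return 0;
    }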
index 3571c7309c5e79f0d2d3e20986a581ffb6454dae..ca6d268e3313229b0941ce7d33439c7a4c861120 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -36,14 +37,32 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
        return 0;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+                               pgprot_t set_mask, pgprot_t clear_mask)
+{
+       struct page_change_data data;
+       int ret;
+
+       data.set_mask = set_mask;
+       data.clear_mask = clear_mask;
+
+       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                       &data);
+
+       flush_tlb_kernel_range(start, start + size);
+       return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
 {
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE*numpages;
        unsigned long end = start + size;
-       int ret;
-       struct page_change_data data;
+       struct vm_struct *area;
 
        if (!PAGE_ALIGNED(addr)) {
                start &= PAGE_MASK;
@@ -51,20 +70,29 @@ static int change_memory_common(unsigned long addr, int numpages,
                WARN_ON_ONCE(1);
        }
 
-       if (start < MODULES_VADDR || start >= MODULES_END)
-               return -EINVAL;
-
-       if (end < MODULES_VADDR || end >= MODULES_END)
+       /*
+        * Kernel VA mappings are always live, and splitting live section
+        * mappings into page mappings may cause TLB conflicts. This means
+        * we have to ensure that changing the permission bits of the range
+        * we are operating on does not result in such splitting.
+        *
+        * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+        * Those are guaranteed to consist entirely of page mappings, and
+        * splitting is never needed.
+        *
+        * So check whether the [addr, addr + size) interval is entirely
+        * covered by precisely one VM area that has the VM_ALLOC flag set.
+        */
+       area = find_vm_area((void *)addr);
+       if (!area ||
+           end > (unsigned long)area->addr + area->size ||
+           !(area->flags & VM_ALLOC))
                return -EINVAL;
 
-       data.set_mask = set_mask;
-       data.clear_mask = clear_mask;
-
-       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-                                       &data);
+       if (!numpages)
+               return 0;
 
-       flush_tlb_kernel_range(start, end);
-       return ret;
+       return __change_memory_common(start, size, set_mask, clear_mask);
 }
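After this change change_memory_common() is mostly the containment check: the request has to sit entirely inside one VM_ALLOC area, since those are built from page mappings and never need a block split. A small model of the guard; the VM_ALLOC value is taken from include/linux/vmalloc.h and the struct is a stand-in for struct vm_struct:

    #include <stdio.h>

    #define VM_ALLOC 0x00000002UL   /* value as in include/linux/vmalloc.h */
    #define EINVAL   22

    struct fake_vm_area {
        unsigned long addr;
        unsigned long size;
        unsigned long flags;
    };

    static int check_range(const struct fake_vm_area *area,
                           unsigned long start, unsigned long size)
    {
        unsigned long end = start + size;

        if (!area || end > area->addr + area->size || !(area->flags & VM_ALLOC))
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        struct fake_vm_area mod = { 0x10000000UL, 0x4000UL, VM_ALLOC };

        printf("%d\n", check_range(&mod, mod.addr, 0x2000UL));            /* 0 */
        printf("%d\n", check_range(&mod, mod.addr + 0x3000UL, 0x2000UL)); /* -22 */
        return 0;
    }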
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -96,3 +124,19 @@ int set_memory_x(unsigned long addr, int numpages)
                                        __pgprot(PTE_PXN));
 }
 EXPORT_SYMBOL_GPL(set_memory_x);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       unsigned long addr = (unsigned long) page_address(page);
+
+       if (enable)
+               __change_memory_common(addr, PAGE_SIZE * numpages,
+                                       __pgprot(PTE_VALID),
+                                       __pgprot(0));
+       else
+               __change_memory_common(addr, PAGE_SIZE * numpages,
+                                       __pgprot(0),
+                                       __pgprot(PTE_VALID));
+}
+#endif
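Mapping and unmapping here reduces to the set/clear-mask update change_page_range() applies to each PTE, with PTE_VALID the only bit touched. The read-modify-write those masks describe, modelled standalone (PTE_VALID as arm64's bit 0; the sample PTE value is arbitrary):

    #include <stdio.h>

    #define PTE_VALID (1ULL << 0)   /* bit 0, as on arm64 */

    static unsigned long long apply_masks(unsigned long long pte,
                                          unsigned long long set,
                                          unsigned long long clear)
    {
        return (pte & ~clear) | set;
    }

    int main(void)
    {
        unsigned long long pte = 0x0040000080200705ULL;     /* arbitrary example */

        pte = apply_masks(pte, 0, PTE_VALID);       /* __kernel_map_pages(.., 0) */
        printf("unmapped: %#llx\n", pte);
        pte = apply_masks(pte, PTE_VALID, 0);       /* __kernel_map_pages(.., 1) */
        printf("mapped:   %#llx\n", pte);
        return 0;
    }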
index 146bd99a7532bcc6f53ad509a839561640eeaf3e..e6a30e1268a8c7570c8298797d7cf27dcb0d649f 100644 (file)
        b.lo    9998b
        dsb     \domain
        .endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+       .macro  reset_pmuserenr_el0, tmpreg
+       mrs     \tmpreg, id_aa64dfr0_el1        // Check ID_AA64DFR0_EL1 PMUVer
+       sbfx    \tmpreg, \tmpreg, #8, #4
+       cmp     \tmpreg, #1                     // Skip if no PMU present
+       b.lt    9000f
+       msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
+9000:
+       .endm
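The sbfx/cmp pair sign-extracts ID_AA64DFR0_EL1.PMUVer (bits [11:8]) and skips the PMUSERENR_EL0 write unless the field is a positive version number. The same arithmetic in C as a quick check of the field handling; it relies on the usual two's-complement conversion and arithmetic right shift, and the register values are made up:

    #include <stdio.h>

    /* sign-extract 'width' bits starting at 'lsb', like the sbfx instruction */
    static long long sbfx(unsigned long long reg, int lsb, int width)
    {
        return (long long)(reg << (64 - lsb - width)) >> (64 - width);
    }

    int main(void)
    {
        unsigned long long dfr0_pmuv3  = 0x1ULL << 8;   /* PMUVer == 1: PMUv3 present */
        unsigned long long dfr0_no_pmu = 0xfULL << 8;   /* PMUVer == 0xf: IMPLEMENTATION
                                                           DEFINED, reads as -1 */

        printf("PMUv3 : write pmuserenr_el0? %s\n",
               sbfx(dfr0_pmuv3, 8, 4) >= 1 ? "yes" : "no");
        printf("no PMU: write pmuserenr_el0? %s\n",
               sbfx(dfr0_no_pmu, 8, 4) >= 1 ? "yes" : "no");
        return 0;
    }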
index a3d867e723b4f0c5c23283c89264d4a2f7f51561..0c19534a901e616ecc5fe508ce205dc0de8fe0f4 100644 (file)
@@ -117,7 +117,7 @@ ENTRY(cpu_do_resume)
         */
        ubfx    x11, x11, #1, #1
        msr     oslar_el1, x11
-       msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
+       reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
        mov     x0, x12
        dsb     nsh             // Make sure local tlb invalidation completed
        isb
@@ -140,6 +140,34 @@ ENTRY(cpu_do_switch_mm)
        ret
 ENDPROC(cpu_do_switch_mm)
 
+       .pushsection ".idmap.text", "ax"
+/*
+ * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ *
+ * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
+ * called by anything else. It can only be executed from a TTBR0 mapping.
+ */
+ENTRY(idmap_cpu_replace_ttbr1)
+       mrs     x2, daif
+       msr     daifset, #0xf
+
+       adrp    x1, empty_zero_page
+       msr     ttbr1_el1, x1
+       isb
+
+       tlbi    vmalle1
+       dsb     nsh
+       isb
+
+       msr     ttbr1_el1, x0
+       isb
+
+       msr     daif, x2
+
+       ret
+ENDPROC(idmap_cpu_replace_ttbr1)
+       .popsection
+
 /*
  *     __cpu_setup
  *
@@ -154,7 +182,7 @@ ENTRY(__cpu_setup)
        msr     cpacr_el1, x0                   // Enable FP/ASIMD
        mov     x0, #1 << 12                    // Reset mdscr_el1 and disable
        msr     mdscr_el1, x0                   // access to the DCC from EL0
-       msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
+       reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
        /*
         * Memory region attributes for LPAE:
         *
index 4ce9fa874a577e559d5e916a24ddfb71eef6fcdd..6ae884bf66a5268c9dba08db6111df7dc9f40d06 100644 (file)
                reg = <0xffff78 8>;
                interrupts = <88 0>, <89 0>, <90 0>, <91 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
        sci1: serial@ffff80 {
                compatible = "renesas,sci";
                reg = <0xffff80 8>;
                interrupts = <92 0>, <93 0>, <94 0>, <95 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
        sci2: serial@ffff88 {
                compatible = "renesas,sci";
                reg = <0xffff88 8>;
                interrupts = <96 0>, <97 0>, <98 0>, <99 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
 };
index 545bfb57af9a9f2e8a81ff38aa46dcedf75c3757..9c733d920f1f0dc3eab70dd53e76d22a4860c3f3 100644 (file)
@@ -83,7 +83,7 @@
                reg = <0xffffb0 8>;
                interrupts = <52 0>, <53 0>, <54 0>, <55 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
 
        sci1: serial@ffffb8 {
@@ -91,6 +91,6 @@
                reg = <0xffffb8 8>;
                interrupts = <56 0>, <57 0>, <58 0>, <59 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
 };
index bcedba5a3ce7e8ae8fefaa195d86261b8ed8ba28..97e1f4b17ef067d5f10738e8aed5983359ce61c4 100644 (file)
                reg = <0xffff78 8>;
                interrupts = <88 0>, <89 0>, <90 0>, <91 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
        sci1: serial@ffff80 {
                compatible = "renesas,sci";
                reg = <0xffff80 8>;
                interrupts = <92 0>, <93 0>, <94 0>, <95 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
 };
index 836ac5a963c83140d18dcd762459d19e1fd0c101..2841c0a3fd3bb201a4bed53d823858b32a0a009b 100644 (file)
@@ -276,6 +276,7 @@ source "kernel/Kconfig.preempt"
 
 config SMP
        bool "Symmetric multi-processing support"
+       depends on MMU
        ---help---
          This enables support for systems with more than one CPU. If you have
          a system with only one CPU, say N. If you have a system with more
index 71ea4c02795d45438727059b708731b0babbf220..a0fc0c192427ef995f3ae7029ade5d592e6d6b1f 100644 (file)
@@ -89,7 +89,7 @@ static struct platform_device mcf_uart = {
        .dev.platform_data      = mcf_uart_platform_data,
 };
 
-#ifdef CONFIG_FEC
+#if IS_ENABLED(CONFIG_FEC)
 
 #ifdef CONFIG_M5441x
 #define FEC_NAME       "enet-fec"
@@ -329,7 +329,7 @@ static struct platform_device mcf_qspi = {
 
 static struct platform_device *mcf_devices[] __initdata = {
        &mcf_uart,
-#ifdef CONFIG_FEC
+#if IS_ENABLED(CONFIG_FEC)
        &mcf_fec0,
 #ifdef MCFFEC_BASE1
        &mcf_fec1,
index fc96e814188e57aa9dee8ed516c62200c543ad56..d1fc4796025edb8769ee01195e64b91821f3625a 100644 (file)
@@ -108,6 +108,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -366,6 +374,7 @@ CONFIG_ARIADNE=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_HYDRA=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
index 05c904f08d9d496fb7455df0d1506fb8d8c9bc23..9bfe8be3658c18231ca4473d8c075a1fb2ac4d5e 100644 (file)
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -344,6 +352,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index d572b731c510fdb2dc60616ee1ff0ce7e10516ce..ebdcfae555801cd1c7367d4ca40974b3d39099cb 100644 (file)
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -353,6 +361,7 @@ CONFIG_ATARILANCE=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
index 11a30c65ad44cb52347929d51f80301c8aca648b..8acc65e54995388614666716febdab1dda706376 100644 (file)
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 6630a5154b9d797ebea93cdbe70886bc673f60cd..0c6a3d52b26e2b2559f24963040d3ba65a91ed47 100644 (file)
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -345,6 +353,7 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 1d90b71d09038da90cfad0cab267d60dae0fd752..12a8a6cb32f4914f06c1f5d4c8e5dd6ba31381ef 100644 (file)
@@ -105,6 +105,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -362,6 +370,7 @@ CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
index 1fd21c1ca87fd8da85ace8c64930cfeed4383370..64ff2dcb34c89a3e60e26f9c468d654ed943e97f 100644 (file)
@@ -115,6 +115,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -276,6 +278,12 @@ CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -404,6 +412,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
 CONFIG_NE2000=y
index 74e10f79d7b1f9475574d422451314b7bcc1af64..07fc6abcfe0c50e4b656a63a9da36c728b13cec4 100644 (file)
@@ -103,6 +103,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -261,6 +263,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 7034e716f166be8f869f872b12f9cbf960054029..69903ded88f71d1d51ad4b430acbd2f745fdf867 100644 (file)
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index f7deb5f702a6484dda646577f48ade90b902bf18..bd8401686ddef143bf036159cb3f4ea650772f32 100644 (file)
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -352,6 +360,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
index 0ce79eb0d80503140d928c3c6c77061a2ead34d9..5f9fb3ab9636808d46b75f3696f477ab91e6dd79 100644 (file)
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -340,6 +348,7 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 4cb787e4991fcfd02646b1144999419b40454f83..5d1c674530e2ba73ca43ffc4940f139772962152 100644 (file)
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -341,6 +349,7 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index f9d96bf869109c028e5a9f1f12ad3e9fe8b933ba..bafaff6dcd7bda8a28101f140159f9a7a76638db 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            376
+#define NR_syscalls            377
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 36cf129de663a7ca22f1bf1bba5a6245b7b04c03..0ca729665f29e9d67851aed2c1c52be75bbd078b 100644 (file)
 #define __NR_userfaultfd       373
 #define __NR_membarrier                374
 #define __NR_mlock2            375
+#define __NR_copy_file_range   376
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 282cd903f4c469197738eb9e840eaa75c77ec11a..8bb94261ff97d953fcfbe7c305e6a7ecfce27a97 100644 (file)
@@ -396,3 +396,4 @@ ENTRY(sys_call_table)
        .long sys_userfaultfd
        .long sys_membarrier
        .long sys_mlock2                /* 375 */
+       .long sys_copy_file_range
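Together the m68k hunks above (the NR_syscalls bump, the __NR_copy_file_range definition and the sys_call_table slot) expose the new call to userspace. A usage sketch that invokes it by number, so it works even before a libc wrapper exists; in.txt is assumed to exist and error handling is omitted:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_copy_file_range
    #define __NR_copy_file_range 376        /* the m68k number added above */
    #endif

    int main(void)
    {
        int in  = open("in.txt", O_RDONLY);
        int out = open("out.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        /* copy_file_range(fd_in, *off_in, fd_out, *off_out, len, flags) */
        long copied = syscall(__NR_copy_file_range, in, NULL, out, NULL, 4096, 0);

        printf("copied %ld bytes\n", copied);
        return copied < 0;
    }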
index ac8c039b0318fe48929bff8f60ec4f9d58740120..f7b23d300881b9bf1409c0afd80c4bfd93e27c79 100644 (file)
@@ -115,7 +115,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        return ftrace_modify_code(ip, old, new);
 }
 
-/* run from kstop_machine */
 int __init ftrace_dyn_arch_init(void)
 {
        return 0;
index 76ed17b56fead0092462c7f5498767b281c0339a..805ae5d712e8baa63095199f1601ce548c26d606 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         389
+#define __NR_syscalls         392
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index 32850c73be09b915309fe892486c0d5187eed0a5..a8bd3fa28bc7f4e97158fff49d25443524647b0d 100644 (file)
 #define __NR_memfd_create      386
 #define __NR_bpf               387
 #define __NR_execveat          388
+#define __NR_userfaultfd       389
+#define __NR_membarrier                390
+#define __NR_mlock2            391
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 29c8568ec55c32776139cd79e1e441d23638078d..6b3dd99126d753a22a9ed270ec92761c2f936e27 100644 (file)
@@ -389,3 +389,6 @@ ENTRY(sys_call_table)
        .long sys_memfd_create
        .long sys_bpf
        .long sys_execveat
+       .long sys_userfaultfd
+       .long sys_membarrier            /* 390 */
+       .long sys_mlock2
index 57a945e832f43ff711fe04d1fa8a52fe1970f000..0f6b20a702feb6588f35eb92cc27aef171ad3bef 100644 (file)
@@ -137,7 +137,7 @@ config ATH79
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_MIPS16
-       select SYS_SUPPORTS_ZBOOT
+       select SYS_SUPPORTS_ZBOOT_UART_PROM
        select USE_OF
        help
          Support for the Atheros AR71XX/AR724X/AR913X SoCs.
index 13c04cf54afac93bd2c0d8fdaa852ecd9911055e..dfc60209dc63ca231a687175a9e9545c1cd9833a 100644 (file)
@@ -71,18 +71,6 @@ config ATH79_MACH_UBNT_XM
          Say 'Y' here if you want your kernel to support the
          Ubiquiti Networks XM (rev 1.0) board.
 
-choice
-       prompt "Build a DTB in the kernel"
-       optional
-       help
-         Select a devicetree that should be built into the kernel.
-
-       config DTB_TL_WR1043ND_V1
-               bool "TL-WR1043ND Version 1"
-               select BUILTIN_DTB
-               select SOC_AR913X
-endchoice
-
 endmenu
 
 config SOC_AR71XX
index be451ee4a5eaf5b8ba6de0d11fc8c76f6d6db7fa..01808e85e263d5be0f87d07bbaa4ddd9b670c1c6 100644 (file)
@@ -203,10 +203,8 @@ void __init plat_mem_setup(void)
        fdt_start = fw_getenvl("fdt_start");
        if (fdt_start)
                __dt_setup_arch((void *)KSEG0ADDR(fdt_start));
-#ifdef CONFIG_BUILTIN_DTB
-       else
-               __dt_setup_arch(__dtb_start);
-#endif
+       else if (fw_arg0 == -2)
+               __dt_setup_arch((void *)KSEG0ADDR(fw_arg1));
 
        ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
                                           AR71XX_RESET_SIZE);
@@ -215,10 +213,11 @@ void __init plat_mem_setup(void)
        ath79_detect_sys_type();
        ath79_ddr_ctrl_init();
 
-       if (mips_machtype != ATH79_MACH_GENERIC_OF)
+       if (mips_machtype != ATH79_MACH_GENERIC_OF) {
                detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
-
-       _machine_restart = ath79_restart;
+               /* OF machines should use the reset driver */
+               _machine_restart = ath79_restart;
+       }
        _machine_halt = ath79_halt;
        pm_power_off = ath79_halt;
 }
index 5f2bc1e10eae60c314663685f0e7dbdd84757ad3..05757aed016cd6f72bedbd0ac31f379e8f0cd884 100644 (file)
@@ -19,6 +19,8 @@
 
 #include <bcm63xx_nvram.h>
 
+#define BCM63XX_DEFAULT_PSI_SIZE       64
+
 static struct bcm963xx_nvram nvram;
 static int mac_addr_used;
 
@@ -85,3 +87,12 @@ int bcm63xx_nvram_get_mac_address(u8 *mac)
        return 0;
 }
 EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address);
+
+int bcm63xx_nvram_get_psi_size(void)
+{
+       if (nvram.psi_size > 0)
+               return nvram.psi_size;
+
+       return BCM63XX_DEFAULT_PSI_SIZE;
+}
+EXPORT_SYMBOL(bcm63xx_nvram_get_psi_size);
index 4eff1ef02eff9abec4ae03d4248f144ef8fa0058..90aca95fe3149696d772e3ae41cdcfa388d79b80 100644 (file)
@@ -37,12 +37,18 @@ vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT)              += $(obj)/dbg.o
 vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
 vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
 vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY)                += $(obj)/uart-alchemy.o
+vmlinuzobjs-$(CONFIG_ATH79)                       += $(obj)/uart-ath79.o
 endif
 
-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
+extra-y += uart-ath79.c
+$(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c
+       $(call cmd,shipped)
+
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
 
-$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
-$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c
+extra-y += ashldi3.c bswapsi.c
+$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
+$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
        $(call cmd,shipped)
 
 targets := $(notdir $(vmlinuzobjs-y))
index 459b9b252c3b73fe2c95362177655b3a6f154a6f..d61b1616b604552be9a30f89eb42539de6838b14 100644 (file)
@@ -74,6 +74,7 @@
                timer: timer@10000040 {
                        compatible = "syscon";
                        reg = <0x10000040 0x2c>;
+                       little-endian;
                };
 
                reboot {
index 4fc7ecee273c105027ff20cea02f2bd5cf86c9fe..1a7efa883c5e3fd2e046b554485270036193b382 100644 (file)
@@ -98,6 +98,7 @@
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x60c>;
+                       little-endian;
                };
 
                reboot {
index a3039bb53477d40a426fec1d1318f3156ede8be3..d4bf52cfcf170ee8ac84daa874495e0a6420e542 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       little-endian;
                };
 
                reboot {
index 4274ff41ec2122ac0bfd52d814aee8432c26553e..8e2501694d03fbd93827aeda79ef22f7cfd5d094 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       little-endian;
                };
 
                reboot {
index 0dcc9163c27bdd0022e5b66f3ffd65da7fd6ce31..7e5f76040fb898b19a4bbc301c8a20f3b9368aa4 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       little-endian;
                };
 
                reboot {
index 2f3f9fc2c478df36ef57c5990dd81f6757240e3a..c739ea77acb0dfe17363ec52cf390cace407e54c 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       little-endian;
                };
 
                reboot {
index bee221b3b56857c8d84dac3e2fa9bfe8b3c54a85..5f55d0a50a28622614ec6142eb0ff19746dfaade 100644 (file)
@@ -99,6 +99,7 @@
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x60c>;
+                       little-endian;
                };
 
                reboot {
index 571f30f52e3ff5780ec4fd72e18e72737e51537d..e24d41ab4e30f9163605180d78605fc02a477db6 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       little-endian;
                };
 
                reboot {
index 614ee211f71a89356dd1eb814a38ec3071885747..8b9432cc062bc7e89898f2f1f2213926193389f8 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       little-endian;
                };
 
                reboot {
index 2d61455d585d2d59f87616d872db5297b98b2ce5..14bd225142832f15822fd44513380eaa4b72f2e6 100644 (file)
@@ -1,9 +1,6 @@
 # All DTBs
 dtb-$(CONFIG_ATH79)                    += ar9132_tl_wr1043nd_v1.dtb
 
-# Select a DTB to build in the kernel
-obj-$(CONFIG_DTB_TL_WR1043ND_V1)       += ar9132_tl_wr1043nd_v1.dtb.o
-
 # Force kbuild to make empty built-in.o if necessary
 obj-                           += dummy.o
 
index b7fa9ae28c3659dbf457aecd7cd17255cd34f5da..0449d935db0acde8bf34254c1f27627b7ab1e8b1 100644 (file)
@@ -242,7 +242,7 @@ static int octeon_cpu_disable(void)
        cpumask_clear_cpu(cpu, &cpu_callin_map);
        octeon_fixup_irqs();
 
-       flush_cache_all();
+       __flush_cache_all();
        local_flush_tlb_all();
 
        return 0;
index 01880b34a209b37dd5b02d759afda543f7989e9a..64f2500d891b279493f422129fcb2093180a2527 100644 (file)
 
 #ifdef __KERNEL__
 
+#include <linux/bug.h>
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
+#include <asm/cpu-features.h>
 #include <asm/kmap_types.h>
 
 /* undef for production */
@@ -50,7 +52,7 @@ extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 
-#define flush_cache_kmaps()    flush_cache_all()
+#define flush_cache_kmaps()    BUG_ON(cpu_has_dc_aliases)
 
 extern void kmap_init(void);
 
index 4e0b6bc1165edcbae2f44663390daa2c63c017d9..348df49dcc9f35ad7ce4129bde4ab66d1a103b00 100644 (file)
@@ -30,4 +30,6 @@ u8 *bcm63xx_nvram_get_name(void);
  */
 int bcm63xx_nvram_get_mac_address(u8 *mac);
 
+int bcm63xx_nvram_get_psi_size(void);
+
 #endif /* BCM63XX_NVRAM_H */
index 79cff26d8b36f16cb333d9af2e56383db72b4222..398733e3e2cf65006541cf5c3eff6c68b06b0eb0 100644 (file)
@@ -25,8 +25,6 @@ struct jz_nand_platform_data {
        int                     num_partitions;
        struct mtd_partition    *partitions;
 
-       struct nand_ecclayout   *ecc_layout;
-
        unsigned char banks[JZ_NAND_NUM_BANKS];
 
        void (*ident_callback)(struct platform_device *, struct nand_chip *,
index 98c31e5d95793c68b50d594a5152bd0c2a730c82..108d19376bb126161eea30284b30776cd7b4d58b 100644 (file)
@@ -102,7 +102,6 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
 #include <linux/scatterlist.h>
 #include <linux/string.h>
 #include <asm/io.h>
-#include <asm-generic/pci-bridge.h>
 
 struct pci_dev;
 
index 509832a9836c9ae70f52aa422ab70f85d7f8971c..b913cd240d77f2c9d6d1ca41838c0a61829064a6 100644 (file)
@@ -421,7 +421,6 @@ static int loongson3_cpu_disable(void)
        local_irq_save(flags);
        fixup_irqs();
        local_irq_restore(flags);
-       flush_cache_all();
        local_flush_tlb_all();
 
        return 0;
index 9d293b3e9130152af49f964e530ece27e482fd7b..a63b73610fd490d1c58293885b474e929f5bd1be 100644 (file)
@@ -118,7 +118,7 @@ void msp_restart(char *command)
        /* No chip-specific reset code, just jump to the ROM reset vector */
        set_c0_status(ST0_BEV | ST0_ERL);
        change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
-       flush_cache_all();
+       __flush_cache_all();
        write_c0_wired(0);
 
        __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
index d7f755833c3f43431b48f38b296da4df34d993e6..39a0db3e2b346f4d9164eca8935a2e4d39663026 100644 (file)
@@ -73,7 +73,7 @@ static inline void software_reset(void)
        default:
                set_c0_status(ST0_BEV | ST0_ERL);
                change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
-               flush_cache_all();
+               __flush_cache_all();
                write_c0_wired(0);
                __asm__("jr     %0"::"r"(0xbfc00000));
                break;
index e4824fd04bb7449d262c1a7697b5f539b03f6bab..9faa18c4f3f702adceb4f555b05b72bc8437cf6c 100644 (file)
@@ -557,7 +557,7 @@ choice
 
 config PPC_4K_PAGES
        bool "4k page size"
-       select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
+       select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
 
 config PPC_16K_PAGES
        bool "16k page size"
@@ -566,7 +566,7 @@ config PPC_16K_PAGES
 config PPC_64K_PAGES
        bool "64k page size"
        depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
-       select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
+       select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
 
 config PPC_256K_PAGES
        bool "256k page size"
index 06f17e778c2750edd0b2380288da820f0c40f130..8d1c8162f0c1078d1842d754b64a078be8e1ea0e 100644 (file)
@@ -50,7 +50,9 @@
  * set of bits not changed in pmd_modify.
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-                        _PAGE_ACCESSED | _PAGE_THP_HUGE)
+                        _PAGE_ACCESSED | _PAGE_THP_HUGE | _PAGE_PTE | \
+                        _PAGE_SOFT_DIRTY)
+
 
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/book3s/64/hash-64k.h>
index 8204b0c393aac69285a666d1b70fae0961053614..ac07a30a7934265ed98706efb9f2c82ac8db85ce 100644 (file)
@@ -223,7 +223,6 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
 #define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
 #define pmd_young(pmd)         pte_young(pmd_pte(pmd))
-#define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
 #define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
@@ -282,6 +281,10 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);
 
+#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                                   unsigned long address, pmd_t *pmdp);
+
 #define pmd_move_must_withdraw pmd_move_must_withdraw
 struct spinlock;
 static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
index c5eb86f3d452fbe66d44ae1cce9bbfff91a8b14d..867c39b45df6ce4c1bd5a342ca314a888bb185bf 100644 (file)
@@ -81,6 +81,7 @@ struct pci_dn;
 #define EEH_PE_KEEP            (1 << 8)        /* Keep PE on hotplug   */
 #define EEH_PE_CFG_RESTRICTED  (1 << 9)        /* Block config on error */
 #define EEH_PE_REMOVED         (1 << 10)       /* Removed permanently  */
+#define EEH_PE_PRI_BUS         (1 << 11)       /* Cached primary bus   */
 
 struct eeh_pe {
        int type;                       /* PE type: PHB/Bus/Device      */
index 271fefbbe521badbc1a232b8f3583fb10833388b..9d08d8cbed1a1ec0e5893679c5e1fd9cb69dce09 100644 (file)
@@ -38,8 +38,7 @@
 
 #define KVM_MAX_VCPUS          NR_CPUS
 #define KVM_MAX_VCORES         NR_CPUS
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS
+#define KVM_USER_MEM_SLOTS     512
 
 #ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
index 54843ca5fa2bfa03871f28a3854b0d87f5685a76..78968c1ff931941d8244364cedd7f664f352e763 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/pci.h>
 #include <linux/list.h>
 #include <linux/ioport.h>
-#include <asm-generic/pci-bridge.h>
 
 struct device_node;
 
index 5654ece02c0db5ee41da441c30d8a2c5f1cdcbc4..3fa9df70aa20dfa01e90eb1a6af06171a1d8fbde 100644 (file)
@@ -383,3 +383,4 @@ SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(mlock2)
+SYSCALL(copy_file_range)
index 8e86b48d03699047dda0f493a3955c8c05e34909..32e36b16773fd876a7b246ddb9e23c28193c3570 100644 (file)
@@ -57,12 +57,14 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
 extern void hcall_tracepoint_regfunc(void);
 extern void hcall_tracepoint_unregfunc(void);
 
-TRACE_EVENT_FN(hcall_entry,
+TRACE_EVENT_FN_COND(hcall_entry,
 
        TP_PROTO(unsigned long opcode, unsigned long *args),
 
        TP_ARGS(opcode, args),
 
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(unsigned long, opcode)
        ),
@@ -76,13 +78,15 @@ TRACE_EVENT_FN(hcall_entry,
        hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
 );
 
-TRACE_EVENT_FN(hcall_exit,
+TRACE_EVENT_FN_COND(hcall_exit,
 
        TP_PROTO(unsigned long opcode, unsigned long retval,
                unsigned long *retbuf),
 
        TP_ARGS(opcode, retval, retbuf),
 
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(unsigned long, opcode)
                __field(unsigned long, retval)
index 6a5ace5fa0c8a8bbf83a73a5fca8fb0150d5d3e8..1f2594d456054262bd2bc4a9eef265c983971afa 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            379
+#define NR_syscalls            380
 
 #define __NR__exit __NR_exit
 
index 12a05652377a28799a89ae59e3d38a7ddeaade92..940290d45b087220711cd0a508ae5c4223b21951 100644 (file)
 #define __NR_userfaultfd       364
 #define __NR_membarrier                365
 #define __NR_mlock2            378
+#define __NR_copy_file_range   379
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
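
For illustration, not part of the commit: the hunks above wire up copy_file_range() as syscall number 379 on powerpc (the s390 hunks further down use 375). Since libc had no wrapper at the time, the new call is reached through syscall(2); the fallback define below reuses the powerpc number from this patch and is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_copy_file_range
#define __NR_copy_file_range 379        /* powerpc number from this patch */
#endif

int main(int argc, char **argv)
{
        int fd_in, fd_out;
        struct stat st;
        long ret;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
                return 1;
        }
        fd_in = open(argv[1], O_RDONLY);
        if (fd_in < 0 || fstat(fd_in, &st) < 0) {
                perror("open/fstat source");
                return 1;
        }
        fd_out = open(argv[2], O_CREAT | O_WRONLY | O_TRUNC, 0644);
        if (fd_out < 0) {
                perror("open destination");
                return 1;
        }
        /* NULL offsets: let the kernel advance both file offsets. */
        ret = syscall(__NR_copy_file_range, fd_in, NULL, fd_out, NULL,
                      (size_t)st.st_size, 0u);
        if (ret < 0) {
                perror("copy_file_range");
                return 1;
        }
        printf("copied %ld bytes\n", ret);
        return 0;
}
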
index 938742135ee08fc8dd058df690cfba7eacdabc0b..301be3126ae3e1bcbe6b3f8aabce07d2416c89e9 100644 (file)
@@ -564,6 +564,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
         */
        eeh_pe_state_mark(pe, EEH_PE_KEEP);
        if (bus) {
+               eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                pci_lock_rescan_remove();
                pcibios_remove_pci_devices(bus);
                pci_unlock_rescan_remove();
@@ -803,6 +804,7 @@ perm_error:
         * their PCI config any more.
         */
        if (frozen_bus) {
+               eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
 
                pci_lock_rescan_remove();
@@ -886,6 +888,7 @@ static void eeh_handle_special_event(void)
                                        continue;
 
                                /* Notify all devices to be down */
+                               eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                                bus = eeh_pe_bus_get(phb_pe);
                                eeh_pe_dev_traverse(pe,
                                        eeh_report_failure, NULL);
index 8654cb166c19b04fea827756513a1d23e4d92272..98f81800e00c1030b69c80d79eb8d43edbda9728 100644 (file)
@@ -883,32 +883,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
 const char *eeh_pe_loc_get(struct eeh_pe *pe)
 {
        struct pci_bus *bus = eeh_pe_bus_get(pe);
-       struct device_node *dn = pci_bus_to_OF_node(bus);
+       struct device_node *dn;
        const char *loc = NULL;
 
-       if (!dn)
-               goto out;
+       while (bus) {
+               dn = pci_bus_to_OF_node(bus);
+               if (!dn) {
+                       bus = bus->parent;
+                       continue;
+               }
 
-       /* PHB PE or root PE ? */
-       if (pci_is_root_bus(bus)) {
-               loc = of_get_property(dn, "ibm,loc-code", NULL);
-               if (!loc)
+               if (pci_is_root_bus(bus))
                        loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
+               else
+                       loc = of_get_property(dn, "ibm,slot-location-code",
+                                             NULL);
+
                if (loc)
-                       goto out;
+                       return loc;
 
-               /* Check the root port */
-               dn = dn->child;
-               if (!dn)
-                       goto out;
+               bus = bus->parent;
        }
 
-       loc = of_get_property(dn, "ibm,loc-code", NULL);
-       if (!loc)
-               loc = of_get_property(dn, "ibm,slot-location-code", NULL);
-
-out:
-       return loc ? loc : "N/A";
+       return "N/A";
 }
 
 /**
@@ -931,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
                bus = pe->phb->bus;
        } else if (pe->type & EEH_PE_BUS ||
                   pe->type & EEH_PE_DEVICE) {
-               if (pe->bus) {
+               if (pe->state & EEH_PE_PRI_BUS) {
                        bus = pe->bus;
                        goto out;
                }
index db475d41b57a5d4b5313d11f4494792c6070dc67..f28754c497e506cb3291462f94d422822be4e29d 100644 (file)
@@ -701,31 +701,3 @@ _GLOBAL(kexec_sequence)
        li      r5,0
        blr     /* image->start(physid, image->start, 0); */
 #endif /* CONFIG_KEXEC */
-
-#ifdef CONFIG_MODULES
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-
-#ifdef CONFIG_MODVERSIONS
-.weak __crc_TOC.
-.section "___kcrctab+TOC.","a"
-.globl __kcrctab_TOC.
-__kcrctab_TOC.:
-       .llong  __crc_TOC.
-#endif
-
-/*
- * Export a fake .TOC. since both modpost and depmod will complain otherwise.
- * Both modpost and depmod strip the leading . so we do the same here.
- */
-.section "__ksymtab_strings","a"
-__kstrtab_TOC.:
-       .asciz "TOC."
-
-.section "___ksymtab+TOC.","a"
-/* This symbol name is important: it's used by modpost to find exported syms */
-.globl __ksymtab_TOC.
-__ksymtab_TOC.:
-       .llong 0 /* .value */
-       .llong __kstrtab_TOC.
-#endif /* ELFv2 */
-#endif /* MODULES */
index 59663af9315fc16123cd4aacb090a6efd8cbc33d..08b7a40de5f85ab1c6f7e5ed205ca52edd5ab12c 100644 (file)
@@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers,
                }
 }
 
-/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
+/*
+ * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
+ * seem to be defined (value set later).
+ */
 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 {
        unsigned int i;
@@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
        for (i = 1; i < numsyms; i++) {
                if (syms[i].st_shndx == SHN_UNDEF) {
                        char *name = strtab + syms[i].st_name;
-                       if (name[0] == '.')
-                               memmove(name, name+1, strlen(name));
+                       if (name[0] == '.') {
+                               if (strcmp(name+1, "TOC.") == 0)
+                                       syms[i].st_shndx = SHN_ABS;
+                               syms[i].st_name++;
+                       }
                }
        }
 }
@@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
        numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
 
        for (i = 1; i < numsyms; i++) {
-               if (syms[i].st_shndx == SHN_UNDEF
+               if (syms[i].st_shndx == SHN_ABS
                    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
                        return &syms[i];
        }
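
For illustration, not part of the commit: the reworked dedotify() no longer memmove()s the symbol name; it bumps st_name past the leading '.' and marks an undefined ".TOC." as SHN_ABS, so that the adjusted find_dot_toc() can locate it and fill in its value later. A small user-space sketch on a toy symbol table (strings and indexes are made up for the example):

#include <elf.h>
#include <stdio.h>
#include <string.h>

static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
        unsigned int i;

        for (i = 1; i < numsyms; i++) {
                if (syms[i].st_shndx == SHN_UNDEF) {
                        char *name = strtab + syms[i].st_name;
                        if (name[0] == '.') {
                                if (strcmp(name + 1, "TOC.") == 0)
                                        syms[i].st_shndx = SHN_ABS;
                                syms[i].st_name++;
                        }
                }
        }
}

int main(void)
{
        char strtab[] = "\0.TOC.\0.my_func\0";
        Elf64_Sym syms[3] = {
                { 0 },
                { .st_name = 1, .st_shndx = SHN_UNDEF },        /* ".TOC."    */
                { .st_name = 7, .st_shndx = SHN_UNDEF },        /* ".my_func" */
        };

        dedotify(syms, 3, strtab);
        printf("%s shndx=%u\n", strtab + syms[1].st_name, syms[1].st_shndx);   /* TOC. shndx=65521 (SHN_ABS) */
        printf("%s shndx=%u\n", strtab + syms[2].st_name, syms[2].st_shndx);   /* my_func shndx=0 (still undefined) */
        return 0;
}
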
index 774a253ca4e1eaa2ab9c8277d22cbcae0d85f676..9bf7031a67ffc0aedf742e7a9da6afebeb8fe64b 100644 (file)
@@ -377,15 +377,12 @@ no_seg_found:
 
 static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 {
-       struct kvmppc_vcpu_book3s *vcpu_book3s;
        u64 esid, esid_1t;
        int slb_nr;
        struct kvmppc_slb *slbe;
 
        dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);
 
-       vcpu_book3s = to_book3s(vcpu);
-
        esid = GET_ESID(rb);
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;
index cff207b72c46ae0b54cf48db5e4a3aa828f2fc29..baeddb06811d738a6ea8be4ce920e5ea39a9645a 100644 (file)
@@ -833,6 +833,24 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
        vcpu->stat.sum_exits++;
 
+       /*
+        * This can happen if an interrupt occurs in the last stages
+        * of guest entry or the first stages of guest exit (i.e. after
+        * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
+        * and before setting it to KVM_GUEST_MODE_HOST_HV).
+        * That can happen due to a bug, or due to a machine check
+        * occurring at just the wrong time.
+        */
+       if (vcpu->arch.shregs.msr & MSR_HV) {
+               printk(KERN_EMERG "KVM trap in HV mode!\n");
+               printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+                       vcpu->arch.trap, kvmppc_get_pc(vcpu),
+                       vcpu->arch.shregs.msr);
+               kvmppc_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               run->hw.hardware_exit_reason = vcpu->arch.trap;
+               return RESUME_HOST;
+       }
        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;
        switch (vcpu->arch.trap) {
index 3c6badcd53efced649af69ff9a4adf823b28d9a0..6ee26de9a1ded02e0e08f376fd612049eedee0b4 100644 (file)
@@ -2153,7 +2153,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
        /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
 2:     rlwimi  r5, r4, 5, DAWRX_DR | DAWRX_DW
-       rlwimi  r5, r4, 1, DAWRX_WT
+       rlwimi  r5, r4, 2, DAWRX_WT
        clrrdi  r4, r4, 3
        std     r4, VCPU_DAWR(r3)
        std     r5, VCPU_DAWRX(r3)
@@ -2404,6 +2404,8 @@ machine_check_realmode:
         * guest as machine check causing guest to crash.
         */
        ld      r11, VCPU_MSR(r9)
+       rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
+       bne     mc_cont                 /* if so, exit to host */
        andi.   r10, r11, MSR_RI        /* check for unrecoverable exception */
        beq     1f                      /* Deliver a machine check to guest */
        ld      r10, VCPU_PC(r9)
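
For illustration, not part of the commit: the rldicl added above and the C-side test in the previous hunk (vcpu->arch.shregs.msr & MSR_HV) examine the same MSR bit. The sketch below assumes MSR_HV_LG is 60, as defined in asm/reg.h; that value does not appear in this diff.

#include <assert.h>
#include <stdint.h>

#define MSR_HV_LG       60
#define MSR_HV          (1UL << MSR_HV_LG)

/* rldicl. r0,r11,64-MSR_HV_LG,63: rotate left by 4, keep only the low bit. */
static uint64_t rldicl_hv(uint64_t msr)
{
        uint64_t rot = (msr << (64 - MSR_HV_LG)) | (msr >> MSR_HV_LG);
        return rot & 1;
}

int main(void)
{
        for (uint64_t msr = 0; msr < 4; msr++) {
                uint64_t v = (msr << 59) | msr;         /* exercise bits around HV */
                assert(!!(v & MSR_HV) == rldicl_hv(v)); /* both extract MSR[HV] */
        }
        return 0;
}
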
index 6fd2405c7f4aa2e5bdc5ae417a409b33b6c5f7bd..a3b182dcb823640540fcaeac38bc7d89550fddd6 100644 (file)
@@ -919,21 +919,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                                r = -ENXIO;
                                break;
                        }
-                       vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+                       val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
-                       vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+                       val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
                        break;
                case KVM_REG_PPC_VRSAVE:
-                       if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-                               r = -ENXIO;
-                               break;
-                       }
-                       vcpu->arch.vrsave = set_reg_val(reg->id, val);
+                       val = get_reg_val(reg->id, vcpu->arch.vrsave);
                        break;
 #endif /* CONFIG_ALTIVEC */
                default:
@@ -974,17 +970,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                                r = -ENXIO;
                                break;
                        }
-                       val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+                       vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
-                       val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+                       vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_VRSAVE:
-                       val = get_reg_val(reg->id, vcpu->arch.vrsave);
+                       if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+                               r = -ENXIO;
+                               break;
+                       }
+                       vcpu->arch.vrsave = set_reg_val(reg->id, val);
                        break;
 #endif /* CONFIG_ALTIVEC */
                default:
index 22d94c3e6fc43bf7afbd63bfd483074a9e18589a..d0f0a514b04ed66da35d1ae6b27b964285ef2aec 100644 (file)
@@ -560,12 +560,12 @@ subsys_initcall(add_system_ram_resources);
  */
 int devmem_is_allowed(unsigned long pfn)
 {
+       if (page_is_rtas_user_buf(pfn))
+               return 1;
        if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
-       if (page_is_rtas_user_buf(pfn))
-               return 1;
        return 0;
 }
 #endif /* CONFIG_STRICT_DEVMEM */
index 3124a20d0fab7a66b3a170356037da0d18c9da85..593341f2cf11ef8e07a0c07c561e9be762dbdbf5 100644 (file)
@@ -646,6 +646,28 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
        return pgtable;
 }
 
+void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                            unsigned long address, pmd_t *pmdp)
+{
+       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+       VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
+
+       /*
+        * We can't mark the pmd none here, because that will cause a race
+        * against exit_mmap. We need to keep the pmd marked TRANS HUGE while
+        * we split, but at the same time we want the rest of the ppc64 code
+        * not to insert a hash pte for it, because we will be modifying the
+        * deposited pgtable in the caller of this function. Hence clear
+        * _PAGE_USER so that fault handling moves to a higher-level function,
+        * which serializes against the ptl. We need to flush the existing
+        * hash pte entries here even though the translation is still valid,
+        * because we will withdraw the pgtable_t after this.
+        */
+       pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+}
+
+
 /*
  * set a new huge pmd. We should not be called for updating
  * an existing pmd entry. That should go via pmd_hugepage_update.
@@ -663,10 +685,19 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
 
+/*
+ * We use this to invalidate a pmdp entry before switching from a
+ * hugepte to regular pmd entry.
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
 {
-       pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+       pmd_hugepage_update(vma->vm_mm, address, pmdp, ~0UL, 0);
+       /*
+        * This ensures that generic code that relies on IRQ disabling
+        * to prevent a parallel THP split works as expected.
+        */
+       kick_all_cpus_sync();
 }
 
 /*
index 7d5e295255b7b25934d51792fb6876c16462d420..9958ba8bf0d27fca74a182546170a487edb88346 100644 (file)
@@ -816,7 +816,7 @@ static struct power_pmu power8_pmu = {
        .get_constraint         = power8_get_constraint,
        .get_alternatives       = power8_get_alternatives,
        .disable_pmc            = power8_disable_pmc,
-       .flags                  = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
+       .flags                  = PPMU_HAS_SIER | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power8_generic_events),
        .generic_events         = power8_generic_events,
        .cache_events           = &power8_cache_events,
index 5f152b95ca0c8493536d787a51d741ae628adb8a..87f47e55aab65ac234df1d67c926ca17b1517f06 100644 (file)
@@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
         * PCI devices of the PE are expected to be removed prior
         * to PE reset.
         */
-       if (!edev->pe->bus)
+       if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
                edev->pe->bus = pci_find_bus(hose->global_number,
                                             pdn->busno);
+               if (edev->pe->bus)
+                       edev->pe->state |= EEH_PE_PRI_BUS;
+       }
 
        /*
         * Enable EEH explicitly so that we will do EEH check
index 573ae1994097fb91e15e3f7f6351fe1e73b35c59..f90dc04395bf47bcc0e662a7a1c17526220ccda2 100644 (file)
@@ -3180,6 +3180,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
 
 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
        .dma_dev_setup = pnv_pci_dma_dev_setup,
+       .dma_bus_setup = pnv_pci_dma_bus_setup,
 #ifdef CONFIG_PCI_MSI
        .setup_msi_irqs = pnv_setup_msi_irqs,
        .teardown_msi_irqs = pnv_teardown_msi_irqs,
index 2f55c86df703554bfd9541a44f8ba26bec072729..cf8417dd4dfc40f0484438e377aca98d5305928d 100644 (file)
@@ -760,6 +760,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
                phb->dma_dev_setup(phb, pdev);
 }
 
+void pnv_pci_dma_bus_setup(struct pci_bus *bus)
+{
+       struct pci_controller *hose = bus->sysdata;
+       struct pnv_phb *phb = hose->private_data;
+       struct pnv_ioda_pe *pe;
+
+       list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+               if (!(pe->flags | (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
+                       continue;
+
+               if (!pe->pbus)
+                       continue;
+
+               if (bus->number == ((pe->rid >> 8) & 0xFF)) {
+                       pe->pbus = bus;
+                       break;
+               }
+       }
+}
+
 void pnv_pci_shutdown(void)
 {
        struct pci_controller *hose;
index 7f56313e8d7223dfd9f22b927c807b20516cf941..00691a9b99af67b09967c73b39b652b973392f06 100644 (file)
@@ -242,6 +242,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
 extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
 
 extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
+extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
 extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
 extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
 
index 16aa0c779e0762e210fe6652a0a5efda24771381..595a275c36f87d5ea4fa58d0a580eb36c601ef34 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/types.h>
 
+#define ARCH_IRQ_ENABLED       (3UL << (BITS_PER_LONG - 8))
+
 /* store then OR system mask. */
 #define __arch_local_irq_stosm(__or)                                   \
 ({                                                                     \
@@ -54,14 +56,17 @@ static inline notrace void arch_local_irq_enable(void)
        __arch_local_irq_stosm(0x03);
 }
 
+/* This only restores external and I/O interrupt state */
 static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
-       __arch_local_irq_ssm(flags);
+       /* only disabled->disabled and disabled->enabled is valid */
+       if (flags & ARCH_IRQ_ENABLED)
+               arch_local_irq_enable();
 }
 
 static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
-       return !(flags & (3UL << (BITS_PER_LONG - 8)));
+       return !(flags & ARCH_IRQ_ENABLED);
 }
 
 static inline notrace bool arch_irqs_disabled(void)
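
For illustration, not part of the patch: a small user-space model of the new semantics. ARCH_IRQ_ENABLED covers the external and I/O mask bits of the PSW, and arch_local_irq_restore() now only performs the disabled-to-enabled transition; it never disables, because it is always reached with interrupts already off.

#include <stdio.h>

#define BITS_PER_LONG           64
#define ARCH_IRQ_ENABLED        (3UL << (BITS_PER_LONG - 8))

static unsigned long psw_mask;                  /* stand-in for the PSW system mask */

static unsigned long irq_save(void)             /* models save + disable */
{
        unsigned long flags = psw_mask;
        psw_mask &= ~ARCH_IRQ_ENABLED;
        return flags;
}

static void irq_restore(unsigned long flags)    /* models the new arch_local_irq_restore() */
{
        if (flags & ARCH_IRQ_ENABLED)
                psw_mask |= ARCH_IRQ_ENABLED;   /* enable; never disable */
}

int main(void)
{
        psw_mask = ARCH_IRQ_ENABLED;            /* start with interrupts enabled */
        unsigned long flags = irq_save();       /* disable, remember old state */
        irq_restore(flags);                     /* re-enables, since they were enabled */
        printf("enabled after restore: %d\n", !!(psw_mask & ARCH_IRQ_ENABLED));
        return 0;
}
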
index 6742414dbd6f30b66451f04602cb55de0d20f41c..8959ebb6d2c9eb35538b835a39522585321f9af4 100644 (file)
@@ -546,7 +546,6 @@ struct kvm_vcpu_arch {
        struct kvm_s390_sie_block *sie_block;
        unsigned int      host_acrs[NUM_ACRS];
        struct fpu        host_fpregs;
-       struct fpu        guest_fpregs;
        struct kvm_s390_local_interrupt local_int;
        struct hrtimer    ckc_timer;
        struct kvm_s390_pgm_info pgm;
index 1a9a98de5bdebc346c469f55818f08ed33b576e7..69aa18be61afcfc1e764c5c51ec5cbe5d0fe2043 100644 (file)
@@ -8,10 +8,13 @@
 #include <asm/pci_insn.h>
 
 /* I/O Map */
-#define ZPCI_IOMAP_MAX_ENTRIES         0x7fff
-#define ZPCI_IOMAP_ADDR_BASE           0x8000000000000000ULL
-#define ZPCI_IOMAP_ADDR_IDX_MASK       0x7fff000000000000ULL
-#define ZPCI_IOMAP_ADDR_OFF_MASK       0x0000ffffffffffffULL
+#define ZPCI_IOMAP_SHIFT               48
+#define ZPCI_IOMAP_ADDR_BASE           0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_OFF_MASK       ((1UL << ZPCI_IOMAP_SHIFT) - 1)
+#define ZPCI_IOMAP_MAX_ENTRIES                                                 \
+       ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+#define ZPCI_IOMAP_ADDR_IDX_MASK                                               \
+       (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
 
 struct zpci_iomap_entry {
        u32 fh;
@@ -21,8 +24,9 @@ struct zpci_iomap_entry {
 
 extern struct zpci_iomap_entry *zpci_iomap_start;
 
+#define ZPCI_ADDR(idx) (ZPCI_IOMAP_ADDR_BASE | ((u64) idx << ZPCI_IOMAP_SHIFT))
 #define ZPCI_IDX(addr)                                                         \
-       (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
+       (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT)
 #define ZPCI_OFFSET(addr)                                                      \
        ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
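
For illustration, not part of the patch: re-deriving the new layout in user space (assuming a 64-bit unsigned long, as on s390). The iomap table grows from 0x7fff to 0x8000 entries, and an address decomposes cleanly into index and offset through the new masks.

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define ZPCI_IOMAP_SHIFT                48
#define ZPCI_IOMAP_ADDR_BASE            0x8000000000000000UL
#define ZPCI_IOMAP_ADDR_OFF_MASK        ((1UL << ZPCI_IOMAP_SHIFT) - 1)
#define ZPCI_IOMAP_MAX_ENTRIES \
        ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
#define ZPCI_IOMAP_ADDR_IDX_MASK        (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)

#define ZPCI_ADDR(idx)    (ZPCI_IOMAP_ADDR_BASE | ((uint64_t)(idx) << ZPCI_IOMAP_SHIFT))
#define ZPCI_IDX(addr)    (((uint64_t)(addr) & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT)
#define ZPCI_OFFSET(addr) ((uint64_t)(addr) & ZPCI_IOMAP_ADDR_OFF_MASK)

int main(void)
{
        uint64_t addr = ZPCI_ADDR(5) + 0x1000;  /* entry 5, offset 0x1000 */

        printf("max entries: %lu\n", ZPCI_IOMAP_MAX_ENTRIES);  /* 32768 */
        assert(ZPCI_IDX(addr) == 5);
        assert(ZPCI_OFFSET(addr) == 0x1000);
        return 0;
}
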
 
index f16debf6a612932a7312428465d9ceb1bee41915..1c4fe129486d2e0a9282dc72a3735c4727277b52 100644 (file)
@@ -166,14 +166,14 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
  */
 #define start_thread(regs, new_psw, new_stackp) do {                   \
        regs->psw.mask  = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;    \
-       regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
+       regs->psw.addr  = new_psw;                                      \
        regs->gprs[15]  = new_stackp;                                   \
        execve_tail();                                                  \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do {                 \
        regs->psw.mask  = PSW_USER_BITS | PSW_MASK_BA;                  \
-       regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
+       regs->psw.addr  = new_psw;                                      \
        regs->gprs[15]  = new_stackp;                                   \
        crst_table_downgrade(current->mm, 1UL << 31);                   \
        execve_tail();                                                  \
index f00cd35c8ac4634e9270ff81e73e603ccc5573f0..99bc456cc26a3087cbec7f35ca82959a94cf2ab3 100644 (file)
@@ -149,7 +149,7 @@ static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
 #define arch_has_block_step()  (1)
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
-#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
+#define instruction_pointer(regs) ((regs)->psw.addr)
 #define user_stack_pointer(regs)((regs)->gprs[15])
 #define profile_pc(regs) instruction_pointer(regs)
 
@@ -161,7 +161,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 static inline void instruction_pointer_set(struct pt_regs *regs,
                                           unsigned long val)
 {
-       regs->psw.addr = val | PSW_ADDR_AMODE;
+       regs->psw.addr = val;
 }
 
 int regs_query_register_offset(const char *name);
@@ -171,7 +171,7 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
 
 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
-       return regs->gprs[15] & PSW_ADDR_INSN;
+       return regs->gprs[15];
 }
 
 #endif /* __ASSEMBLY__ */
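
For context, not part of the patch: the PSW_ADDR_INSN / PSW_ADDR_AMODE removals in this and the surrounding s390 hunks rely on the 64-bit-only definitions making both macros no-ops. A tiny check of that claim, using the old 64-bit values (taken from the pre-cleanup uapi ptrace.h; they do not appear in this diff):

#include <assert.h>

#define PSW_ADDR_INSN   0xffffffffffffffffUL    /* old 64-bit mask: keeps everything */
#define PSW_ADDR_AMODE  0x0UL                   /* old 64-bit amode bit: adds nothing */

int main(void)
{
        unsigned long addr = 0x1234beefUL;

        assert((addr & PSW_ADDR_INSN) == addr);         /* masking was a no-op */
        assert((addr | PSW_ADDR_AMODE) == addr);        /* OR-ing was a no-op */
        return 0;
}
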
index 34ec202472c6cadb5b973b54c6c995bb6d2e897c..ab3aa6875a59ca4dc3760663d79189090286d7ab 100644 (file)
 #define __NR_recvmsg           372
 #define __NR_shutdown          373
 #define __NR_mlock2            374
-#define NR_syscalls 375
+#define __NR_copy_file_range   375
+#define NR_syscalls 376
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index fac4eeddef91fe3acb21e0108069f3f3a70ef72a..ae2cda5eee5a99b35b73e5b7868edd44cba1c6d2 100644 (file)
@@ -177,3 +177,4 @@ COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
 COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
 COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
 COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
+COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
index a92b39fd0e63d953db2d94bbfb090d9f257e867f..3986c9f6219174bf7462b88579b5b030d830185e 100644 (file)
@@ -59,8 +59,6 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
        struct save_area *sa;
 
        sa = (void *) memblock_alloc(sizeof(*sa), 8);
-       if (!sa)
-               return NULL;
        if (is_boot_cpu)
                list_add(&sa->list, &dump_save_areas);
        else
index 6fca0e46464e01842f613584e4c82e5ca1fe4270..c890a5589e59583bb06b1d7dffe7226069e522a0 100644 (file)
@@ -1470,7 +1470,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
                except_str = "*";
        else
                except_str = "-";
-       caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
+       caller = (unsigned long) entry->caller;
        rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p  ",
                      area, (long long)time_spec.tv_sec,
                      time_spec.tv_nsec / 1000, level, except_str,
index dc8e20473484ce32f037f1e9115779a8a9d7d0cd..02bd02ff648b5ab570c5ffe920b27bbfbc3d5fd3 100644 (file)
@@ -34,22 +34,21 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
        unsigned long addr;
 
        while (1) {
-               sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
-               addr = sf->gprs[8] & PSW_ADDR_INSN;
+               addr = sf->gprs[8];
                printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
-                       sp = sf->back_chain & PSW_ADDR_INSN;
+                       sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
-                       addr = sf->gprs[8] & PSW_ADDR_INSN;
+                       addr = sf->gprs[8];
                        printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
                }
                /* Zero backchain detected, check for interrupt frame. */
@@ -57,7 +56,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
-               addr = regs->psw.addr & PSW_ADDR_INSN;
+               addr = regs->psw.addr;
                printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
                low = sp;
                sp = regs->gprs[15];
index 20a5caf6d981d9f81852b015765662c745ed6b3b..c55576bbaa1f7555479994d6848c9aa5b60f189b 100644 (file)
@@ -252,14 +252,14 @@ static void early_pgm_check_handler(void)
        unsigned long addr;
 
        addr = S390_lowcore.program_old_psw.addr;
-       fixup = search_exception_tables(addr & PSW_ADDR_INSN);
+       fixup = search_exception_tables(addr);
        if (!fixup)
                disabled_wait(0);
        /* Disable low address protection before storing into lowcore. */
        __ctl_store(cr0, 0, 0);
        cr0_new = cr0 & ~(1UL << 28);
        __ctl_load(cr0_new, 0, 0);
-       S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
+       S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
        __ctl_load(cr0, 0, 0);
 }
 
@@ -268,9 +268,9 @@ static noinline __init void setup_lowcore_early(void)
        psw_t psw;
 
        psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
-       psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
+       psw.addr = (unsigned long) s390_base_ext_handler;
        S390_lowcore.external_new_psw = psw;
-       psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
+       psw.addr = (unsigned long) s390_base_pgm_handler;
        S390_lowcore.program_new_psw = psw;
        s390_base_pgm_handler_fn = early_pgm_check_handler;
 }
index e0eaf11134b44706e1223daca7bd66e938bf37f0..0f7bfeba6da6a3632f2705ebbaccc065ffd356a2 100644 (file)
@@ -203,7 +203,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
                goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
-       ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
+       ip -= MCOUNT_INSN_SIZE;
        trace.func = ip;
        trace.depth = current->curr_ret_stack + 1;
        /* Only trace if the calling function expects to. */
index 0a5a6b661b938743684a6aec8d1d5ebcca537be1..f20abdb5630adceaa75690907ddf7cdedc098146 100644 (file)
@@ -2057,12 +2057,12 @@ void s390_reset_system(void)
        /* Set new machine check handler */
        S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
        S390_lowcore.mcck_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
+               (unsigned long) s390_base_mcck_handler;
 
        /* Set new program check handler */
        S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
        S390_lowcore.program_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
+               (unsigned long) s390_base_pgm_handler;
 
        /*
         * Clear subchannel ID and number to signal new kernel that no CCW or
index 389db56a2208bfa118d6f31aee9b52d090031960..250f5972536a8948a916e56a4514e942e03bde4a 100644 (file)
@@ -226,7 +226,7 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb,
        __ctl_load(per_kprobe, 9, 11);
        regs->psw.mask |= PSW_MASK_PER;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
-       regs->psw.addr = ip | PSW_ADDR_AMODE;
+       regs->psw.addr = ip;
 }
 NOKPROBE_SYMBOL(enable_singlestep);
 
@@ -238,7 +238,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb,
        __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
        regs->psw.mask &= ~PSW_MASK_PER;
        regs->psw.mask |= kcb->kprobe_saved_imask;
-       regs->psw.addr = ip | PSW_ADDR_AMODE;
+       regs->psw.addr = ip;
 }
 NOKPROBE_SYMBOL(disable_singlestep);
 
@@ -310,7 +310,7 @@ static int kprobe_handler(struct pt_regs *regs)
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();
-       p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));
+       p = get_kprobe((void *)(regs->psw.addr - 2));
 
        if (p) {
                if (kprobe_running()) {
@@ -460,7 +460,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
                        break;
        }
 
-       regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+       regs->psw.addr = orig_ret_address;
 
        pop_kprobe(get_kprobe_ctlblk());
        kretprobe_hash_unlock(current, &flags);
@@ -490,7 +490,7 @@ NOKPROBE_SYMBOL(trampoline_probe_handler);
 static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
+       unsigned long ip = regs->psw.addr;
        int fixup = probe_get_fixup_type(p->ainsn.insn);
 
        /* Check if the kprobes location is an enabled ftrace caller */
@@ -605,9 +605,9 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
-               entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+               entry = search_exception_tables(regs->psw.addr);
                if (entry) {
-                       regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
+                       regs->psw.addr = extable_fixup(entry);
                        return 1;
                }
 
@@ -683,7 +683,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
 
        /* setup return addr to the jprobe handler routine */
-       regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
+       regs->psw.addr = (unsigned long) jp->entry;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 
        /* r15 is the stack pointer */
index 61595c1f0a0fe7d502bb6c213519929effe48b02..0943b11a2f6e22c088dc13d3b8a0de9b4e6ef92c 100644 (file)
@@ -74,7 +74,7 @@ static unsigned long guest_is_user_mode(struct pt_regs *regs)
 
 static unsigned long instruction_pointer_guest(struct pt_regs *regs)
 {
-       return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
+       return sie_block(regs)->gpsw.addr;
 }
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
@@ -231,29 +231,27 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
        struct pt_regs *regs;
 
        while (1) {
-               sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
-               perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
+               perf_callchain_store(entry, sf->gprs[8]);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
-                       sp = sf->back_chain & PSW_ADDR_INSN;
+                       sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
-                       perf_callchain_store(entry,
-                                            sf->gprs[8] & PSW_ADDR_INSN);
+                       perf_callchain_store(entry, sf->gprs[8]);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
-               perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
+               perf_callchain_store(entry, sf->gprs[8]);
                low = sp;
                sp = regs->gprs[15];
        }
@@ -262,12 +260,13 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
                           struct pt_regs *regs)
 {
-       unsigned long head;
+       unsigned long head, frame_size;
        struct stack_frame *head_sf;
 
        if (user_mode(regs))
                return;
 
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        head = regs->gprs[15];
        head_sf = (struct stack_frame *) head;
 
@@ -275,8 +274,9 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
                return;
 
        head = head_sf->back_chain;
-       head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
-                            S390_lowcore.async_stack);
+       head = __store_trace(entry, head,
+                            S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+                            S390_lowcore.async_stack + frame_size);
 
        __store_trace(entry, head, S390_lowcore.thread_info,
                      S390_lowcore.thread_info + THREAD_SIZE);
index 114ee8b96f17d31d5dba657c704bc34a2e1b9a9e..2bba7df4ac519fdc773551600b8bab2b9fef4789 100644 (file)
@@ -56,10 +56,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
                return 0;
        low = task_stack_page(tsk);
        high = (struct stack_frame *) task_pt_regs(tsk);
-       sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
+       sf = (struct stack_frame *) tsk->thread.ksp;
        if (sf <= low || sf > high)
                return 0;
-       sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+       sf = (struct stack_frame *) sf->back_chain;
        if (sf <= low || sf > high)
                return 0;
        return sf->gprs[8];
@@ -154,7 +154,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
                memset(&frame->childregs, 0, sizeof(struct pt_regs));
                frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
                                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-               frame->childregs.psw.addr = PSW_ADDR_AMODE |
+               frame->childregs.psw.addr =
                                (unsigned long) kernel_thread_starter;
                frame->childregs.gprs[9] = new_stackp; /* function */
                frame->childregs.gprs[10] = arg;
@@ -220,14 +220,14 @@ unsigned long get_wchan(struct task_struct *p)
                return 0;
        low = task_stack_page(p);
        high = (struct stack_frame *) task_pt_regs(p);
-       sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
+       sf = (struct stack_frame *) p->thread.ksp;
        if (sf <= low || sf > high)
                return 0;
        for (count = 0; count < 16; count++) {
-               sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+               sf = (struct stack_frame *) sf->back_chain;
                if (sf <= low || sf > high)
                        return 0;
-               return_address = sf->gprs[8] & PSW_ADDR_INSN;
+               return_address = sf->gprs[8];
                if (!in_sched_functions(return_address))
                        return return_address;
        }
index 01c37b36caf964ec4616048c00d6602607a9abca..49b1c13bf6c96372d60815e8549c10b15b40914f 100644 (file)
@@ -84,7 +84,7 @@ void update_cr_regs(struct task_struct *task)
                if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
                        new.control |= PER_EVENT_IFETCH;
                new.start = 0;
-               new.end = PSW_ADDR_INSN;
+               new.end = -1UL;
        }
 
        /* Take care of the PER enablement bit in the PSW. */
@@ -148,7 +148,7 @@ static inline unsigned long __peek_user_per(struct task_struct *child,
        else if (addr == (addr_t) &dummy->cr11)
                /* End address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
-                       PSW_ADDR_INSN : child->thread.per_user.end;
+                       -1UL : child->thread.per_user.end;
        else if (addr == (addr_t) &dummy->bits)
                /* Single-step bit. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
@@ -495,8 +495,6 @@ long arch_ptrace(struct task_struct *child, long request,
                }
                return 0;
        default:
-               /* Removing high order bit from addr (only for 31 bit). */
-               addr &= PSW_ADDR_INSN;
                return ptrace_request(child, request, addr, data);
        }
 }
index c6878fbbcf1324661ac52f4f55ba1e40840fbd89..9220db5c996aa5250ca4d904361c51cec33d7827 100644 (file)
@@ -301,25 +301,21 @@ static void __init setup_lowcore(void)
        BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
        lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
        lc->restart_psw.mask = PSW_KERNEL_BITS;
-       lc->restart_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+       lc->restart_psw.addr = (unsigned long) restart_int_handler;
        lc->external_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
-       lc->external_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
+       lc->external_new_psw.addr = (unsigned long) ext_int_handler;
        lc->svc_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-       lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
+       lc->svc_new_psw.addr = (unsigned long) system_call;
        lc->program_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
-       lc->program_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
+       lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
        lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
-       lc->mcck_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
+       lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
        lc->io_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
-       lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
+       lc->io_new_psw.addr = (unsigned long) io_int_handler;
        lc->clock_comparator = -1ULL;
        lc->kernel_stack = ((unsigned long) &init_thread_union)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
index 028cc46cb82ad77ac21a7c9b9ea897e2312480d4..d82562cf0a0e1f5057987f59f6d541ea374ea683 100644 (file)
@@ -331,13 +331,13 @@ static int setup_frame(int sig, struct k_sigaction *ka,
        /* Set up to return from userspace.  If provided, use a stub
           already in userspace.  */
        if (ka->sa.sa_flags & SA_RESTORER) {
-               restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
+               restorer = (unsigned long) ka->sa.sa_restorer;
        } else {
                /* Signal frame without vector registers are short ! */
                __u16 __user *svc = (void __user *) frame + frame_size - 2;
                if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
                        return -EFAULT;
-               restorer = (unsigned long) svc | PSW_ADDR_AMODE;
+               restorer = (unsigned long) svc;
        }
 
        /* Set up registers for signal handler */
@@ -347,7 +347,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
        regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
                (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
-       regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+       regs->psw.addr = (unsigned long) ka->sa.sa_handler;
 
        regs->gprs[2] = sig;
        regs->gprs[3] = (unsigned long) &frame->sc;
@@ -394,13 +394,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        /* Set up to return from userspace.  If provided, use a stub
           already in userspace.  */
        if (ksig->ka.sa.sa_flags & SA_RESTORER) {
-               restorer = (unsigned long)
-                       ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE;
+               restorer = (unsigned long) ksig->ka.sa.sa_restorer;
        } else {
                __u16 __user *svc = &frame->svc_insn;
                if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
                        return -EFAULT;
-               restorer = (unsigned long) svc | PSW_ADDR_AMODE;
+               restorer = (unsigned long) svc;
        }
 
        /* Create siginfo on the signal stack */
@@ -426,7 +425,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
                (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
-       regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler | PSW_ADDR_AMODE;
+       regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler;
 
        regs->gprs[2] = ksig->sig;
        regs->gprs[3] = (unsigned long) &frame->info;
index a13468b9a9133b9f46d8c90575ea0b5ffcd41e04..3c65a8eae34d35b6a670066db2bc80c8fa3ccf67 100644 (file)
@@ -623,8 +623,6 @@ void __init smp_save_dump_cpus(void)
                return;
        /* Allocate a page as dumping area for the store status sigps */
        page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
-       if (!page)
-               panic("could not allocate memory for save area\n");
        /* Set multi-threading state to the previous system. */
        pcpu_set_smt(sclp.mtid_prev);
        boot_cpu_addr = stap();
index 1785cd82253caec869abbb7f5d7804b42effad04..8f64ebd63767c7a1a3e434b994b65110e890f741 100644 (file)
@@ -21,12 +21,11 @@ static unsigned long save_context_stack(struct stack_trace *trace,
        unsigned long addr;
 
        while(1) {
-               sp &= PSW_ADDR_INSN;
                if (sp < low || sp > high)
                        return sp;
                sf = (struct stack_frame *)sp;
                while(1) {
-                       addr = sf->gprs[8] & PSW_ADDR_INSN;
+                       addr = sf->gprs[8];
                        if (!trace->skip)
                                trace->entries[trace->nr_entries++] = addr;
                        else
@@ -34,7 +33,7 @@ static unsigned long save_context_stack(struct stack_trace *trace,
                        if (trace->nr_entries >= trace->max_entries)
                                return sp;
                        low = sp;
-                       sp = sf->back_chain & PSW_ADDR_INSN;
+                       sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
@@ -46,7 +45,7 @@ static unsigned long save_context_stack(struct stack_trace *trace,
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *)sp;
-               addr = regs->psw.addr & PSW_ADDR_INSN;
+               addr = regs->psw.addr;
                if (savesched || !in_sched_functions(addr)) {
                        if (!trace->skip)
                                trace->entries[trace->nr_entries++] = addr;
@@ -60,33 +59,43 @@ static unsigned long save_context_stack(struct stack_trace *trace,
        }
 }
 
-void save_stack_trace(struct stack_trace *trace)
+static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
 {
-       register unsigned long sp asm ("15");
-       unsigned long orig_sp, new_sp;
+       unsigned long new_sp, frame_size;
 
-       orig_sp = sp & PSW_ADDR_INSN;
-       new_sp = save_context_stack(trace, orig_sp,
-                                   S390_lowcore.panic_stack - PAGE_SIZE,
-                                   S390_lowcore.panic_stack, 1);
-       if (new_sp != orig_sp)
-               return;
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+       new_sp = save_context_stack(trace, sp,
+                       S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
+                       S390_lowcore.panic_stack + frame_size, 1);
        new_sp = save_context_stack(trace, new_sp,
-                                   S390_lowcore.async_stack - ASYNC_SIZE,
-                                   S390_lowcore.async_stack, 1);
-       if (new_sp != orig_sp)
-               return;
+                       S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+                       S390_lowcore.async_stack + frame_size, 1);
        save_context_stack(trace, new_sp,
                           S390_lowcore.thread_info,
                           S390_lowcore.thread_info + THREAD_SIZE, 1);
 }
+
+void save_stack_trace(struct stack_trace *trace)
+{
+       register unsigned long r15 asm ("15");
+       unsigned long sp;
+
+       sp = r15;
+       __save_stack_trace(trace, sp);
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
        unsigned long sp, low, high;
 
-       sp = tsk->thread.ksp & PSW_ADDR_INSN;
+       sp = tsk->thread.ksp;
+       if (tsk == current) {
+               /* Get current stack pointer. */
+               asm volatile("la %0,0(15)" : "=a" (sp));
+       }
        low = (unsigned long) task_stack_page(tsk);
        high = (unsigned long) task_pt_regs(tsk);
        save_context_stack(trace, sp, low, high, 0);
@@ -94,3 +103,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+       unsigned long sp;
+
+       sp = kernel_stack_pointer(regs);
+       __save_stack_trace(trace, sp);
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
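
For illustration, not part of the series: how the newly exported helper might be used elsewhere in s390 kernel code. The struct stack_trace interface shown is the one current at the time; dump_trace_at_regs() is a hypothetical caller.

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stacktrace.h>

/* Hypothetical caller: record and print the trace at a given pt_regs. */
static void dump_trace_at_regs(struct pt_regs *regs)
{
        unsigned long entries[16];
        struct stack_trace trace = {
                .max_entries    = ARRAY_SIZE(entries),
                .entries        = entries,
        };

        save_stack_trace_regs(regs, &trace);
        print_stack_trace(&trace, 0);   /* 0 = no extra indentation */
}
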
index 5378c3ea1b984918ed3719c8a9cf8d1c8c86574a..293d8b98fd525c992146fa4f5c85f1287d90bc62 100644 (file)
@@ -383,3 +383,4 @@ SYSCALL(sys_recvfrom,compat_sys_recvfrom)
 SYSCALL(sys_recvmsg,compat_sys_recvmsg)
 SYSCALL(sys_shutdown,sys_shutdown)
 SYSCALL(sys_mlock2,compat_sys_mlock2)
+SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
index d69d648759c9f926ae91771cef6a8cc291ee403e..017eb03daee2e724a603420f65b6e93dde98ac6a 100644 (file)
@@ -32,8 +32,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
                address = *(unsigned long *)(current->thread.trap_tdb + 24);
        else
                address = regs->psw.addr;
-       return (void __user *)
-               ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
+       return (void __user *) (address - (regs->int_code >> 16));
 }
 
 static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -46,7 +45,7 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
                return;
        printk("User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
-       print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
+       print_vma_addr("in ", regs->psw.addr);
        printk("\n");
        show_regs(regs);
 }
@@ -69,13 +68,13 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
                report_user_fault(regs, si_signo);
         } else {
                 const struct exception_table_entry *fixup;
-                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+               fixup = search_exception_tables(regs->psw.addr);
                 if (fixup)
-                       regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
+                       regs->psw.addr = extable_fixup(fixup);
                else {
                        enum bug_trap_type btt;
 
-                       btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
+                       btt = report_bug(regs->psw.addr, regs);
                        if (btt == BUG_TRAP_TYPE_WARN)
                                return;
                        die(regs, str);
index 5fce52cf0e57dc4831d0737ddd3c3a2cfbc386d7..5ea5af3c7db72479a69710a6606d8dddd6143b7a 100644 (file)
@@ -29,6 +29,7 @@ config KVM
        select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
        select SRCU
+       select KVM_VFIO
        ---help---
          Support hosting paravirtualized guest machines using the SIE
          virtualization capability on the mainframe. This should work
index b3b553469650888fd31df5d975fe4b93b2399f70..d42fa38c24292157c0da0f015d2063bc1c5e5723 100644 (file)
@@ -7,7 +7,7 @@
 # as published by the Free Software Foundation.
 
 KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o  $(KVM)/async_pf.o $(KVM)/irqchip.o
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o  $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
index 47518a324d752286d4b0cb80784c43c316f31c45..d697312ce9ee325571f25c74fb0d963f50943537 100644 (file)
@@ -116,7 +116,7 @@ static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
        if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
                *cr9 &= ~PER_CONTROL_ALTERATION;
                *cr10 = 0;
-               *cr11 = PSW_ADDR_INSN;
+               *cr11 = -1UL;
        } else {
                *cr9 &= ~PER_CONTROL_ALTERATION;
                *cr9 |= PER_EVENT_STORE;
@@ -159,7 +159,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
                vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
                vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
                vcpu->arch.sie_block->gcr[10] = 0;
-               vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN;
+               vcpu->arch.sie_block->gcr[11] = -1UL;
        }
 
        if (guestdbg_hw_bp_enabled(vcpu)) {
index 835d60bedb545fa1873b6b37e57dffbec3bb9600..4af21c771f9b3925bf860d0dc86cebcc803cf965 100644 (file)
@@ -1423,44 +1423,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-/*
- * Backs up the current FP/VX register save area on a particular
- * destination.  Used to switch between different register save
- * areas.
- */
-static inline void save_fpu_to(struct fpu *dst)
-{
-       dst->fpc = current->thread.fpu.fpc;
-       dst->regs = current->thread.fpu.regs;
-}
-
-/*
- * Switches the FP/VX register save area from which to lazy
- * restore register contents.
- */
-static inline void load_fpu_from(struct fpu *from)
-{
-       current->thread.fpu.fpc = from->fpc;
-       current->thread.fpu.regs = from->regs;
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        /* Save host register state */
        save_fpu_regs();
-       save_fpu_to(&vcpu->arch.host_fpregs);
-
-       if (test_kvm_facility(vcpu->kvm, 129)) {
-               current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
-               /*
-                * Use the register save area in the SIE-control block
-                * for register restore and save in kvm_arch_vcpu_put()
-                */
-               current->thread.fpu.vxrs =
-                       (__vector128 *)&vcpu->run->s.regs.vrs;
-       } else
-               load_fpu_from(&vcpu->arch.guest_fpregs);
+       vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+       vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
 
+       /* Depending on MACHINE_HAS_VX, the data stored to vrs is either
+        * in vector register or floating-point register format.
+        */
+       current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+       current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
        if (test_fp_ctl(current->thread.fpu.fpc))
                /* User space provided an invalid FPC, let's clear it */
                current->thread.fpu.fpc = 0;
@@ -1476,19 +1450,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
 
+       /* Save guest register state */
        save_fpu_regs();
+       vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
 
-       if (test_kvm_facility(vcpu->kvm, 129))
-               /*
-                * kvm_arch_vcpu_load() set up the register save area to
-                * the &vcpu->run->s.regs.vrs and, thus, the vector registers
-                * are already saved.  Only the floating-point control must be
-                * copied.
-                */
-               vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-       else
-               save_fpu_to(&vcpu->arch.guest_fpregs);
-       load_fpu_from(&vcpu->arch.host_fpregs);
+       /* Restore host register state */
+       current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+       current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
 
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
@@ -1506,8 +1474,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
-       vcpu->arch.guest_fpregs.fpc = 0;
-       asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+       /* make sure the new fpc will be lazily loaded */
+       save_fpu_regs();
+       current->thread.fpu.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -1648,17 +1617,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 
-       /*
-        * Allocate a save area for floating-point registers.  If the vector
-        * extension is available, register contents are saved in the SIE
-        * control block.  The allocated save area is still required in
-        * particular places, for example, in kvm_s390_vcpu_store_status().
-        */
-       vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
-                                              GFP_KERNEL);
-       if (!vcpu->arch.guest_fpregs.fprs)
-               goto out_free_sie_block;
-
        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
@@ -1879,19 +1837,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+       /* make sure the new values will be lazily loaded */
+       save_fpu_regs();
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
-       memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-       vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-       save_fpu_regs();
-       load_fpu_from(&vcpu->arch.guest_fpregs);
+       current->thread.fpu.fpc = fpu->fpc;
+       if (MACHINE_HAS_VX)
+               convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+       else
+               memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
-       fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+       /* make sure we have the latest values */
+       save_fpu_regs();
+       if (MACHINE_HAS_VX)
+               convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+       else
+               memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+       fpu->fpc = current->thread.fpu.fpc;
        return 0;
 }
 
@@ -2396,6 +2362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 {
        unsigned char archmode = 1;
+       freg_t fprs[NUM_FPRS];
        unsigned int px;
        u64 clkcomp;
        int rc;
@@ -2411,8 +2378,16 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
                gpa = px;
        } else
                gpa -= __LC_FPREGS_SAVE_AREA;
-       rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
-                            vcpu->arch.guest_fpregs.fprs, 128);
+
+       /* manually convert vector registers if necessary */
+       if (MACHINE_HAS_VX) {
+               convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+               rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+                                    fprs, 128);
+       } else {
+               rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+                                    vcpu->run->s.regs.vrs, 128);
+       }
        rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
@@ -2420,7 +2395,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
        rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
                              &px, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
-                             &vcpu->arch.guest_fpregs.fpc, 4);
+                             &vcpu->run->s.regs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
                              &vcpu->arch.sie_block->todpr, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
@@ -2443,19 +2418,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         * it into the save area
         */
        save_fpu_regs();
-       if (test_kvm_facility(vcpu->kvm, 129)) {
-               /*
-                * If the vector extension is available, the vector registers
-                * which overlaps with floating-point registers are saved in
-                * the SIE-control block.  Hence, extract the floating-point
-                * registers and the FPC value and store them in the
-                * guest_fpregs structure.
-                */
-               vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
-               convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
-                                current->thread.fpu.vxrs);
-       } else
-               save_fpu_to(&vcpu->arch.guest_fpregs);
+       vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
        save_access_regs(vcpu->run->s.regs.acrs);
 
        return kvm_s390_store_status_unloaded(vcpu, addr);
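Several hunks above rely on convert_vx_to_fp()/convert_fp_to_vx(), which exploit the fact that floating-point register i overlays the upper 64 bits of vector register i. A rough standalone model of that extraction follows; toy_vx, toy_freg and the sample values are illustrative, not the kernel's __vector128 or freg_t definitions.

#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t high, low; } toy_vx;  /* stand-in for a 128-bit vector register */
typedef uint64_t toy_freg;                      /* stand-in for freg_t */

static void toy_vx_to_fp(toy_freg *fprs, const toy_vx *vxrs, int n)
{
        for (int i = 0; i < n; i++)
                fprs[i] = vxrs[i].high;         /* keep only the FP part of each register */
}

int main(void)
{
        toy_vx vx[2] = { { 0x4010000000000000ULL, 0xdeadbeefULL }, { 0, 0 } };
        toy_freg fp[2];

        toy_vx_to_fp(fp, vx, 2);
        printf("fp[0] = %#llx\n", (unsigned long long)fp[0]);
        return 0;
}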
index 1b903f6ad54a327ca73ee88bdecee0a73e1eaf42..791a4146052c7a48fdb72a6ef3b9e0d1abc06eeb 100644 (file)
@@ -228,7 +228,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
                return;
        printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
-       print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
+       print_vma_addr(KERN_CONT "in ", regs->psw.addr);
        printk(KERN_CONT "\n");
        printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
               regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
@@ -256,9 +256,9 @@ static noinline void do_no_context(struct pt_regs *regs)
        const struct exception_table_entry *fixup;
 
        /* Are we prepared to handle this kernel fault?  */
-       fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+       fixup = search_exception_tables(regs->psw.addr);
        if (fixup) {
-               regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
+               regs->psw.addr = extable_fixup(fixup);
                return;
        }
 
index c722400c769784d8df6f13507693e117ad942497..73e29033709285b44ceb5609919c07aff86e9de9 100644 (file)
@@ -98,7 +98,7 @@ void __init paging_init(void)
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
-       arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
+       __arch_local_irq_stosm(0x04);
 
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
index ea01477b4aa67155f87b88e8c6a87f5374eda247..45c4daa49930f9a2bdfaf1d07a6ab5e002aa99c2 100644 (file)
@@ -169,12 +169,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 {
-       if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
+       if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE)
                return 0;
        if (!(flags & MAP_FIXED))
                addr = 0;
        if ((addr + len) >= TASK_SIZE)
-               return crst_table_upgrade(current->mm, 1UL << 53);
+               return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
        return 0;
 }
 
@@ -189,9 +189,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
        area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
        if (!(area & ~PAGE_MASK))
                return area;
-       if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+       if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
                /* Upgrade the page table to 4 levels and retry. */
-               rc = crst_table_upgrade(mm, 1UL << 53);
+               rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
                if (rc)
                        return (unsigned long) rc;
                area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -211,9 +211,9 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
        area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
        if (!(area & ~PAGE_MASK))
                return area;
-       if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+       if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
                /* Upgrade the page table to 4 levels and retry. */
-               rc = crst_table_upgrade(mm, 1UL << 53);
+               rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
                if (rc)
                        return (unsigned long) rc;
                area = arch_get_unmapped_area_topdown(filp, addr, len,
index a809fa8e6f8bd01d2c0ad90891060d6aabd36a80..5109827883acaefc6afc0ac913956fcb79f8f8a0 100644 (file)
@@ -55,7 +55,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
        unsigned long entry;
        int flush;
 
-       BUG_ON(limit > (1UL << 53));
+       BUG_ON(limit > TASK_MAX_SIZE);
        flush = 0;
 repeat:
        table = crst_table_alloc(mm);
index 43f32ce60aa3d98af0b7665090fa3eb080d12fa7..2794845061c66fe3605c0b2e97b591b6cf176d32 100644 (file)
@@ -57,9 +57,7 @@ static __init pg_data_t *alloc_node_data(void)
 {
        pg_data_t *res;
 
-       res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1);
-       if (!res)
-               panic("Could not allocate memory for node data!\n");
+       res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
        memset(res, 0, sizeof(pg_data_t));
        return res;
 }
@@ -162,7 +160,7 @@ static int __init numa_init_late(void)
                register_one_node(nid);
        return 0;
 }
-device_initcall(numa_init_late);
+arch_initcall(numa_init_late);
 
 static int __init parse_debug(char *parm)
 {
index 8a6811b2cdb9523a24cd32341dee0fcef317f661..1884e17595294bbfafdcaf8e183fd80e40d8ea48 100644 (file)
@@ -16,24 +16,23 @@ __show_trace(unsigned int *depth, unsigned long sp,
        struct pt_regs *regs;
 
        while (*depth) {
-               sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                (*depth)--;
-               oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+               oprofile_add_trace(sf->gprs[8]);
 
                /* Follow the backchain.  */
                while (*depth) {
                        low = sp;
-                       sp = sf->back_chain & PSW_ADDR_INSN;
+                       sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        (*depth)--;
-                       oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+                       oprofile_add_trace(sf->gprs[8]);
 
                }
 
@@ -46,7 +45,7 @@ __show_trace(unsigned int *depth, unsigned long sp,
                        return sp;
                regs = (struct pt_regs *) sp;
                (*depth)--;
-               oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+               oprofile_add_trace(sf->gprs[8]);
                low = sp;
                sp = regs->gprs[15];
        }
@@ -55,12 +54,13 @@ __show_trace(unsigned int *depth, unsigned long sp,
 
 void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
-       unsigned long head;
+       unsigned long head, frame_size;
        struct stack_frame* head_sf;
 
        if (user_mode(regs))
                return;
 
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        head = regs->gprs[15];
        head_sf = (struct stack_frame*)head;
 
@@ -69,8 +69,9 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
 
        head = head_sf->back_chain;
 
-       head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE,
-                           S390_lowcore.async_stack);
+       head = __show_trace(&depth, head,
+                           S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+                           S390_lowcore.async_stack + frame_size);
 
        __show_trace(&depth, head, S390_lowcore.thread_info,
                     S390_lowcore.thread_info + THREAD_SIZE);
index 11d4f277e9f69ac3d266e14ca9804e4b6c844fac..8f19c8f9d660570839a69f60477f855181ba4e7c 100644 (file)
@@ -68,9 +68,12 @@ static struct airq_struct zpci_airq = {
        .isc = PCI_ISC,
 };
 
-/* I/O Map */
+#define ZPCI_IOMAP_ENTRIES                                             \
+       min(((unsigned long) CONFIG_PCI_NR_FUNCTIONS * PCI_BAR_COUNT),  \
+           ZPCI_IOMAP_MAX_ENTRIES)
+
 static DEFINE_SPINLOCK(zpci_iomap_lock);
-static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
+static unsigned long *zpci_iomap_bitmap;
 struct zpci_iomap_entry *zpci_iomap_start;
 EXPORT_SYMBOL_GPL(zpci_iomap_start);
 
@@ -265,27 +268,20 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
                              unsigned long max)
 {
        struct zpci_dev *zdev = to_zpci(pdev);
-       u64 addr;
        int idx;
 
-       if ((bar & 7) != bar)
+       if (!pci_resource_len(pdev, bar))
                return NULL;
 
        idx = zdev->bars[bar].map_idx;
        spin_lock(&zpci_iomap_lock);
-       if (zpci_iomap_start[idx].count++) {
-               BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
-                      zpci_iomap_start[idx].bar != bar);
-       } else {
-               zpci_iomap_start[idx].fh = zdev->fh;
-               zpci_iomap_start[idx].bar = bar;
-       }
        /* Detect overrun */
-       BUG_ON(!zpci_iomap_start[idx].count);
+       WARN_ON(!++zpci_iomap_start[idx].count);
+       zpci_iomap_start[idx].fh = zdev->fh;
+       zpci_iomap_start[idx].bar = bar;
        spin_unlock(&zpci_iomap_lock);
 
-       addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
-       return (void __iomem *) addr + offset;
+       return (void __iomem *) ZPCI_ADDR(idx) + offset;
 }
 EXPORT_SYMBOL(pci_iomap_range);
 
@@ -297,12 +293,11 @@ EXPORT_SYMBOL(pci_iomap);
 
 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 {
-       unsigned int idx;
+       unsigned int idx = ZPCI_IDX(addr);
 
-       idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
        spin_lock(&zpci_iomap_lock);
        /* Detect underrun */
-       BUG_ON(!zpci_iomap_start[idx].count);
+       WARN_ON(!zpci_iomap_start[idx].count);
        if (!--zpci_iomap_start[idx].count) {
                zpci_iomap_start[idx].fh = 0;
                zpci_iomap_start[idx].bar = 0;
@@ -544,15 +539,15 @@ static void zpci_irq_exit(void)
 
 static int zpci_alloc_iomap(struct zpci_dev *zdev)
 {
-       int entry;
+       unsigned long entry;
 
        spin_lock(&zpci_iomap_lock);
-       entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
-       if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
+       entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
+       if (entry == ZPCI_IOMAP_ENTRIES) {
                spin_unlock(&zpci_iomap_lock);
                return -ENOSPC;
        }
-       set_bit(entry, zpci_iomap);
+       set_bit(entry, zpci_iomap_bitmap);
        spin_unlock(&zpci_iomap_lock);
        return entry;
 }
@@ -561,7 +556,7 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
 {
        spin_lock(&zpci_iomap_lock);
        memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
-       clear_bit(entry, zpci_iomap);
+       clear_bit(entry, zpci_iomap_bitmap);
        spin_unlock(&zpci_iomap_lock);
 }
 
@@ -611,8 +606,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
                if (zdev->bars[i].val & 4)
                        flags |= IORESOURCE_MEM_64;
 
-               addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
-
+               addr = ZPCI_ADDR(entry);
                size = 1UL << zdev->bars[i].size;
 
                res = __alloc_res(zdev, addr, size, flags);
@@ -873,23 +867,30 @@ static int zpci_mem_init(void)
        zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
                                16, 0, NULL);
        if (!zdev_fmb_cache)
-               goto error_zdev;
+               goto error_fmb;
 
-       /* TODO: use realloc */
-       zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
-                                  GFP_KERNEL);
+       zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
+                                  sizeof(*zpci_iomap_start), GFP_KERNEL);
        if (!zpci_iomap_start)
                goto error_iomap;
-       return 0;
 
+       zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
+                                   sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
+       if (!zpci_iomap_bitmap)
+               goto error_iomap_bitmap;
+
+       return 0;
+error_iomap_bitmap:
+       kfree(zpci_iomap_start);
 error_iomap:
        kmem_cache_destroy(zdev_fmb_cache);
-error_zdev:
+error_fmb:
        return -ENOMEM;
 }
 
 static void zpci_mem_exit(void)
 {
+       kfree(zpci_iomap_bitmap);
        kfree(zpci_iomap_start);
        kmem_cache_destroy(zdev_fmb_cache);
 }
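A quick back-of-the-envelope check of the sizing logic introduced above: ZPCI_IOMAP_ENTRIES is the smaller of CONFIG_PCI_NR_FUNCTIONS * PCI_BAR_COUNT and the architectural maximum, and the separately allocated bitmap needs one bit per entry, rounded up to whole longs. The concrete numbers below are made-up example values, not the kernel's defaults.

#include <stdio.h>

#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define MIN(a, b)        ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned long nr_functions = 64;        /* example CONFIG_PCI_NR_FUNCTIONS */
        unsigned long bar_count    = 6;         /* example PCI_BAR_COUNT */
        unsigned long max_entries  = 1UL << 16; /* example architectural limit */
        unsigned long entries = MIN(nr_functions * bar_count, max_entries);

        printf("entries=%lu bitmap_longs=%lu\n", entries, BITS_TO_LONGS(entries));
        return 0;
}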
index 369a3e05d468dbf1973df24c77e4ae07e580c3b6..b0e04751c5d599fbdbb4bdb68fdbed5dcb4039d5 100644 (file)
@@ -53,6 +53,11 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 
        pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
               pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+
+       if (!pdev)
+               return;
+
+       pdev->error_state = pci_channel_io_perm_failure;
 }
 
 void zpci_event_error(void *data)
index f887c6465a821b7b96a2a300268f077afd70ae5a..8a84e05adb2e122fa0d39a1c2acdfadf63202266 100644 (file)
@@ -33,7 +33,6 @@
 #endif
 
 #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
 
 #include <asm-generic/barrier.h>
 
index 1c26d440d288dfb8d28579fda55e2a76ccf4c139..b6de8b10a55b8b8f09eedb2c90d86906d5445686 100644 (file)
 #define __NR_listen            354
 #define __NR_setsockopt                355
 #define __NR_mlock2            356
+#define __NR_copy_file_range   357
 
-#define NR_syscalls            357
+#define NR_syscalls            358
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 33c02b15f47859a8262677d08635fcfdbb8872cb..a83707c83be803b78b3019cac6dde9dba2cfabd6 100644 (file)
@@ -948,7 +948,24 @@ linux_syscall_trace:
        cmp     %o0, 0
        bne     3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ld      [%sp + STACKFRAME_SZ + PT_G1], %g1
+       sethi   %hi(sys_call_table), %l7
+       ld      [%sp + STACKFRAME_SZ + PT_I0], %i0
+       or      %l7, %lo(sys_call_table), %l7
+       ld      [%sp + STACKFRAME_SZ + PT_I1], %i1
+       ld      [%sp + STACKFRAME_SZ + PT_I2], %i2
+       ld      [%sp + STACKFRAME_SZ + PT_I3], %i3
+       ld      [%sp + STACKFRAME_SZ + PT_I4], %i4
+       ld      [%sp + STACKFRAME_SZ + PT_I5], %i5
+       cmp     %g1, NR_syscalls
+       bgeu    3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        mov     %i0, %o0
+       ld      [%l7 + %l4], %l7
        mov     %i1, %o1
        mov     %i2, %o2
        mov     %i3, %o3
index afbaba52d2f16cb30daf092f5cd3986e7ca9c299..d127130bf4246032d39cf923248fce7eebf92867 100644 (file)
@@ -338,8 +338,9 @@ ENTRY(sun4v_mach_set_watchdog)
        mov     %o1, %o4
        mov     HV_FAST_MACH_SET_WATCHDOG, %o5
        ta      HV_FAST_TRAP
+       brnz,a,pn %o4, 0f
        stx     %o1, [%o4]
-       retl
+0:     retl
         nop
 ENDPROC(sun4v_mach_set_watchdog)
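The asm change above only stores the returned time-remaining value when the caller actually passed a buffer. Expressed as C, the intent is roughly the following sketch; the names and the pretend return values are illustrative, not the sun4v hypervisor ABI.

#include <stdio.h>

static long toy_set_watchdog(unsigned long timeout, unsigned long *time_remaining)
{
        unsigned long granted = timeout;        /* pretend value returned by the hypervisor */
        long status = 0;                        /* pretend "success" status */

        if (time_remaining)                     /* the brnz,a,pn %o4 guard added above */
                *time_remaining = granted;
        return status;
}

int main(void)
{
        unsigned long left;

        printf("%ld\n", toy_set_watchdog(60, &left));   /* stores into left */
        printf("%ld\n", toy_set_watchdog(60, NULL));    /* NULL is now tolerated */
        return 0;
}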
 
index a92d5d2c46a3a6553bf13a57f95f65c6d61c334c..9e034f29dcc52208ca98a648e028f955f7c8f882 100644 (file)
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(sun4v_niagara_getperf);
 EXPORT_SYMBOL(sun4v_niagara_setperf);
 EXPORT_SYMBOL(sun4v_niagara2_getperf);
 EXPORT_SYMBOL(sun4v_niagara2_setperf);
+EXPORT_SYMBOL(sun4v_mach_set_watchdog);
 
 /* from hweight.S */
 EXPORT_SYMBOL(__arch_hweight8);
index bb0008927598b1f7bbeeccdf30c6fa154219a4ce..c4a1b5c40e4efc7ed17bcc0d67d1f058ea4f09f8 100644 (file)
@@ -158,7 +158,25 @@ linux_syscall_trace32:
         add    %sp, PTREGS_OFF, %o0
        brnz,pn %o0, 3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ldx     [%sp + PTREGS_OFF + PT_V9_G1], %g1
+       sethi   %hi(sys_call_table32), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I0], %i0
+       or      %l7, %lo(sys_call_table32), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I1], %i1
+       ldx     [%sp + PTREGS_OFF + PT_V9_I2], %i2
+       ldx     [%sp + PTREGS_OFF + PT_V9_I3], %i3
+       ldx     [%sp + PTREGS_OFF + PT_V9_I4], %i4
+       ldx     [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+       cmp     %g1, NR_syscalls
+       bgeu,pn %xcc, 3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        srl     %i0, 0, %o0
+       lduw    [%l7 + %l4], %l7
        srl     %i4, 0, %o4
        srl     %i1, 0, %o1
        srl     %i2, 0, %o2
@@ -170,7 +188,25 @@ linux_syscall_trace:
         add    %sp, PTREGS_OFF, %o0
        brnz,pn %o0, 3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ldx     [%sp + PTREGS_OFF + PT_V9_G1], %g1
+       sethi   %hi(sys_call_table64), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I0], %i0
+       or      %l7, %lo(sys_call_table64), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I1], %i1
+       ldx     [%sp + PTREGS_OFF + PT_V9_I2], %i2
+       ldx     [%sp + PTREGS_OFF + PT_V9_I3], %i3
+       ldx     [%sp + PTREGS_OFF + PT_V9_I4], %i4
+       ldx     [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+       cmp     %g1, NR_syscalls
+       bgeu,pn %xcc, 3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        mov     %i0, %o0
+       lduw    [%l7 + %l4], %l7
        mov     %i1, %o1
        mov     %i2, %o2
        mov     %i3, %o3
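The syscall-trace hunks above (for both the 32-bit and 64-bit entry paths) reload the syscall number and arguments from the saved registers after the tracer has run, because a tracer may rewrite them; an out-of-range number must then fail with -ENOSYS instead of indexing past the table. A hedged C-level sketch of that dispatch logic follows; the function, table and register layout are illustrative, not the actual sparc entry code.

#include <errno.h>
#include <stdio.h>

static long sys_hello(long a, long b) { return a + b; }

static long (* const toy_table[])(long, long) = { sys_hello };
#define TOY_NR_SYSCALLS (sizeof(toy_table) / sizeof(toy_table[0]))

static long traced_dispatch(const long *saved_regs)
{
        unsigned long nr = (unsigned long)saved_regs[0];        /* possibly rewritten by the tracer */

        if (nr >= TOY_NR_SYSCALLS)
                return -ENOSYS;                                 /* bail before indexing the table */
        return toy_table[nr](saved_regs[1], saved_regs[2]);     /* use the reloaded arguments */
}

int main(void)
{
        long ok[]  = { 0, 2, 3 };       /* valid syscall number */
        long bad[] = { 99, 0, 0 };      /* rewritten to an invalid number */

        printf("%ld %ld\n", traced_dispatch(ok), traced_dispatch(bad));
        return 0;
}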
index e663b6c78de2e6498a5a458f1129504cae3ab2a8..6c3dd6c52f8bd09135e81f1d56704602d10c4f4e 100644 (file)
@@ -88,4 +88,4 @@ sys_call_table:
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/        .long sys_setsockopt, sys_mlock2
+/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range
index 1557121f4cdce8a6ae21c31fb33e9aa2d7cbc443..12b524cfcfa0120caabdf7aa5b8606ecc3978c96 100644 (file)
@@ -89,7 +89,7 @@ sys_call_table32:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word compat_sys_setsockopt, sys_mlock2
+       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
 
 #endif /* CONFIG_COMPAT */
 
@@ -170,4 +170,4 @@ sys_call_table:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word sys_setsockopt, sys_mlock2
+       .word sys_setsockopt, sys_mlock2, sys_copy_file_range
index a506c2c28943715770ab43fa451797a8cb1566e4..6ad99925900e0b15f68323592a2da385a10753e7 100644 (file)
@@ -126,15 +126,15 @@ void
 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 {
        struct pt_regs *thread_regs;
+       const int NGPRS = TREG_LAST_GPR + 1;
 
        if (task == NULL)
                return;
 
-       /* Initialize to zero. */
-       memset(gdb_regs, 0, NUMREGBYTES);
-
        thread_regs = task_pt_regs(task);
-       memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long));
+       memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
+       memset(&gdb_regs[NGPRS], 0,
+              (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
        gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
        gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
 }
index e13d41c392ae4f4940a41e6c8049099c71d28a04..f878bec23576c54c619b633ce0c81508519ddb56 100644 (file)
@@ -34,21 +34,18 @@ struct page;
 
 #if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
 
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
-
-#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
-#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
-#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
-#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
-                             smp_wmb(); \
-                             (to).pte_low = (from).pte_low; })
-#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
-#define pte_set_val(pte, phys, prot) \
-       ({ (pte).pte_high = (phys) >> 32; \
-          (pte).pte_low = (phys) | pgprot_val(prot); })
+#define pte_val(p) ((p).pte)
+
+#define pte_get_bits(p, bits) ((p).pte & (bits))
+#define pte_set_bits(p, bits) ((p).pte |= (bits))
+#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
+#define pte_copy(to, from) ({ (to).pte = (from).pte; })
+#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
+#define pte_set_val(p, phys, prot) \
+       ({ (p).pte = (phys) | pgprot_val(prot); })
 
 #define pmd_val(x)     ((x).pmd)
 #define __pmd(x) ((pmd_t) { (x) } )
index 38b3f3785c3c82c6c55a98192d67bc60bd1f45a9..eb9dccecb33884cff6941706d7254abf5d983d67 100644 (file)
@@ -14,7 +14,6 @@
 
 #ifdef __KERNEL__
 #include <asm-generic/pci-dma-compat.h>
-#include <asm-generic/pci-bridge.h>
 #include <asm-generic/pci.h>
 #include <mach/hardware.h> /* for PCIBIOS_MIN_* */
 
@@ -23,5 +22,4 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
        enum pci_mmap_state mmap_state, int write_combine);
 
 #endif /* __KERNEL__ */
-
 #endif
index 330e738ccfc13633954a02677e46bb1b12605fa9..9af2e63384005002bc99c762f8a61fdd1bbbb3ca 100644 (file)
@@ -509,11 +509,10 @@ config X86_INTEL_CE
 
 config X86_INTEL_MID
        bool "Intel MID platform support"
-       depends on X86_32
        depends on X86_EXTENDED_PLATFORM
        depends on X86_PLATFORM_DEVICES
        depends on PCI
-       depends on PCI_GOANY
+       depends on X86_64 || (PCI_GOANY && X86_32)
        depends on X86_IO_APIC
        select SFI
        select I2C
index 881b4768644aad3537364286e468146aba73ad56..e7de5c9a4fbdd2c5d99b78d82d2c58019d20dc04 100644 (file)
@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
+struct irq_desc;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
index 04c27a0131656db29e97805432e56d3a1aae5992..4432ab7f407ccc5f47ea9c19a850c29af821d8c9 100644 (file)
@@ -366,20 +366,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
 }
 static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
 {
+       pgprotval_t val = pgprot_val(pgprot);
        pgprot_t new;
-       unsigned long val;
 
-       val = pgprot_val(pgprot);
        pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
        return new;
 }
 static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
 {
+       pgprotval_t val = pgprot_val(pgprot);
        pgprot_t new;
-       unsigned long val;
 
-       val = pgprot_val(pgprot);
        pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                          ((val & _PAGE_PAT_LARGE) >>
                           (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
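The helpers above move the PAT bit between its 4K and large-page positions: on x86 the PAT bit sits at bit 7 in a 4K PTE but at bit 12 in a 2M/1G entry, because bit 7 is the page-size bit there. A small standalone sketch of the same bit relocation; only the two bit positions follow the architecture, everything else is a toy.

#include <stdio.h>

#define BIT_PAT        7UL     /* _PAGE_BIT_PAT */
#define BIT_PAT_LARGE 12UL     /* _PAGE_BIT_PAT_LARGE */

static unsigned long prot_4k_to_large(unsigned long val)
{
        unsigned long pat = (val >> BIT_PAT) & 1;

        /* clear both possible PAT positions, then set the large-page one */
        val &= ~((1UL << BIT_PAT) | (1UL << BIT_PAT_LARGE));
        return val | (pat << BIT_PAT_LARGE);
}

int main(void)
{
        unsigned long prot = (1UL << BIT_PAT) | 0x63;   /* PAT plus some low flag bits */

        printf("%#lx -> %#lx\n", prot, prot_4k_to_large(prot));
        return 0;
}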
index f25321894ad294278c20d5ba7b4076ce2353a8fb..fdb0fbfb1197a4cf4e485c4f330b3a399b3a6d5f 100644 (file)
@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
 {
        int pin, ioapic, irq, irq_entry;
        const struct cpumask *mask;
+       struct irq_desc *desc;
        struct irq_data *idata;
        struct irq_chip *chip;
 
@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
                if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
                        continue;
 
-               idata = irq_get_irq_data(irq);
+               desc = irq_to_desc(irq);
+               raw_spin_lock_irq(&desc->lock);
+               idata = irq_desc_get_irq_data(desc);
 
                /*
                 * Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
                /* Might be lapic_chip for irq 0 */
                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(idata, mask, false);
+               raw_spin_unlock_irq(&desc->lock);
        }
 }
 #endif
index 908cb37da171ebed1a361203dc520c15e4e42736..3b670df4ba7b429f48102b1401c9133957ecb6a9 100644 (file)
@@ -31,7 +31,7 @@ struct apic_chip_data {
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
 static DEFINE_RAW_SPINLOCK(vector_lock);
-static cpumask_var_t vector_cpumask;
+static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -118,35 +118,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
         */
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
-       int cpu, err;
+       int cpu, vector;
 
-       if (d->move_in_progress)
+       /*
+        * If there is still a move in progress or the previous move has not
+        * been cleaned up completely, tell the caller to come back later.
+        */
+       if (d->move_in_progress ||
+           cpumask_intersects(d->old_domain, cpu_online_mask))
                return -EBUSY;
 
        /* Only try and allocate irqs on cpus that are present */
-       err = -ENOSPC;
        cpumask_clear(d->old_domain);
+       cpumask_clear(searched_cpumask);
        cpu = cpumask_first_and(mask, cpu_online_mask);
        while (cpu < nr_cpu_ids) {
-               int new_cpu, vector, offset;
+               int new_cpu, offset;
 
+               /* Get the possible target cpus for @mask/@cpu from the apic */
                apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
+               /*
+                * Clear the offline cpus from @vector_cpumask for searching
+                * and verify whether the result overlaps with @mask. If true,
+                * then the call to apic->cpu_mask_to_apicid_and() will
+                * succeed as well. If not, no point in trying to find a
+                * vector in this mask.
+                */
+               cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
+               if (!cpumask_intersects(vector_searchmask, mask))
+                       goto next_cpu;
+
                if (cpumask_subset(vector_cpumask, d->domain)) {
-                       err = 0;
                        if (cpumask_equal(vector_cpumask, d->domain))
-                               break;
+                               goto success;
                        /*
-                        * New cpumask using the vector is a proper subset of
-                        * the current in use mask. So cleanup the vector
-                        * allocation for the members that are not used anymore.
+                        * Mark the cpus which are no longer in the mask for
+                        * cleanup.
                         */
-                       cpumask_andnot(d->old_domain, d->domain,
-                                      vector_cpumask);
-                       d->move_in_progress =
-                          cpumask_intersects(d->old_domain, cpu_online_mask);
-                       cpumask_and(d->domain, d->domain, vector_cpumask);
-                       break;
+                       cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
+                       vector = d->cfg.vector;
+                       goto update;
                }
 
                vector = current_vector;
@@ -158,45 +170,60 @@ next:
                        vector = FIRST_EXTERNAL_VECTOR + offset;
                }
 
-               if (unlikely(current_vector == vector)) {
-                       cpumask_or(d->old_domain, d->old_domain,
-                                  vector_cpumask);
-                       cpumask_andnot(vector_cpumask, mask, d->old_domain);
-                       cpu = cpumask_first_and(vector_cpumask,
-                                               cpu_online_mask);
-                       continue;
-               }
+               /* If the search wrapped around, try the next cpu */
+               if (unlikely(current_vector == vector))
+                       goto next_cpu;
 
                if (test_bit(vector, used_vectors))
                        goto next;
 
-               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
+               for_each_cpu(new_cpu, vector_searchmask) {
                        if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
                                goto next;
                }
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
-               if (d->cfg.vector) {
+               /* Schedule the old vector for cleanup on all cpus */
+               if (d->cfg.vector)
                        cpumask_copy(d->old_domain, d->domain);
-                       d->move_in_progress =
-                          cpumask_intersects(d->old_domain, cpu_online_mask);
-               }
-               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
+               for_each_cpu(new_cpu, vector_searchmask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
-               d->cfg.vector = vector;
-               cpumask_copy(d->domain, vector_cpumask);
-               err = 0;
-               break;
-       }
+               goto update;
 
-       if (!err) {
-               /* cache destination APIC IDs into cfg->dest_apicid */
-               err = apic->cpu_mask_to_apicid_and(mask, d->domain,
-                                                  &d->cfg.dest_apicid);
+next_cpu:
+               /*
+                * We exclude the current @vector_cpumask from the requested
+                * @mask and try again with the next online cpu in the
+                * result. We cannot modify @mask, so we use @vector_cpumask
+                * as a temporary buffer here as it will be reassigned when
+                * calling apic->vector_allocation_domain() above.
+                */
+               cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
+               cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+               cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
+               continue;
        }
+       return -ENOSPC;
 
-       return err;
+update:
+       /*
+        * Exclude offline cpus from the cleanup mask and set the
+        * move_in_progress flag when the result is not empty.
+        */
+       cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+       d->move_in_progress = !cpumask_empty(d->old_domain);
+       d->cfg.vector = vector;
+       cpumask_copy(d->domain, vector_cpumask);
+success:
+       /*
+        * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
+        * as we already established that mask & d->domain & cpu_online_mask
+        * is not empty.
+        */
+       BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+                                           &d->cfg.dest_apicid));
+       return 0;
 }
 
 static int assign_irq_vector(int irq, struct apic_chip_data *data,
@@ -226,10 +253,8 @@ static int assign_irq_vector_policy(int irq, int node,
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
        struct irq_desc *desc;
-       unsigned long flags;
        int cpu, vector;
 
-       raw_spin_lock_irqsave(&vector_lock, flags);
        BUG_ON(!data->cfg.vector);
 
        vector = data->cfg.vector;
@@ -239,10 +264,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
        data->cfg.vector = 0;
        cpumask_clear(data->domain);
 
-       if (likely(!data->move_in_progress)) {
-               raw_spin_unlock_irqrestore(&vector_lock, flags);
+       /*
+        * If move is in progress or the old_domain mask is not empty,
+        * i.e. the cleanup IPI has not been processed yet, we need to remove
+        * the old references to desc from all cpus' vector tables.
+        */
+       if (!data->move_in_progress && cpumask_empty(data->old_domain))
                return;
-       }
 
        desc = irq_to_desc(irq);
        for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
@@ -255,7 +283,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
                }
        }
        data->move_in_progress = 0;
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void init_irq_alloc_info(struct irq_alloc_info *info,
@@ -276,19 +303,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
 static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
 {
+       struct apic_chip_data *apic_data;
        struct irq_data *irq_data;
+       unsigned long flags;
        int i;
 
        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irq_data && irq_data->chip_data) {
+                       raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(virq + i, irq_data->chip_data);
-                       free_apic_chip_data(irq_data->chip_data);
+                       apic_data = irq_data->chip_data;
+                       irq_domain_reset_irq_data(irq_data);
+                       raw_spin_unlock_irqrestore(&vector_lock, flags);
+                       free_apic_chip_data(apic_data);
 #ifdef CONFIG_X86_IO_APIC
                        if (virq + i < nr_legacy_irqs())
                                legacy_irq_data[virq + i] = NULL;
 #endif
-                       irq_domain_reset_irq_data(irq_data);
                }
        }
 }
@@ -406,6 +438,8 @@ int __init arch_early_irq_init(void)
        arch_init_htirq_domain(x86_vector_domain);
 
        BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+       BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
+       BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
 
        return arch_early_ioapic_init();
 }
@@ -494,14 +528,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
                return -EINVAL;
 
        err = assign_irq_vector(irq, data, dest);
-       if (err) {
-               if (assign_irq_vector(irq, data,
-                                     irq_data_get_affinity_mask(irq_data)))
-                       pr_err("Failed to recover vector for irq %d\n", irq);
-               return err;
-       }
-
-       return IRQ_SET_MASK_OK;
+       return err ? err : IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip lapic_controller = {
@@ -513,20 +540,12 @@ static struct irq_chip lapic_controller = {
 #ifdef CONFIG_SMP
 static void __send_cleanup_vector(struct apic_chip_data *data)
 {
-       cpumask_var_t cleanup_mask;
-
-       if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-               unsigned int i;
-
-               for_each_cpu_and(i, data->old_domain, cpu_online_mask)
-                       apic->send_IPI_mask(cpumask_of(i),
-                                           IRQ_MOVE_CLEANUP_VECTOR);
-       } else {
-               cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
-               apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-               free_cpumask_var(cleanup_mask);
-       }
+       raw_spin_lock(&vector_lock);
+       cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
        data->move_in_progress = 0;
+       if (!cpumask_empty(data->old_domain))
+               apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
+       raw_spin_unlock(&vector_lock);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
@@ -570,12 +589,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                        goto unlock;
 
                /*
-                * Check if the irq migration is in progress. If so, we
-                * haven't received the cleanup request yet for this irq.
+                * Nothing to clean up if irq migration is in progress
+                * or this cpu is not set in the cleanup mask.
                 */
-               if (data->move_in_progress)
+               if (data->move_in_progress ||
+                   !cpumask_test_cpu(me, data->old_domain))
                        goto unlock;
 
+               /*
+                * We have two cases to handle here:
+                * 1) vector is unchanged but the target mask got reduced
+                * 2) vector and the target mask has changed
+                *
+                * #1 is obvious, but in #2 we have two vectors with the same
+                * irq descriptor: the old and the new vector. So we need to
+                * make sure that we only cleanup the old vector. The new
+                * vector has the current @vector number in the config and
+                * this cpu is part of the target mask. We better leave that
+                * one alone.
+                */
                if (vector == data->cfg.vector &&
                    cpumask_test_cpu(me, data->domain))
                        goto unlock;
@@ -593,6 +625,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                        goto unlock;
                }
                __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+               cpumask_clear_cpu(me, data->old_domain);
 unlock:
                raw_spin_unlock(&desc->lock);
        }
@@ -621,12 +654,48 @@ void irq_complete_move(struct irq_cfg *cfg)
        __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 }
 
-void irq_force_complete_move(int irq)
+/*
+ * Called with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
 {
-       struct irq_cfg *cfg = irq_cfg(irq);
+       struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+       struct apic_chip_data *data = apic_chip_data(irqdata);
+       struct irq_cfg *cfg = data ? &data->cfg : NULL;
 
-       if (cfg)
-               __irq_complete_move(cfg, cfg->vector);
+       if (!cfg)
+               return;
+
+       __irq_complete_move(cfg, cfg->vector);
+
+       /*
+        * This is tricky. If the cleanup of @data->old_domain has not been
+        * done yet, then the following setaffinity call will fail with
+        * -EBUSY. This can leave the interrupt in a stale state.
+        *
+        * The cleanup cannot make progress because we hold @desc->lock. So in
+        * case @data->old_domain is not yet cleaned up, we need to drop the
+        * lock and acquire it again. @desc cannot go away, because the
+        * hotplug code holds the sparse irq lock.
+        */
+       raw_spin_lock(&vector_lock);
+       /* Clean out all offline cpus (including ourself) first. */
+       cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+       while (!cpumask_empty(data->old_domain)) {
+               raw_spin_unlock(&vector_lock);
+               raw_spin_unlock(&desc->lock);
+               cpu_relax();
+               raw_spin_lock(&desc->lock);
+               /*
+                * Reevaluate apic_chip_data. It might have been cleared after
+                * we dropped @desc->lock.
+                */
+               data = apic_chip_data(irqdata);
+               if (!data)
+                       return;
+               raw_spin_lock(&vector_lock);
+       }
+       raw_spin_unlock(&vector_lock);
 }
 #endif
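The reworked allocation and cleanup paths above all revolve around old_domain: CPUs from a previous placement stay marked until each one has processed the cleanup IPI, and new assignments are refused with -EBUSY until that mask is empty. Below is a deliberately simplified model of that bookkeeping, using plain bitmasks instead of cpumasks; everything here is a conceptual sketch, not kernel code.

#include <stdio.h>

struct toy_irq {
        unsigned long domain;           /* cpus currently targeted */
        unsigned long old_domain;       /* cpus that still hold stale vector entries */
        int move_in_progress;
};

static int toy_assign(struct toy_irq *d, unsigned long new_domain)
{
        if (d->move_in_progress || d->old_domain)
                return -1;                              /* "-EBUSY": come back later */
        d->old_domain = d->domain & ~new_domain;        /* cpus leaving the mask need cleanup */
        d->move_in_progress = d->old_domain != 0;
        d->domain = new_domain;
        return 0;
}

static void toy_cleanup_ipi(struct toy_irq *d, int cpu)
{
        d->old_domain &= ~(1UL << cpu);                 /* this cpu removed its stale entry */
}

int main(void)
{
        struct toy_irq d = { .domain = 0x3 };           /* irq currently on cpus 0 and 1 */

        printf("move to cpu 2: %d\n", toy_assign(&d, 0x4));     /* succeeds, cpus 0/1 marked */
        d.move_in_progress = 0;                                 /* cleanup IPI has been sent */
        printf("move to cpu 3: %d\n", toy_assign(&d, 0x8));     /* refused until cleanup done */
        toy_cleanup_ipi(&d, 0);
        toy_cleanup_ipi(&d, 1);
        printf("retry:         %d\n", toy_assign(&d, 0x8));     /* now succeeds */
        return 0;
}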
 
index d760c6bb37b53c25931bef3bc47c903def86c8ee..624db00583f44a76283c4d8d5e5377a8342f6517 100644 (file)
@@ -889,7 +889,10 @@ void __init uv_system_init(void)
                return;
        }
        pr_info("UV: Found %s hub\n", hub);
-       map_low_mmrs();
+
+       /* We now only need to map the MMRs on UV1 */
+       if (is_uv1_hub())
+               map_low_mmrs();
 
        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
        m_val = m_n_config.s.m_skt;
index a667078a51807bb5bd1ca2e5d5c4ff205e9dd42b..fed2ab1f1065dfdd5a286822102857a013298dfc 100644 (file)
@@ -1960,7 +1960,8 @@ intel_bts_constraints(struct perf_event *event)
 
 static int intel_alt_er(int idx, u64 config)
 {
-       int alt_idx;
+       int alt_idx = idx;
+
        if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
                return idx;
 
@@ -2897,14 +2898,12 @@ static void intel_pmu_cpu_starting(int cpu)
                return;
 
        if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
-               void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
-
                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
                        struct intel_shared_regs *pc;
 
                        pc = per_cpu(cpu_hw_events, i).shared_regs;
                        if (pc && pc->core_id == core_id) {
-                               *onln = cpuc->shared_regs;
+                               cpuc->kfree_on_online[0] = cpuc->shared_regs;
                                cpuc->shared_regs = pc;
                                break;
                        }
index f97f8075bf04c9d6efed980370feca46227afc5a..3bf41d41377505ef28d90ed3363f08e83df70db3 100644 (file)
@@ -995,6 +995,9 @@ static int __init uncore_pci_init(void)
        case 87: /* Knights Landing */
                ret = knl_uncore_pci_init();
                break;
+       case 94: /* SkyLake */
+               ret = skl_uncore_pci_init();
+               break;
        default:
                return 0;
        }
index 07aa2d6bd71094e411a9f98535fc4c65208a66d4..a7086b862156627ebe7ca22996aed6126e61e770 100644 (file)
@@ -336,6 +336,7 @@ int snb_uncore_pci_init(void);
 int ivb_uncore_pci_init(void);
 int hsw_uncore_pci_init(void);
 int bdw_uncore_pci_init(void);
+int skl_uncore_pci_init(void);
 void snb_uncore_cpu_init(void);
 void nhm_uncore_cpu_init(void);
 int snb_pci2phy_map_init(int devid);
index 0b934820fafd1f64747640e09f1544b862eb8c01..2bd030ddd0db3bf6f5ada42830bd9be9ccc4c1b4 100644 (file)
@@ -8,6 +8,7 @@
 #define PCI_DEVICE_ID_INTEL_HSW_IMC    0x0c00
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC  0x0a04
 #define PCI_DEVICE_ID_INTEL_BDW_IMC    0x1604
+#define PCI_DEVICE_ID_INTEL_SKL_IMC    0x191f
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
@@ -524,6 +525,14 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* end: all zeroes */ },
 };
 
+static const struct pci_device_id skl_uncore_pci_ids[] = {
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* end: all zeroes */ },
+};
+
 static struct pci_driver snb_uncore_pci_driver = {
        .name           = "snb_uncore",
        .id_table       = snb_uncore_pci_ids,
@@ -544,6 +553,11 @@ static struct pci_driver bdw_uncore_pci_driver = {
        .id_table       = bdw_uncore_pci_ids,
 };
 
+static struct pci_driver skl_uncore_pci_driver = {
+       .name           = "skl_uncore",
+       .id_table       = skl_uncore_pci_ids,
+};
+
 struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
@@ -558,6 +572,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
+       IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
        {  /* end marker */ }
 };
 
@@ -610,6 +625,11 @@ int bdw_uncore_pci_init(void)
        return imc_uncore_pci_init();
 }
 
+int skl_uncore_pci_init(void)
+{
+       return imc_uncore_pci_init();
+}
+
 /* end of Sandy Bridge uncore support */
 
 /* Nehalem uncore support */
index f129a9af635747ce616e354a43f30297f3fecabf..2c0f3407bd1f1ae8adcf43cacdb479c91c096669 100644 (file)
@@ -192,5 +192,13 @@ void __init x86_64_start_reservations(char *real_mode_data)
 
        reserve_ebda_region();
 
+       switch (boot_params.hdr.hardware_subarch) {
+       case X86_SUBARCH_INTEL_MID:
+               x86_intel_mid_early_setup();
+               break;
+       default:
+               break;
+       }
+
        start_kernel();
 }
index f8062aaf5df9c830ec287f6774ad270e13d34b32..61521dc19c102114e177cbc21a4f5da9d94c20cd 100644 (file)
@@ -462,7 +462,7 @@ void fixup_irqs(void)
                 * non intr-remapping case, we can't wait till this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
-               irq_force_complete_move(irq);
+               irq_force_complete_move(desc);
 
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
@@ -470,6 +470,15 @@ void fixup_irqs(void)
                }
 
                chip = irq_data_get_irq_chip(data);
+               /*
+                * The interrupt descriptor might have been cleaned up
+                * already, but it is not yet removed from the radix tree
+                */
+               if (!chip) {
+                       raw_spin_unlock(&desc->lock);
+                       continue;
+               }
+
                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);
 
index 42982b26e32be693713d90e8fa4e18b8f771eb4a..740d7ac03a552bc4937edfc8ff7e8b9d044a61b6 100644 (file)
@@ -173,10 +173,10 @@ static __init int setup_hugepagesz(char *opt)
 }
 __setup("hugepagesz=", setup_hugepagesz);
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 static __init int gigantic_pages_init(void)
 {
-       /* With CMA we can allocate gigantic pages at runtime */
+       /* With compaction or CMA we can allocate gigantic pages at runtime */
        if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        return 0;
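The hstate registered above covers PUD-sized ("gigantic") pages; with the usual x86-64 values PUD_SHIFT = 30 and PAGE_SHIFT = 12 that is a 1 GiB page of order 18. A small stand-alone arithmetic check (the shift values are assumptions for x86-64):

#include <stdio.h>

int main(void)
{
	const unsigned int pud_shift = 30, page_shift = 12; /* assumed x86-64 values */
	unsigned long size = 1UL << pud_shift;              /* 1 GiB gigantic page  */
	unsigned int order = pud_shift - page_shift;        /* hstate order = 18    */

	printf("gigantic page: %lu MiB, order %u\n", size >> 20, order);
	return 0;
}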
index fc6a4c8f6e2abc79c29803264ad16a48067904e8..2440814b00699e33809ce061d6f86a48437f7a10 100644 (file)
@@ -33,7 +33,7 @@ struct cpa_data {
        pgd_t           *pgd;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
-       int             numpages;
+       unsigned long   numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
@@ -1350,7 +1350,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
-               BUG_ON(cpa->numpages > numpages);
+               BUG_ON(cpa->numpages > numpages || !cpa->numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
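Widening numpages to unsigned long keeps it in step with the caller's page count, and the added !cpa->numpages check turns a zero-progress iteration, which would otherwise spin forever, into an immediate BUG. A stripped-down sketch of the loop shape being guarded (process_some_pages() is a hypothetical helper, not kernel code):

/* Hedged sketch: every pass must retire at least one page, or we bail out. */
while (numpages) {
	unsigned long done = process_some_pages(numpages);  /* hypothetical helper */

	if (done == 0 || done > numpages)
		BUG();    /* mirrors BUG_ON(cpa->numpages > numpages || !cpa->numpages) */
	numpages -= done;
}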
index 2879efc73a967bca70606547014decd3703c84bf..b4a9f23d77d9090eb715df2ee76c247bc5af6683 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/dmi.h>
 #include <linux/slab.h>
 
-#include <asm-generic/pci-bridge.h>
 #include <asm/acpi.h>
 #include <asm/segment.h>
 #include <asm/io.h>
index 1c7380da65ffab641f0cb0aedc88cb7ef7e8f6cc..2d66db8f80f992d3b0609373502031aacf91f161 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
 
@@ -248,6 +249,16 @@ out:
        return ret;
 }
 
+static const struct dmi_system_id sgi_uv1_dmi[] = {
+       { NULL, "SGI UV1",
+               {       DMI_MATCH(DMI_PRODUCT_NAME,     "Stoutland Platform"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION,  "1.0"),
+                       DMI_MATCH(DMI_BIOS_VENDOR,      "SGI.COM"),
+               }
+       },
+       { } /* NULL entry stops DMI scanning */
+};
+
 void __init efi_apply_memmap_quirks(void)
 {
        /*
@@ -260,10 +271,8 @@ void __init efi_apply_memmap_quirks(void)
                efi_unmap_memmap();
        }
 
-       /*
-        * UV doesn't support the new EFI pagetable mapping yet.
-        */
-       if (is_uv_system())
+       /* UV2+ BIOS has a fix for this issue.  UV1 still needs the quirk. */
+       if (dmi_check_system(sgi_uv1_dmi))
                set_bit(EFI_OLD_MEMMAP, &efi.flags);
 }
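dmi_check_system() returns non-zero when any entry of the table matches, and an entry matches only if every DMI_MATCH() field is found in the corresponding firmware string, so the quirk now fires on UV1 boxes only. A rough userspace model of that any-entry / all-fields semantics (a simplifying assumption about how the matching behaves):

#include <stddef.h>
#include <string.h>

struct dmi_field { const char *name, *want; };

/* Hedged model: an entry matches only if all of its fields substring-match. */
static int entry_matches(const struct dmi_field *f, size_t n,
			 const char *(*lookup)(const char *name))
{
	for (size_t i = 0; i < n; i++) {
		const char *val = lookup(f[i].name);

		if (!val || !strstr(val, f[i].want))
			return 0;
	}
	return 1;
}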
 
index 1bbc21e2e4aeab9fadaaa1f21856ab0ab9a4cea1..90bb997ed0a21889fd95482bfe298862b46eaacf 100644 (file)
@@ -138,7 +138,7 @@ static void intel_mid_arch_setup(void)
                intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
        else {
                intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-               pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+               pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
        }
 
 out:
@@ -214,12 +214,10 @@ static inline int __init setup_x86_intel_mid_timer(char *arg)
        else if (strcmp("lapic_and_apbt", arg) == 0)
                intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
        else {
-               pr_warn("X86 INTEL_MID timer option %s not recognised"
-                          " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
-                          arg);
+               pr_warn("X86 INTEL_MID timer option %s not recognised; use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+                       arg);
                return -EINVAL;
        }
        return 0;
 }
 __setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
-
index c1bdafaac3ca828cec43af9e43d12933e4de7ea9..c61b6c332e971929fd4d639b9754e8707f513930 100644 (file)
@@ -220,11 +220,12 @@ static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
                if (imr_is_enabled(&imr)) {
                        base = imr_to_phys(imr.addr_lo);
                        end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+                       size = end - base + 1;
                } else {
                        base = 0;
                        end = 0;
+                       size = 0;
                }
-               size = end - base;
                seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
                           "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
                           &base, &end, size, imr.rmask, imr.wmask,
@@ -579,6 +580,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 {
        phys_addr_t base = virt_to_phys(&_text);
        size_t size = virt_to_phys(&__end_rodata) - base;
+       unsigned long start, end;
        int i;
        int ret;
 
@@ -586,18 +588,24 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
        for (i = 0; i < idev->max_imr; i++)
                imr_clear(i);
 
+       start = (unsigned long)_text;
+       end = (unsigned long)__end_rodata - 1;
+
        /*
         * Setup a locked IMR around the physical extent of the kernel
         * from the beginning of the .text section to the end of the
         * .rodata section as one physically contiguous block.
+        *
+        * We don't round up @size since it is already PAGE_SIZE aligned.
+        * See vmlinux.lds.S for details.
         */
        ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
        if (ret < 0) {
-               pr_err("unable to setup IMR for kernel: (%p - %p)\n",
-                       &_text, &__end_rodata);
+               pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
+                       size / 1024, start, end);
        } else {
-               pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
-                       size / 1024, &_text, &__end_rodata);
+               pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
+                       size / 1024, start, end);
        }
 
 }
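The size fix reflects that imr.addr_hi decodes to the last byte covered by the IMR, i.e. the extent is inclusive. Worked example: an IMR spanning 0x1000..0x1fff is 0x1000 bytes, not the 0xfff the old end - base gave:

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x1000, end = 0x1fff;  /* example inclusive IMR extent */

	printf("size = 0x%lx\n", end - base + 1);   /* 0x1000 bytes */
	return 0;
}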
index 8502ad30e61bcfc49a9e28c975d356b4b628a62c..5adb6a2fd117df01c232156e2091bfc3d190eac7 100644 (file)
@@ -109,7 +109,7 @@ unsigned long os_get_top_address(void)
                exit(1);
        }
 
-       printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT);
+       printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
        printf("Locating the top of the address space ... ");
        fflush(stdout);
 
@@ -134,7 +134,7 @@ out:
                exit(1);
        }
        top <<= UM_KERN_PAGE_SHIFT;
-       printf("0x%x\n", top);
+       printf("0x%lx\n", top);
 
        return top;
 }
index e9df1567d778e2f87ecdb1a64df1c2dfeb874071..a3b4e907c5bf28dcba1803c309cbbcceb6c1b2ca 100644 (file)
@@ -138,6 +138,22 @@ config XTENSA_VARIANT_HAVE_PERF_EVENTS
 
          If unsure, say N.
 
+config XTENSA_FAKE_NMI
+       bool "Treat PMM IRQ as NMI"
+       depends on XTENSA_VARIANT_HAVE_PERF_EVENTS
+       default n
+       help
+         If PMM IRQ is the only IRQ at EXCM level, it is safe to
+         treat it as NMI, which improves the accuracy of profiling.
+
+         If there are other interrupts at or above the PMM IRQ priority level
+         but not above the EXCM level, PMM IRQ may still be treated as NMI,
+         but only if those IRQs are unused. There will be a build warning
+         saying that this is not safe, and a bugcheck if one of those IRQs
+         actually fires.
+
+         If unsure, say N.
+
 config XTENSA_UNALIGNED_USER
        bool "Unaligned memory access in use space"
        help
index 74fed0b4e2c2b58a84bd0bbc699d7bca74f422de..c38e5a732d86f69b44826349314a6fb286305985 100644 (file)
 
 #ifdef CONFIG_MMU
 
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
+void xtensa_iounmap(volatile void __iomem *addr);
+
 /*
  * Return the virtual address for the specified bus memory.
- * Note that we currently don't support any address outside the KIO segment.
  */
 static inline void __iomem *ioremap_nocache(unsigned long offset,
                unsigned long size)
@@ -36,7 +39,7 @@ static inline void __iomem *ioremap_nocache(unsigned long offset,
            && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
                return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
        else
-               BUG();
+               return xtensa_ioremap_nocache(offset, size);
 }
 
 static inline void __iomem *ioremap_cache(unsigned long offset,
@@ -46,7 +49,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
            && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
                return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
        else
-               BUG();
+               return xtensa_ioremap_cache(offset, size);
 }
 #define ioremap_cache ioremap_cache
 
@@ -60,6 +63,13 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 
 static inline void iounmap(volatile void __iomem *addr)
 {
+       unsigned long va = (unsigned long) addr;
+
+       if (!(va >= XCHAL_KIO_CACHED_VADDR &&
+             va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
+           !(va >= XCHAL_KIO_BYPASS_VADDR &&
+             va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+               xtensa_iounmap(addr);
 }
 
 #define virt_to_bus     virt_to_phys
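The header now keeps the fixed 1:1 translation for addresses inside the KIO window and only falls back to the new xtensa_ioremap_*() helpers for everything else. Writing the bound as offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE after the >= test also avoids overflow near the top of the address space. A small userspace sketch of the same check with made-up window constants:

#include <stdbool.h>

/* Assumed example window, not the real XCHAL_* values. */
#define KIO_PADDR 0xf0000000UL
#define KIO_SIZE  0x10000000UL

static bool in_kio(unsigned long paddr)
{
	return paddr >= KIO_PADDR && paddr - KIO_PADDR < KIO_SIZE;
}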
index 83e2e4bc01ba24f54965df8b4ee3de7855e028e0..744ecf0dc3a4f51c441732493f29ab977ae572ea 100644 (file)
 #define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
 #define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
 
-#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
+#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)
 
 #define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
 
 /* LOCKLEVEL defines the interrupt level that masks all
  * general-purpose interrupts.
  */
-#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
-       defined(XCHAL_PROFILING_INTERRUPT) && \
-       PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
-       XCHAL_EXCM_LEVEL > 1 && \
-       IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
-#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
+#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
+#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
 #else
 #define LOCKLEVEL XCHAL_EXCM_LEVEL
 #endif
+
 #define TOPLEVEL XCHAL_EXCM_LEVEL
 #define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
 
index ca929e6a38b5f70a5e1e2d1492b1cdde5e5f0e2a..f9b389d4e97393b1c93be083b8db149b632602fa 100644 (file)
 #include <asm/processor.h>
 #include <linux/stringify.h>
 
-#define _INTLEVEL(x)   XCHAL_INT ## x ## _LEVEL
-#define INTLEVEL(x)    _INTLEVEL(x)
-
 #if XCHAL_NUM_TIMERS > 0 && \
-       INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+       XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     0
 # define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
 #elif XCHAL_NUM_TIMERS > 1 && \
-       INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+       XTENSA_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     1
 # define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
 #elif XCHAL_NUM_TIMERS > 2 && \
-       INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+       XTENSA_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     2
 # define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
 #else
index 42d441f7898b94db35e3a2cd55af500f23f6435a..be0cae8082c71be5d42c6b5ecf02991cd67c7ec6 100644 (file)
@@ -205,6 +205,32 @@ extern void do_IRQ(int, struct pt_regs *);
 
 #if XTENSA_FAKE_NMI
 
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
+#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
+#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire."
+
+static inline void check_valid_nmi(void)
+{
+       unsigned intread = get_sr(interrupt);
+       unsigned intenable = get_sr(intenable);
+
+       BUG_ON(intread & intenable &
+              ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
+                XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
+                BIT(XCHAL_PROFILING_INTERRUPT)));
+}
+
+#else
+
+static inline void check_valid_nmi(void)
+{
+}
+
+#endif
+
 irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
 
 DEFINE_PER_CPU(unsigned long, nmi_count);
@@ -219,6 +245,7 @@ void do_nmi(struct pt_regs *regs)
        old_regs = set_irq_regs(regs);
        nmi_enter();
        ++*this_cpu_ptr(&nmi_count);
+       check_valid_nmi();
        xtensa_pmu_irq_handler(0, NULL);
        nmi_exit();
        set_irq_regs(old_regs);
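The BUG_ON() above allows only interrupts strictly below the profiling level, plus the profiling IRQ itself, to be pending and enabled; the XOR chain builds that mask from "levels at or below" minus "exactly at the level" plus the single PMM bit. A worked example with invented mask values (not real XCHAL_* constants):

#include <stdio.h>

int main(void)
{
	unsigned andbelow = 0x00ff;  /* assumed: IRQs at or below the profiling level */
	unsigned at_level = 0x00c0;  /* assumed: IRQs exactly at the profiling level  */
	unsigned pmm_bit  = 0x0040;  /* assumed: the profiling IRQ itself             */
	unsigned allowed  = andbelow ^ at_level ^ pmm_bit;

	printf("allowed pending mask: 0x%04x\n", allowed);  /* 0x007f: below-level IRQs + PMM */
	return 0;
}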
index e601e2fbe8e6ebadc109a1c2754f6a47ce64f1c2..0b3d296a016a38cd1373588e52a1623b14833403 100644 (file)
@@ -3,5 +3,5 @@
 #
 
 obj-y                  := init.o misc.o
-obj-$(CONFIG_MMU)      += cache.o fault.o mmu.o tlb.o
+obj-$(CONFIG_MMU)      += cache.o fault.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM)  += highmem.o
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
new file mode 100644 (file)
index 0000000..d89c3c5
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * ioremap implementation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
+                                   pgprot_t prot)
+{
+       unsigned long offset = paddr & ~PAGE_MASK;
+       unsigned long pfn = __phys_to_pfn(paddr);
+       struct vm_struct *area;
+       unsigned long vaddr;
+       int err;
+
+       paddr &= PAGE_MASK;
+
+       WARN_ON(pfn_valid(pfn));
+
+       size = PAGE_ALIGN(offset + size);
+
+       area = get_vm_area(size, VM_IOREMAP);
+       if (!area)
+               return NULL;
+
+       vaddr = (unsigned long)area->addr;
+       area->phys_addr = paddr;
+
+       err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
+
+       if (err) {
+               vunmap((void *)vaddr);
+               return NULL;
+       }
+
+       flush_cache_vmap(vaddr, vaddr + size);
+       return (void __iomem *)(offset + vaddr);
+}
+
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
+{
+       return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
+}
+EXPORT_SYMBOL(xtensa_ioremap_nocache);
+
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
+{
+       return xtensa_ioremap(addr, size, PAGE_KERNEL);
+}
+EXPORT_SYMBOL(xtensa_ioremap_cache);
+
+void xtensa_iounmap(volatile void __iomem *io_addr)
+{
+       void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+
+       vunmap(addr);
+}
+EXPORT_SYMBOL(xtensa_iounmap);
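Drivers reach this code only through the generic ioremap()/iounmap() wrappers; a minimal hedged usage sketch (the physical address and size are placeholders, and the includes assume kernel context):

#include <linux/io.h>
#include <linux/errno.h>

static int example_map_regs(void)
{
	/* Placeholder MMIO window outside the KIO segment. */
	void __iomem *regs = ioremap_nocache(0xfd000000, 0x1000);

	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + 0x4);   /* example register write through the mapping */
	iounmap(regs);
	return 0;
}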
index 1699df5b0493a721a5ee1945a07c8ec516856164..888a7fec81f715199c3050576516e55b692f2b9b 100644 (file)
@@ -70,6 +70,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 }
 
+static inline unsigned get_max_io_size(struct request_queue *q,
+                                      struct bio *bio)
+{
+       unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+       unsigned mask = queue_logical_block_size(q) - 1;
+
+       /* aligned to logical block size */
+       sectors &= ~(mask >> 9);
+
+       return sectors;
+}
+
 static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
@@ -81,6 +93,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
+       const unsigned max_sectors = get_max_io_size(q, bio);
 
        bio_for_each_segment(bv, bio, iter) {
                /*
@@ -90,20 +103,19 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;
 
-               if (sectors + (bv.bv_len >> 9) >
-                               blk_max_size_offset(q, bio->bi_iter.bi_sector)) {
+               if (sectors + (bv.bv_len >> 9) > max_sectors) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < queue_max_segments(q) &&
-                           sectors < blk_max_size_offset(q,
-                                               bio->bi_iter.bi_sector)) {
+                           sectors < max_sectors) {
                                nsegs++;
-                               sectors = blk_max_size_offset(q,
-                                               bio->bi_iter.bi_sector);
+                               sectors = max_sectors;
                        }
-                       goto split;
+                       if (sectors)
+                               goto split;
+                       /* Make this single bvec the first segment */
                }
 
                if (bvprvp && blk_queue_cluster(q)) {
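get_max_io_size() first takes the per-offset cap from the queue limits and then rounds it down to a multiple of the logical block size, expressed in 512-byte sectors. Worked example: with 4096-byte logical blocks, mask = 0xfff and mask >> 9 = 7, so the sector cap is rounded down to a multiple of 8:

#include <stdio.h>

int main(void)
{
	unsigned lbs = 4096;        /* example logical block size     */
	unsigned mask = lbs - 1;    /* 0xfff                          */
	unsigned sectors = 1037;    /* example raw cap from the queue */

	sectors &= ~(mask >> 9);    /* clear the low 3 bits -> 1032, a 4 KiB multiple */
	printf("aligned cap: %u sectors\n", sectors);
	return 0;
}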
index 77f5d17779d6875d0595f23410cbd61116491450..d8996bbd7f12805b9c74429813f5b3ee3d744bdc 100644 (file)
@@ -434,42 +434,6 @@ bool blkdev_dax_capable(struct block_device *bdev)
 
        return true;
 }
-
-static int blkdev_daxset(struct block_device *bdev, unsigned long argp)
-{
-       unsigned long arg;
-       int rc = 0;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-
-       if (get_user(arg, (int __user *)(argp)))
-               return -EFAULT;
-       arg = !!arg;
-       if (arg == !!(bdev->bd_inode->i_flags & S_DAX))
-               return 0;
-
-       if (arg)
-               arg = S_DAX;
-
-       if (arg && !blkdev_dax_capable(bdev))
-               return -ENOTTY;
-
-       inode_lock(bdev->bd_inode);
-       if (bdev->bd_map_count == 0)
-               inode_set_flags(bdev->bd_inode, arg, S_DAX);
-       else
-               rc = -EBUSY;
-       inode_unlock(bdev->bd_inode);
-       return rc;
-}
-#else
-static int blkdev_daxset(struct block_device *bdev, int arg)
-{
-       if (arg)
-               return -ENOTTY;
-       return 0;
-}
 #endif
 
 static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
@@ -634,8 +598,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case BLKTRACESETUP:
        case BLKTRACETEARDOWN:
                return blk_trace_ioctl(bdev, cmd, argp);
-       case BLKDAXSET:
-               return blkdev_daxset(bdev, arg);
        case BLKDAXGET:
                return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX));
                break;
index 746935a5973ca6c76b8f66cfff8d58ce50566994..fefd01b496a0852754d9a586c9b0dd6d940ff746 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/kmod.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
+#include <linux/dax.h>
 #include <linux/blktrace_api.h>
 
 #include "partitions/check.h"
@@ -550,13 +551,24 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
        return 0;
 }
 
-unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
 {
        struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+       return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+                       NULL);
+}
+
+unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+{
        struct page *page;
 
-       page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-                                NULL);
+       /* don't populate page cache for dax capable devices */
+       if (IS_DAX(bdev->bd_inode))
+               page = read_dax_sector(bdev, n);
+       else
+               page = read_pagecache_sector(bdev, n);
+
        if (!IS_ERR(page)) {
                if (PageError(page))
                        goto fail;
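The page-cache path converts a 512-byte sector number into a page index with n >> (PAGE_CACHE_SHIFT - 9); with 4 KiB pages that is simply sector / 8 (the DAX path skips the page cache entirely). A worked example, assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
	unsigned long n = 1234;                      /* example 512-byte sector  */
	unsigned int page_shift = 12;                /* assumed 4 KiB page size  */
	unsigned long idx = n >> (page_shift - 9);   /* 1234 / 8 = 154           */

	printf("sector %lu -> page index %lu\n", n, idx);
	return 0;
}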
index f6bfdda1a0b96890e4b9e335e19ef709e0731e7e..93a1fdc1feee68c9a8b15bef682886015884ce98 100644 (file)
@@ -465,11 +465,13 @@ config CRYPTO_CRCT10DIF_PCLMUL
 config CRYPTO_GHASH
        tristate "GHASH digest algorithm"
        select CRYPTO_GF128MUL
+       select CRYPTO_HASH
        help
          GHASH is message digest algorithm for GCM (Galois/Counter Mode).
 
 config CRYPTO_POLY1305
        tristate "Poly1305 authenticator algorithm"
+       select CRYPTO_HASH
        help
          Poly1305 authenticator algorithm, RFC7539.
 
index 608a7562839d2fe549fbebd8714733436ed2c481..68a5ceaa04c81072f453a7b2ca2605bed39bd5a1 100644 (file)
@@ -54,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 
        lock_sock(sk);
        if (!ctx->more) {
-               err = crypto_ahash_init(&ctx->req);
+               err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
+                                               &ctx->completion);
                if (err)
                        goto unlock;
        }
@@ -125,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
        } else {
                if (!ctx->more) {
                        err = crypto_ahash_init(&ctx->req);
+                       err = af_alg_wait_for_completion(err, &ctx->completion);
                        if (err)
                                goto unlock;
                }
index 38c1aa89d3a0a7c4448bcd094ca1f268b9156a2d..28556fce42671e2f182d5239d3dc6468e5b1d970 100644 (file)
@@ -65,18 +65,10 @@ struct skcipher_async_req {
        struct skcipher_async_rsgl first_sgl;
        struct list_head list;
        struct scatterlist *tsg;
-       char iv[];
+       atomic_t *inflight;
+       struct skcipher_request req;
 };
 
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
-       crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
-       crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
-       crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
                      sizeof(struct scatterlist) - 1)
 
@@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
 
 static void skcipher_async_cb(struct crypto_async_request *req, int err)
 {
-       struct sock *sk = req->data;
-       struct alg_sock *ask = alg_sk(sk);
-       struct skcipher_ctx *ctx = ask->private;
-       struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+       struct skcipher_async_req *sreq = req->data;
        struct kiocb *iocb = sreq->iocb;
 
-       atomic_dec(&ctx->inflight);
+       atomic_dec(sreq->inflight);
        skcipher_free_async_sgls(sreq);
-       kfree(req);
+       kzfree(sreq);
        iocb->ki_complete(iocb, err, err);
 }
 
@@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+       struct skcipher_tfm *skc = pask->private;
+       struct crypto_skcipher *tfm = skc->skcipher;
        unsigned ivsize = crypto_skcipher_ivsize(tfm);
        struct skcipher_sg_list *sgl;
        struct af_alg_control con = {};
@@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
+       struct skcipher_tfm *skc = pask->private;
+       struct crypto_skcipher *tfm = skc->skcipher;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        struct skcipher_async_req *sreq;
        struct skcipher_request *req;
        struct skcipher_async_rsgl *last_rsgl = NULL;
-       unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
-       unsigned int reqlen = sizeof(struct skcipher_async_req) +
-                               GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+       unsigned int txbufs = 0, len = 0, tx_nents;
+       unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        int err = -ENOMEM;
        bool mark = false;
+       char *iv;
 
-       lock_sock(sk);
-       req = kmalloc(reqlen, GFP_KERNEL);
-       if (unlikely(!req))
-               goto unlock;
+       sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+       if (unlikely(!sreq))
+               goto out;
 
-       sreq = GET_SREQ(req, ctx);
+       req = &sreq->req;
+       iv = (char *)(req + 1) + reqsize;
        sreq->iocb = msg->msg_iocb;
-       memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
        INIT_LIST_HEAD(&sreq->list);
+       sreq->inflight = &ctx->inflight;
+
+       lock_sock(sk);
+       tx_nents = skcipher_all_sg_nents(ctx);
        sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
-       if (unlikely(!sreq->tsg)) {
-               kfree(req);
+       if (unlikely(!sreq->tsg))
                goto unlock;
-       }
        sg_init_table(sreq->tsg, tx_nents);
-       memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
-       skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                     skcipher_async_cb, sk);
+       memcpy(iv, ctx->iv, ivsize);
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     skcipher_async_cb, sreq);
 
        while (iov_iter_count(&msg->msg_iter)) {
                struct skcipher_async_rsgl *rsgl;
@@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
                sg_mark_end(sreq->tsg + txbufs - 1);
 
        skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
-                                  len, sreq->iv);
+                                  len, iv);
        err = ctx->enc ? crypto_skcipher_encrypt(req) :
                         crypto_skcipher_decrypt(req);
        if (err == -EINPROGRESS) {
                atomic_inc(&ctx->inflight);
                err = -EIOCBQUEUED;
+               sreq = NULL;
                goto unlock;
        }
 free:
        skcipher_free_async_sgls(sreq);
-       kfree(req);
 unlock:
        skcipher_wmem_wakeup(sk);
        release_sock(sk);
+       kzfree(sreq);
+out:
        return err;
 }
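The reworked async path makes one allocation hold the skcipher_async_req header, the transform's request context, and the IV, with iv = (char *)(req + 1) + reqsize pointing at the tail. A hedged userspace sketch of that co-allocation layout with generic types (not the crypto API structures):

#include <stdlib.h>
#include <string.h>

/* Hedged sketch: header, then 'reqsize' bytes of context, then the IV. */
struct async_req {
	void *iocb;
	char ctx[];        /* request context of 'reqsize' bytes; IV follows it */
};

static struct async_req *alloc_req(size_t reqsize, size_t ivsize, const char *iv)
{
	struct async_req *sreq = calloc(1, sizeof(*sreq) + reqsize + ivsize);

	if (sreq)
		memcpy(sreq->ctx + reqsize, iv, ivsize);   /* IV lives after the context */
	return sreq;
}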
 
@@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
-       unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
-               &ctx->req));
+       struct skcipher_tfm *skc = pask->private;
+       struct crypto_skcipher *tfm = skc->skcipher;
+       unsigned bs = crypto_skcipher_blocksize(tfm);
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int err = -EAGAIN;
@@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
        ask->private = ctx;
 
        skcipher_request_set_tfm(&ctx->req, skcipher);
-       skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+       skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+                                                CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      af_alg_complete, &ctx->completion);
 
        sk->sk_destruct = skcipher_sock_destruct;
index 237f3795cfaaa1f988fadf5b07eefe3c44609091..43fe85f20d577b4f3d1bbd6576b6d752bc578531 100644 (file)
@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (link->dump == NULL)
                        return -EINVAL;
 
+               down_read(&crypto_alg_sem);
                list_for_each_entry(alg, &crypto_alg_list, cra_list)
                        dump_alloc += CRYPTO_REPORT_MAXSIZE;
 
@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                .done = link->done,
                                .min_dump_alloc = dump_alloc,
                        };
-                       return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+                       err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
                }
+               up_read(&crypto_alg_sem);
+
+               return err;
        }
 
        err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
index 472bc141f50de87e687b2d42940bffe165230d1e..a051541a4a1718c996ba7a7b678b5b9e5e857488 100644 (file)
@@ -354,11 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
        crt->final = shash_async_final;
        crt->finup = shash_async_finup;
        crt->digest = shash_async_digest;
+       crt->setkey = shash_async_setkey;
+
+       crt->has_setkey = alg->setkey != shash_no_setkey;
 
-       if (alg->setkey) {
-               crt->setkey = shash_async_setkey;
-               crt->has_setkey = true;
-       }
        if (alg->export)
                crt->export = shash_async_export;
        if (alg->import)
index c570b1d9f09480ecec5513d3cfe61d33c26c81b5..0872d5fecb82f2dee893c7feb0ac6a408f903384 100644 (file)
@@ -880,7 +880,7 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
                break;
        case BUS_NOTIFY_DRIVER_NOT_BOUND:
        case BUS_NOTIFY_UNBOUND_DRIVER:
-               pdev->dev.pm_domain = NULL;
+               dev_pm_domain_set(&pdev->dev, NULL);
                break;
        case BUS_NOTIFY_ADD_DEVICE:
                dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
index 90e2d54be526bcc0df8ec41bf292a8016d99ba3d..1316ddd92fac0f6c59f4be85759947a2d1643334 100644 (file)
@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
                },
        },
-       {
-       .callback = video_detect_force_vendor,
-       .ident = "Dell Inspiron 5737",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
-               },
-       },
 
        /*
         * These models have a working acpi_video backlight control, and using
index 594fcabd22cd16bfcc09626338a3da33481497cc..546a3692774f8d3819ef76d9bcc656ef1af1c99d 100644 (file)
@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
        { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
        { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
        { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
index a4faa438889c075070084fb1f1cd943d61082a88..167ba7e3b92e8fcec115ddcbd1988326e7c67ba5 100644 (file)
@@ -240,8 +240,7 @@ enum {
                                                        error-handling stage) */
        AHCI_HFLAG_NO_DEVSLP            = (1 << 17), /* no device sleep */
        AHCI_HFLAG_NO_FBS               = (1 << 18), /* no FBS */
-       AHCI_HFLAG_EDGE_IRQ             = (1 << 19), /* HOST_IRQ_STAT behaves as
-                                                       Edge Triggered */
+
 #ifdef CONFIG_PCI_MSI
        AHCI_HFLAG_MULTI_MSI            = (1 << 20), /* multiple PCI MSIs */
        AHCI_HFLAG_MULTI_MSIX           = (1 << 21), /* per-port MSI-X */
@@ -250,6 +249,7 @@ enum {
        AHCI_HFLAG_MULTI_MSI            = 0,
        AHCI_HFLAG_MULTI_MSIX           = 0,
 #endif
+       AHCI_HFLAG_WAKE_BEFORE_STOP     = (1 << 22), /* wake before DMA stop */
 
        /* ap->flags bits */
 
@@ -360,6 +360,7 @@ struct ahci_host_priv {
         * be overridden anytime before the host is activated.
         */
        void                    (*start_engine)(struct ata_port *ap);
+       irqreturn_t             (*irq_handler)(int irq, void *dev_instance);
 };
 
 #ifdef CONFIG_PCI_MSI
@@ -423,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
 void ahci_print_info(struct ata_host *host, const char *scc_s);
 int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
 void ahci_error_handler(struct ata_port *ap);
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
 
 static inline void __iomem *__ahci_port_base(struct ata_host *host,
                                             unsigned int port_no)
index b36cae2fd04b2b2969cfefcfe54f63beadef48e2..e87bcec0fd7c31d9623e54cf84fa6c62b41f2f39 100644 (file)
@@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
        if (IS_ERR(hpriv))
                return PTR_ERR(hpriv);
        hpriv->plat_data = priv;
+       hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
 
        brcm_sata_alpm_init(hpriv);
 
index e2c6d9e0c5ac5ce5ce389b0ae1b0597ae979ee8b..8e3f7faf00d383f8eea284fe122e90a44ee7ce40 100644 (file)
@@ -548,6 +548,88 @@ softreset_retry:
        return rc;
 }
 
+/**
+ * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
+ * @host: Host that received the irq
+ * @irq_masked: HOST_IRQ_STAT value
+ *
+ * For hardware with a broken edge-trigger latch, the HOST_IRQ_STAT
+ * register misses an edge interrupt when the clearing of HOST_IRQ_STAT
+ * and the hardware's update of PORT_IRQ_STAT happen in the same
+ * clock cycle.
+ * The algorithm below outlines the workaround.
+ *
+ * 1. Read HOST_IRQ_STAT register and save the state.
+ * 2. Clear the HOST_IRQ_STAT register.
+ * 3. Read back the HOST_IRQ_STAT register.
+ * 4. If HOST_IRQ_STAT now reads zero, traverse the remaining ports'
+ *    PORT_IRQ_STAT registers to check whether an interrupt was
+ *    triggered in the meantime; otherwise go to step 6.
+ * 5. For every remaining port whose PORT_IRQ_STAT is non-zero, set the
+ *    corresponding bit in the HOST_IRQ_STAT state saved in step 1.
+ * 6. Handle the port interrupts.
+ * 7. Exit
+ */
+static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
+                                            u32 irq_masked)
+{
+       struct ahci_host_priv *hpriv = host->private_data;
+       void __iomem *port_mmio;
+       int i;
+
+       if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
+               for (i = 0; i < host->n_ports; i++) {
+                       if (irq_masked & (1 << i))
+                               continue;
+
+                       port_mmio = ahci_port_base(host->ports[i]);
+                       if (readl(port_mmio + PORT_IRQ_STAT))
+                               irq_masked |= (1 << i);
+               }
+       }
+
+       return ahci_handle_port_intr(host, irq_masked);
+}
+
+static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
+{
+       struct ata_host *host = dev_instance;
+       struct ahci_host_priv *hpriv;
+       unsigned int rc = 0;
+       void __iomem *mmio;
+       u32 irq_stat, irq_masked;
+
+       VPRINTK("ENTER\n");
+
+       hpriv = host->private_data;
+       mmio = hpriv->mmio;
+
+       /* sigh.  0xffffffff is a valid return from h/w */
+       irq_stat = readl(mmio + HOST_IRQ_STAT);
+       if (!irq_stat)
+               return IRQ_NONE;
+
+       irq_masked = irq_stat & hpriv->port_map;
+
+       spin_lock(&host->lock);
+
+       /*
+        * HOST_IRQ_STAT behaves as edge triggered latch meaning that
+        * it should be cleared before all the port events are cleared.
+        */
+       writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+       rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
+
+       spin_unlock(&host->lock);
+
+       VPRINTK("EXIT\n");
+
+       return IRQ_RETVAL(rc);
+}
+
 static struct ata_port_operations xgene_ahci_v1_ops = {
        .inherits = &ahci_ops,
        .host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
                hpriv->flags = AHCI_HFLAG_NO_NCQ;
                break;
        case XGENE_AHCI_V2:
-               hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ;
+               hpriv->flags |= AHCI_HFLAG_YES_FBS;
+               hpriv->irq_handler = xgene_ahci_irq_intr;
                break;
        default:
                break;
index d61740e78d6dc93a1b17d9e3c9d9425067190a21..513b3fa74d7886949419448cc18a591cf4bcd4cd 100644 (file)
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
                                    const char *buf, size_t size);
 static ssize_t ahci_show_em_supported(struct device *dev,
                                      struct device_attribute *attr, char *buf);
+static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
 
 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -496,8 +497,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
                }
        }
 
-       /* fabricate port_map from cap.nr_ports */
-       if (!port_map) {
+       /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+       if (!port_map && vers < 0x10300) {
                port_map = (1 << ahci_nr_ports(cap)) - 1;
                dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
 
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 
        if (!hpriv->start_engine)
                hpriv->start_engine = ahci_start_engine;
+
+       if (!hpriv->irq_handler)
+               hpriv->irq_handler = ahci_single_level_irq_intr;
 }
 EXPORT_SYMBOL_GPL(ahci_save_initial_config);
 
@@ -593,8 +597,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine);
 int ahci_stop_engine(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        u32 tmp;
 
+       /*
+        * On some controllers, stopping a port's DMA engine while the port
+        * is in ALPM state (partial or slumber) results in failures on
+        * subsequent DMA engine starts.  For those controllers, put the
+        * port back in active state before stopping its DMA engine.
+        */
+       if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
+           (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
+           ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
+               dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
+               return -EIO;
+       }
+
        tmp = readl(port_mmio + PORT_CMD);
 
        /* check if the HBA is idle */
@@ -689,6 +707,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
        void __iomem *port_mmio = ahci_port_base(ap);
 
        if (policy != ATA_LPM_MAX_POWER) {
+               /* wakeup flag only applies to the max power policy */
+               hints &= ~ATA_LPM_WAKE_ONLY;
+
                /*
                 * Disable interrupts on Phy Ready. This keeps us from
                 * getting woken up due to spurious phy ready
@@ -704,7 +725,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
                u32 cmd = readl(port_mmio + PORT_CMD);
 
                if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
-                       cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
+                       if (!(hints & ATA_LPM_WAKE_ONLY))
+                               cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
                        cmd |= PORT_CMD_ICC_ACTIVE;
 
                        writel(cmd, port_mmio + PORT_CMD);
@@ -712,6 +734,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 
                        /* wait 10ms to be sure we've come out of LPM state */
                        ata_msleep(ap, 10);
+
+                       if (hints & ATA_LPM_WAKE_ONLY)
+                               return 0;
                } else {
                        cmd |= PORT_CMD_ALPE;
                        if (policy == ATA_LPM_MIN_POWER)
@@ -1825,7 +1850,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
-static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
 {
        unsigned int i, handled = 0;
 
@@ -1851,43 +1876,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
 
        return handled;
 }
-
-static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
-{
-       struct ata_host *host = dev_instance;
-       struct ahci_host_priv *hpriv;
-       unsigned int rc = 0;
-       void __iomem *mmio;
-       u32 irq_stat, irq_masked;
-
-       VPRINTK("ENTER\n");
-
-       hpriv = host->private_data;
-       mmio = hpriv->mmio;
-
-       /* sigh.  0xffffffff is a valid return from h/w */
-       irq_stat = readl(mmio + HOST_IRQ_STAT);
-       if (!irq_stat)
-               return IRQ_NONE;
-
-       irq_masked = irq_stat & hpriv->port_map;
-
-       spin_lock(&host->lock);
-
-       /*
-        * HOST_IRQ_STAT behaves as edge triggered latch meaning that
-        * it should be cleared before all the port events are cleared.
-        */
-       writel(irq_stat, mmio + HOST_IRQ_STAT);
-
-       rc = ahci_handle_port_intr(host, irq_masked);
-
-       spin_unlock(&host->lock);
-
-       VPRINTK("EXIT\n");
-
-       return IRQ_RETVAL(rc);
-}
+EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
 
 static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
 {
@@ -2514,14 +2503,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
        int irq = hpriv->irq;
        int rc;
 
-       if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX))
+       if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+               if (hpriv->irq_handler)
+                       dev_warn(host->dev,
+                                "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+
                rc = ahci_host_activate_multi_irqs(host, sht);
-       else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ)
-               rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr,
-                                      IRQF_SHARED, sht);
-       else
-               rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
+       } else {
+               rc = ata_host_activate(host, irq, hpriv->irq_handler,
                                       IRQF_SHARED, sht);
+       }
+
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_host_activate);
index cbb74719d2c1b80d61590ddf3fb2f9c71a31ce06..55e257c268ddde37677cf4ff85472f65e9a63c97 100644 (file)
@@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "SAMSUNG CD-ROM SN-124", "N001",      ATA_HORKAGE_NODMA },
        { "Seagate STT20000A", NULL,            ATA_HORKAGE_NODMA },
        { " 2GB ATA Flash Disk", "ADMA428M",    ATA_HORKAGE_NODMA },
+       { "VRFDFC22048UCHC-TE*", NULL,          ATA_HORKAGE_NODMA },
        /* Odd clown on sil3726/4726 PMPs */
        { "Config  Disk",       NULL,           ATA_HORKAGE_DISABLE },
 
index cdf6215a9a22beb93ede75a702797e12c5ad4870..051b6158d1b7f45b7116b9debc98376a9d4d240f 100644 (file)
@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 {
        struct ata_port *ap = qc->ap;
-       unsigned long flags;
 
        if (ap->ops->error_handler) {
                if (in_wq) {
-                       spin_lock_irqsave(ap->lock, flags);
-
                        /* EH might have kicked in while host lock is
                         * released.
                         */
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
                                } else
                                        ata_port_freeze(ap);
                        }
-
-                       spin_unlock_irqrestore(ap->lock, flags);
                } else {
                        if (likely(!(qc->err_mask & AC_ERR_HSM)))
                                ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
                }
        } else {
                if (in_wq) {
-                       spin_lock_irqsave(ap->lock, flags);
                        ata_sff_irq_on(ap);
                        ata_qc_complete(qc);
-                       spin_unlock_irqrestore(ap->lock, flags);
                } else
                        ata_qc_complete(qc);
        }
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 {
        struct ata_link *link = qc->dev->link;
        struct ata_eh_info *ehi = &link->eh_info;
-       unsigned long flags = 0;
        int poll_next;
 
+       lockdep_assert_held(ap->lock);
+
        WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
        /* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
                        }
                }
 
-               /* Send the CDB (atapi) or the first data block (ata pio out).
-                * During the state transition, interrupt handler shouldn't
-                * be invoked before the data transfer is complete and
-                * hsm_task_state is changed. Hence, the following locking.
-                */
-               if (in_wq)
-                       spin_lock_irqsave(ap->lock, flags);
-
                if (qc->tf.protocol == ATA_PROT_PIO) {
                        /* PIO data out protocol.
                         * send first data block.
@@ -1135,9 +1121,6 @@ fsm_start:
                        /* send CDB */
                        atapi_send_cdb(ap, qc);
 
-               if (in_wq)
-                       spin_unlock_irqrestore(ap->lock, flags);
-
                /* if polling, ata_sff_pio_task() handles the rest.
                 * otherwise, interrupt handler takes over from here.
                 */
@@ -1296,7 +1279,8 @@ fsm_start:
                break;
        default:
                poll_next = 0;
-               BUG();
+               WARN(true, "ata%d: SFF host state machine in invalid state %d",
+                    ap->print_id, ap->hsm_task_state);
        }
 
        return poll_next;
@@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work)
        u8 status;
        int poll_next;
 
+       spin_lock_irq(ap->lock);
+
        BUG_ON(ap->sff_pio_task_link == NULL);
        /* qc can be NULL if timeout occurred */
        qc = ata_qc_from_tag(ap, link->active_tag);
        if (!qc) {
                ap->sff_pio_task_link = NULL;
-               return;
+               goto out_unlock;
        }
 
 fsm_start:
@@ -1381,11 +1367,14 @@ fsm_start:
         */
        status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
        if (status & ATA_BUSY) {
+               spin_unlock_irq(ap->lock);
                ata_msleep(ap, 2);
+               spin_lock_irq(ap->lock);
+
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
                        ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
-                       return;
+                       goto out_unlock;
                }
        }
 
@@ -1402,6 +1391,8 @@ fsm_start:
         */
        if (poll_next)
                goto fsm_start;
+out_unlock:
+       spin_unlock_irq(ap->lock);
 }
 
 /**
index e3d4b059fcd14bcb8f4a9e21809c5cb7d30ededa..e347e7acd8edb440d284d6309187bbb80cd90cd2 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/of.h>
 #include <linux/gfp.h>
+#include <linux/pci.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -30,7 +31,6 @@
 #include <asm/macio.h>
 #include <asm/io.h>
 #include <asm/dbdma.h>
-#include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/mediabay.h>
index 89f5cf68d80a143198c2c253d6d21c0d9fb38388..2738039aae9eea49f1ce6c593dc0392199f7ebb9 100644 (file)
@@ -206,6 +206,8 @@ static void component_match_release(struct device *master,
                if (mc->release)
                        mc->release(master, mc->data);
        }
+
+       kfree(match->compare);
 }
 
 static void devm_component_match_release(struct device *dev, void *res)
@@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev,
        if (match->alloc == num)
                return 0;
 
-       new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL);
+       new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
        if (match->compare) {
                memcpy(new, match->compare, sizeof(*new) *
                                            min(match->num, num));
-               devm_kfree(dev, match->compare);
+               kfree(match->compare);
        }
        match->compare = new;
        match->alloc = num;
@@ -283,6 +285,24 @@ void component_match_add_release(struct device *master,
 }
 EXPORT_SYMBOL(component_match_add_release);
 
+static void free_master(struct master *master)
+{
+       struct component_match *match = master->match;
+       int i;
+
+       list_del(&master->node);
+
+       if (match) {
+               for (i = 0; i < match->num; i++) {
+                       struct component *c = match->compare[i].component;
+                       if (c)
+                               c->master = NULL;
+               }
+       }
+
+       kfree(master);
+}
+
 int component_master_add_with_match(struct device *dev,
        const struct component_master_ops *ops,
        struct component_match *match)
@@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev,
 
        ret = try_to_bring_up_master(master, NULL);
 
-       if (ret < 0) {
-               /* Delete off the list if we weren't successful */
-               list_del(&master->node);
-               kfree(master);
-       }
+       if (ret < 0)
+               free_master(master);
+
        mutex_unlock(&component_mutex);
 
        return ret < 0 ? ret : 0;
@@ -324,25 +342,12 @@ void component_master_del(struct device *dev,
        const struct component_master_ops *ops)
 {
        struct master *master;
-       int i;
 
        mutex_lock(&component_mutex);
        master = __master_find(dev, ops);
        if (master) {
-               struct component_match *match = master->match;
-
                take_down_master(master);
-
-               list_del(&master->node);
-
-               if (match) {
-                       for (i = 0; i < match->num; i++) {
-                               struct component *c = match->compare[i].component;
-                               if (c)
-                                       c->master = NULL;
-                       }
-               }
-               kfree(master);
+               free_master(master);
        }
        mutex_unlock(&component_mutex);
 }
index 47c43386786b13229f4fbeb5cbdbfe03a2009804..279e53989374f065d01039caf110a14b95f887e0 100644 (file)
@@ -284,6 +284,7 @@ out_free_priv_data:
 
        return err;
 }
+EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
 
 /**
  * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
@@ -301,6 +302,7 @@ void platform_msi_domain_free_irqs(struct device *dev)
        msi_domain_free_irqs(dev->msi_domain, dev);
        platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
 }
+EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
 
 /**
  * platform_msi_get_host_data - Query the private data associated with
index 73d6e5d39e33e1edef9033d5e97b99ecc50a80cf..f437afa17f2b1d2933ed0882d7e7f291c6d86309 100644 (file)
@@ -558,10 +558,15 @@ static int platform_drv_probe(struct device *_dev)
                return ret;
 
        ret = dev_pm_domain_attach(_dev, true);
-       if (ret != -EPROBE_DEFER && drv->probe) {
-               ret = drv->probe(dev);
-               if (ret)
-                       dev_pm_domain_detach(_dev, true);
+       if (ret != -EPROBE_DEFER) {
+               if (drv->probe) {
+                       ret = drv->probe(dev);
+                       if (ret)
+                               dev_pm_domain_detach(_dev, true);
+               } else {
+                       /* don't fail if just dev_pm_domain_attach failed */
+                       ret = 0;
+               }
        }
 
        if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
index 93ed14cc22524ccdd04301dbe460e0958bb69acf..f6a9ad52cbbff0663e58540f33bb29e08abf0e96 100644 (file)
@@ -146,7 +146,7 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
        if (dev->pm_domain == pd)
                return;
 
-       WARN(device_is_bound(dev),
+       WARN(pd && device_is_bound(dev),
             "PM domains can only be changed for unbound devices\n");
        dev->pm_domain = pd;
        device_pm_check_callbacks(dev);
index 6ac9a7f33b640ae1105a72f76974e8939a30ae45..301b785f9f56f5d00b7f83798b6f7cce53bf0e03 100644 (file)
@@ -162,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 
 /**
  * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
- * @genpd: PM domait to power off.
+ * @genpd: PM domain to power off.
  *
  * Queue up the execution of genpd_poweroff() unless it's already been done
  * before.
@@ -172,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
        queue_work(pm_wq, &genpd->power_off_work);
 }
 
-static int genpd_poweron(struct generic_pm_domain *genpd);
-
 /**
- * __genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
  *
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-static int __genpd_poweron(struct generic_pm_domain *genpd)
+static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 {
        struct gpd_link *link;
        int ret = 0;
@@ -196,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               genpd_sd_counter_inc(link->master);
+               struct generic_pm_domain *master = link->master;
+
+               genpd_sd_counter_inc(master);
+
+               mutex_lock_nested(&master->lock, depth + 1);
+               ret = genpd_poweron(master, depth + 1);
+               mutex_unlock(&master->lock);
 
-               ret = genpd_poweron(link->master);
                if (ret) {
-                       genpd_sd_counter_dec(link->master);
+                       genpd_sd_counter_dec(master);
                        goto err;
                }
        }
@@ -223,20 +227,6 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
        return ret;
 }
 
-/**
- * genpd_poweron - Restore power to a given PM domain and its masters.
- * @genpd: PM domain to power up.
- */
-static int genpd_poweron(struct generic_pm_domain *genpd)
-{
-       int ret;
-
-       mutex_lock(&genpd->lock);
-       ret = __genpd_poweron(genpd);
-       mutex_unlock(&genpd->lock);
-       return ret;
-}
-
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
        return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
@@ -484,7 +474,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
        }
 
        mutex_lock(&genpd->lock);
-       ret = __genpd_poweron(genpd);
+       ret = genpd_poweron(genpd, 0);
        mutex_unlock(&genpd->lock);
 
        if (ret)
@@ -1339,8 +1329,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
        if (!link)
                return -ENOMEM;
 
-       mutex_lock(&genpd->lock);
-       mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+       mutex_lock(&subdomain->lock);
+       mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
        if (genpd->status == GPD_STATE_POWER_OFF
            &&  subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1363,8 +1353,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                genpd_sd_counter_inc(genpd);
 
  out:
-       mutex_unlock(&subdomain->lock);
        mutex_unlock(&genpd->lock);
+       mutex_unlock(&subdomain->lock);
        if (ret)
                kfree(link);
        return ret;
@@ -1385,7 +1375,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
                return -EINVAL;
 
-       mutex_lock(&genpd->lock);
+       mutex_lock(&subdomain->lock);
+       mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
        if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
                pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1398,22 +1389,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                if (link->slave != subdomain)
                        continue;
 
-               mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
                list_del(&link->master_node);
                list_del(&link->slave_node);
                kfree(link);
                if (subdomain->status != GPD_STATE_POWER_OFF)
                        genpd_sd_counter_dec(genpd);
 
-               mutex_unlock(&subdomain->lock);
-
                ret = 0;
                break;
        }
 
 out:
        mutex_unlock(&genpd->lock);
+       mutex_unlock(&subdomain->lock);
 
        return ret;
 }
@@ -1818,8 +1806,10 @@ int genpd_dev_pm_attach(struct device *dev)
 
        dev->pm_domain->detach = genpd_dev_pm_detach;
        dev->pm_domain->sync = genpd_dev_pm_sync;
-       ret = genpd_poweron(pd);
 
+       mutex_lock(&pd->lock);
+       ret = genpd_poweron(pd, 0);
+       mutex_unlock(&pd->lock);
 out:
        return ret ? -EPROBE_DEFER : 0;
 }
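
All generic PM domain mutexes share a single lockdep class, so walking up the master chain with plain mutex_lock() would trigger a false "possible recursive locking" report; the new depth argument is forwarded to mutex_lock_nested() as the lockdep subclass. Schematically, for a domain with one master (illustration only, not part of the commit):

	static int example_poweron_chain(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *master)
	{
		mutex_lock(&genpd->lock);		/* depth 0: caller's lock */
		mutex_lock_nested(&master->lock, 1);	/* depth 1: distinct subclass */

		/* ... power on master first, then genpd ... */

		mutex_unlock(&master->lock);
		mutex_unlock(&genpd->lock);
		return 0;
	}
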
index cf351d3dab1c3b7a9badf6b468092ca216c47012..ab711c2c3e0031ec3324e56942b32ed0129dd337 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/of.h>
 #include <linux/export.h>
+#include <linux/regulator/consumer.h>
 
 #include "opp.h"
 
@@ -229,6 +231,82 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 
+/**
+ * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max voltage latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+       struct device_opp *dev_opp;
+       struct dev_pm_opp *opp;
+       struct regulator *reg;
+       unsigned long latency_ns = 0;
+       unsigned long min_uV = ~0, max_uV = 0;
+       int ret;
+
+       rcu_read_lock();
+
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               rcu_read_unlock();
+               return 0;
+       }
+
+       reg = dev_opp->regulator;
+       if (IS_ERR_OR_NULL(reg)) {
+               /* Regulator may not be required for device */
+               if (reg)
+                       dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
+                               PTR_ERR(reg));
+               rcu_read_unlock();
+               return 0;
+       }
+
+       list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+               if (!opp->available)
+                       continue;
+
+               if (opp->u_volt_min < min_uV)
+                       min_uV = opp->u_volt_min;
+               if (opp->u_volt_max > max_uV)
+                       max_uV = opp->u_volt_max;
+       }
+
+       rcu_read_unlock();
+
+       /*
+        * The caller needs to ensure that dev_opp (and hence the regulator)
+        * isn't freed, while we are executing this routine.
+        */
+       ret = regulator_set_voltage_time(reg, min_uV, max_uV);
+       if (ret > 0)
+               latency_ns = ret * 1000;
+
+       return latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
+
+/**
+ * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
+ *                                          nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max transition latency, in nanoseconds, to
+ * switch from one OPP to other.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+       return dev_pm_opp_get_max_volt_latency(dev) +
+               dev_pm_opp_get_max_clock_latency(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
+
 /**
  * dev_pm_opp_get_suspend_opp() - Get suspend opp
  * @dev:       device for which we do this operation
@@ -451,6 +529,182 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
+/*
+ * The caller needs to ensure that device_opp (and hence the clk) isn't freed,
+ * while clk returned here is used.
+ */
+static struct clk *_get_opp_clk(struct device *dev)
+{
+       struct device_opp *dev_opp;
+       struct clk *clk;
+
+       rcu_read_lock();
+
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+               clk = ERR_CAST(dev_opp);
+               goto unlock;
+       }
+
+       clk = dev_opp->clk;
+       if (IS_ERR(clk))
+               dev_err(dev, "%s: No clock available for the device\n",
+                       __func__);
+
+unlock:
+       rcu_read_unlock();
+       return clk;
+}
+
+static int _set_opp_voltage(struct device *dev, struct regulator *reg,
+                           unsigned long u_volt, unsigned long u_volt_min,
+                           unsigned long u_volt_max)
+{
+       int ret;
+
+       /* Regulator not available for device */
+       if (IS_ERR(reg)) {
+               dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
+                       PTR_ERR(reg));
+               return 0;
+       }
+
+       dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
+               u_volt, u_volt_max);
+
+       ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
+                                           u_volt_max);
+       if (ret)
+               dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
+                       __func__, u_volt_min, u_volt, u_volt_max, ret);
+
+       return ret;
+}
+
+/**
+ * dev_pm_opp_set_rate() - Configure new OPP based on frequency
+ * @dev:        device for which we do this operation
+ * @target_freq: frequency to achieve
+ *
+ * This configures the power-supplies and clock source to the levels specified
+ * by the OPP corresponding to the target_freq.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+       struct device_opp *dev_opp;
+       struct dev_pm_opp *old_opp, *opp;
+       struct regulator *reg;
+       struct clk *clk;
+       unsigned long freq, old_freq;
+       unsigned long u_volt, u_volt_min, u_volt_max;
+       unsigned long ou_volt, ou_volt_min, ou_volt_max;
+       int ret;
+
+       if (unlikely(!target_freq)) {
+               dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
+                       target_freq);
+               return -EINVAL;
+       }
+
+       clk = _get_opp_clk(dev);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       freq = clk_round_rate(clk, target_freq);
+       if ((long)freq <= 0)
+               freq = target_freq;
+
+       old_freq = clk_get_rate(clk);
+
+       /* Return early if nothing to do */
+       if (old_freq == freq) {
+               dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+                       __func__, freq);
+               return 0;
+       }
+
+       rcu_read_lock();
+
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+               rcu_read_unlock();
+               return PTR_ERR(dev_opp);
+       }
+
+       old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+       if (!IS_ERR(old_opp)) {
+               ou_volt = old_opp->u_volt;
+               ou_volt_min = old_opp->u_volt_min;
+               ou_volt_max = old_opp->u_volt_max;
+       } else {
+               dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
+                       __func__, old_freq, PTR_ERR(old_opp));
+       }
+
+       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+       if (IS_ERR(opp)) {
+               ret = PTR_ERR(opp);
+               dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+                       __func__, freq, ret);
+               rcu_read_unlock();
+               return ret;
+       }
+
+       u_volt = opp->u_volt;
+       u_volt_min = opp->u_volt_min;
+       u_volt_max = opp->u_volt_max;
+
+       reg = dev_opp->regulator;
+
+       rcu_read_unlock();
+
+       /* Scaling up? Scale voltage before frequency */
+       if (freq > old_freq) {
+               ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+                                      u_volt_max);
+               if (ret)
+                       goto restore_voltage;
+       }
+
+       /* Change frequency */
+
+       dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+               __func__, old_freq, freq);
+
+       ret = clk_set_rate(clk, freq);
+       if (ret) {
+               dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+                       ret);
+               goto restore_voltage;
+       }
+
+       /* Scaling down? Scale voltage after frequency */
+       if (freq < old_freq) {
+               ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+                                      u_volt_max);
+               if (ret)
+                       goto restore_freq;
+       }
+
+       return 0;
+
+restore_freq:
+       if (clk_set_rate(clk, old_freq))
+               dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+                       __func__, old_freq);
+restore_voltage:
+       /* This shouldn't harm even if the voltages weren't updated earlier */
+       if (!IS_ERR(old_opp))
+               _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
+
 /* List-dev Helpers */
 static void _kfree_list_dev_rcu(struct rcu_head *head)
 {
@@ -505,6 +759,8 @@ static struct device_opp *_add_device_opp(struct device *dev)
 {
        struct device_opp *dev_opp;
        struct device_list_opp *list_dev;
+       struct device_node *np;
+       int ret;
 
        /* Check for existing list for 'dev' first */
        dev_opp = _find_device_opp(dev);
@@ -527,6 +783,30 @@ static struct device_opp *_add_device_opp(struct device *dev)
                return NULL;
        }
 
+       /*
+        * Only required for backward compatibility with v1 bindings, but isn't
+        * harmful for other cases. And so we do it unconditionally.
+        */
+       np = of_node_get(dev->of_node);
+       if (np) {
+               u32 val;
+
+               if (!of_property_read_u32(np, "clock-latency", &val))
+                       dev_opp->clock_latency_ns_max = val;
+               of_property_read_u32(np, "voltage-tolerance",
+                                    &dev_opp->voltage_tolerance_v1);
+               of_node_put(np);
+       }
+
+       /* Find clk for the device */
+       dev_opp->clk = clk_get(dev, NULL);
+       if (IS_ERR(dev_opp->clk)) {
+               ret = PTR_ERR(dev_opp->clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
+                               ret);
+       }
+
        srcu_init_notifier_head(&dev_opp->srcu_head);
        INIT_LIST_HEAD(&dev_opp->opp_list);
 
@@ -565,6 +845,13 @@ static void _remove_device_opp(struct device_opp *dev_opp)
        if (dev_opp->prop_name)
                return;
 
+       if (!IS_ERR_OR_NULL(dev_opp->regulator))
+               return;
+
+       /* Release clk */
+       if (!IS_ERR(dev_opp->clk))
+               clk_put(dev_opp->clk);
+
        list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
                                    node);
 
@@ -683,6 +970,22 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
        return opp;
 }
 
+static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+                                        struct device_opp *dev_opp)
+{
+       struct regulator *reg = dev_opp->regulator;
+
+       if (!IS_ERR(reg) &&
+           !regulator_is_supported_voltage(reg, opp->u_volt_min,
+                                           opp->u_volt_max)) {
+               pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+                       __func__, opp->u_volt_min, opp->u_volt_max);
+               return false;
+       }
+
+       return true;
+}
+
 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
                    struct device_opp *dev_opp)
 {
@@ -724,6 +1027,12 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
                dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
                        __func__, ret);
 
+       if (!_opp_supported_by_regulators(new_opp, dev_opp)) {
+               new_opp->available = false;
+               dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
+                        __func__, new_opp->rate);
+       }
+
        return 0;
 }
 
@@ -759,6 +1068,7 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
 {
        struct device_opp *dev_opp;
        struct dev_pm_opp *new_opp;
+       unsigned long tol;
        int ret;
 
        /* Hold our list modification lock here */
@@ -772,7 +1082,10 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
 
        /* populate the opp table */
        new_opp->rate = freq;
+       tol = u_volt * dev_opp->voltage_tolerance_v1 / 100;
        new_opp->u_volt = u_volt;
+       new_opp->u_volt_min = u_volt - tol;
+       new_opp->u_volt_max = u_volt + tol;
        new_opp->available = true;
        new_opp->dynamic = dynamic;
 
@@ -1085,6 +1398,113 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
 
+/**
+ * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * @dev: Device for which regulator name is being set.
+ * @name: Name of the regulator.
+ *
+ * In order to support OPP switching, OPP layer needs to know the name of the
+ * device's regulator, as the core would be required to switch voltages as well.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+       struct device_opp *dev_opp;
+       struct regulator *reg;
+       int ret;
+
+       mutex_lock(&dev_opp_list_lock);
+
+       dev_opp = _add_device_opp(dev);
+       if (!dev_opp) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       /* This should be called before OPPs are initialized */
+       if (WARN_ON(!list_empty(&dev_opp->opp_list))) {
+               ret = -EBUSY;
+               goto err;
+       }
+
+       /* Already have a regulator set */
+       if (WARN_ON(!IS_ERR_OR_NULL(dev_opp->regulator))) {
+               ret = -EBUSY;
+               goto err;
+       }
+       /* Allocate the regulator */
+       reg = regulator_get_optional(dev, name);
+       if (IS_ERR(reg)) {
+               ret = PTR_ERR(reg);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "%s: no regulator (%s) found: %d\n",
+                               __func__, name, ret);
+               goto err;
+       }
+
+       dev_opp->regulator = reg;
+
+       mutex_unlock(&dev_opp_list_lock);
+       return 0;
+
+err:
+       _remove_device_opp(dev_opp);
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+
+/**
+ * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
+ * @dev: Device for which regulator was set.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulator(struct device *dev)
+{
+       struct device_opp *dev_opp;
+
+       mutex_lock(&dev_opp_list_lock);
+
+       /* Check for existing list for 'dev' first */
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+               goto unlock;
+       }
+
+       if (IS_ERR_OR_NULL(dev_opp->regulator)) {
+               dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+               goto unlock;
+       }
+
+       /* Make sure there are no concurrent readers while updating dev_opp */
+       WARN_ON(!list_empty(&dev_opp->opp_list));
+
+       regulator_put(dev_opp->regulator);
+       dev_opp->regulator = ERR_PTR(-EINVAL);
+
+       /* Try freeing device_opp if this was the last blocking resource */
+       _remove_device_opp(dev_opp);
+
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+
 static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
                              struct device_node *np)
 {
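
Taken together, dev_pm_opp_set_regulator(), dev_pm_opp_set_rate() and dev_pm_opp_put_regulator() let a cpufreq or devfreq driver scale clock and supply with a single call. A minimal usage sketch, assuming a device with an OPP table in DT and a supply named "cpu" (the device, supply name and dev_pm_opp_of_add_table() usage are illustrative, not taken from this commit):

	static int example_init(struct device *dev)
	{
		int ret;

		/* Must be called before any OPPs are added for the device. */
		ret = dev_pm_opp_set_regulator(dev, "cpu");
		if (ret)
			return ret;

		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			dev_pm_opp_put_regulator(dev);

		return ret;
	}

	static int example_set_freq(struct device *dev, unsigned long freq_hz)
	{
		/* Picks the matching OPP and orders voltage vs. clock changes. */
		return dev_pm_opp_set_rate(dev, freq_hz);
	}
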
index 690638ef36ee534c1ab9a57d8c4c7ee840f78a51..4f1bdfc7da03be2be60da43e62e21be3a6a55ea5 100644 (file)
@@ -22,6 +22,9 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
+struct clk;
+struct regulator;
+
 /* Lock to allow exclusive modification to the device and opp lists */
 extern struct mutex dev_opp_list_lock;
 
@@ -132,9 +135,13 @@ struct device_list_opp {
  * @supported_hw: Array of version number to support.
  * @supported_hw_count: Number of elements in supported_hw array.
  * @prop_name: A name to postfix to many DT properties, while parsing them.
+ * @clk: Device's clock handle
+ * @regulator: Supply regulator
  * @dentry:    debugfs dentry pointer of the real device directory (not links).
  * @dentry_name: Name of the real dentry.
  *
+ * @voltage_tolerance_v1: In percentage, for v1 bindings only.
+ *
  * This is an internal data structure maintaining the link to opps attached to
  * a device. This structure is not meant to be shared to users as it is
  * meant for book keeping and private to OPP library.
@@ -153,12 +160,18 @@ struct device_opp {
 
        struct device_node *np;
        unsigned long clock_latency_ns_max;
+
+       /* For backward compatibility with v1 bindings */
+       unsigned int voltage_tolerance_v1;
+
        bool shared_opp;
        struct dev_pm_opp *suspend_opp;
 
        unsigned int *supported_hw;
        unsigned int supported_hw_count;
        const char *prop_name;
+       struct clk *clk;
+       struct regulator *regulator;
 
 #ifdef CONFIG_DEBUG_FS
        struct dentry *dentry;
index 8812bfb9e3b89256f4a5d1468cf45484b18e4cd1..eea51569f0eb9c9e36f5a4bca90de61c8f833444 100644 (file)
@@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context,
        while (val_size) {
                switch (ctx->val_bytes) {
                case 1:
-                       __raw_writeb(*(u8 *)val, ctx->regs + offset);
+                       writeb(*(u8 *)val, ctx->regs + offset);
                        break;
                case 2:
-                       __raw_writew(*(u16 *)val, ctx->regs + offset);
+                       writew(*(u16 *)val, ctx->regs + offset);
                        break;
                case 4:
-                       __raw_writel(*(u32 *)val, ctx->regs + offset);
+                       writel(*(u32 *)val, ctx->regs + offset);
                        break;
 #ifdef CONFIG_64BIT
                case 8:
-                       __raw_writeq(*(u64 *)val, ctx->regs + offset);
+                       writeq(*(u64 *)val, ctx->regs + offset);
                        break;
 #endif
                default:
@@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context,
        while (val_size) {
                switch (ctx->val_bytes) {
                case 1:
-                       *(u8 *)val = __raw_readb(ctx->regs + offset);
+                       *(u8 *)val = readb(ctx->regs + offset);
                        break;
                case 2:
-                       *(u16 *)val = __raw_readw(ctx->regs + offset);
+                       *(u16 *)val = readw(ctx->regs + offset);
                        break;
                case 4:
-                       *(u32 *)val = __raw_readl(ctx->regs + offset);
+                       *(u32 *)val = readl(ctx->regs + offset);
                        break;
 #ifdef CONFIG_64BIT
                case 8:
-                       *(u64 *)val = __raw_readq(ctx->regs + offset);
+                       *(u64 *)val = readq(ctx->regs + offset);
                        break;
 #endif
                default:
index 38f156745d533a666deae90f167dd6d51d718d54..7e4ddfb076d3824ef1024b0d5381e312610835b1 100644 (file)
@@ -48,7 +48,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
 #ifdef CONFIG_BCMA_DRIVER_MIPS
-void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
 extern struct platform_device bcma_pflash_dev;
 #endif /* CONFIG_BCMA_DRIVER_MIPS */
 
index b7c8a8d4e6d1a232d44a28412a8b9ceba94e1ce9..b0f44a2937b9cb03d3ae8bbbebfc7035fe134ab2 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
+static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
                                         u32 mask, u32 value)
 {
@@ -115,6 +117,8 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
 
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
 {
+       struct bcma_bus *bus = cc->core->bus;
+
        if (cc->early_setup_done)
                return;
 
@@ -129,6 +133,9 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
        if (cc->capabilities & BCMA_CC_CAP_PMU)
                bcma_pmu_early_init(cc);
 
+       if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
+               bcma_chipco_serial_init(cc);
+
        cc->early_setup_done = true;
 }
 
@@ -185,11 +192,12 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
                        ticks = 2;
                else if (ticks > maxt)
                        ticks = maxt;
-               bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
        } else {
                struct bcma_bus *bus = cc->core->bus;
 
                if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
+                   bus->chipinfo.id != BCMA_CHIP_ID_BCM47094 &&
                    bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
                        bcma_core_set_clockmode(cc->core,
                                                ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
@@ -314,9 +322,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
        return res;
 }
 
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
+#if IS_BUILTIN(CONFIG_BCM47XX)
        unsigned int irq;
        u32 baud_base;
        u32 i;
@@ -358,5 +366,5 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
                ports[i].baud_base = baud_base;
                ports[i].reg_shift = 0;
        }
+#endif /* CONFIG_BCM47XX */
 }
-#endif /* CONFIG_BCMA_DRIVER_MIPS */
index fe0d48cb17788a9f3614fd8a36add7bac5f58848..f1eb4d3e1d575b2806c3d43155efb85f7640228d 100644 (file)
 
 u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
 {
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
-       return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+       return bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
 
 void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
 {
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
 
 void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
                             u32 set)
 {
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
-       bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+       bcma_pmu_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
 
 void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
                                 u32 offset, u32 mask, u32 set)
 {
-       bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
-       bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR);
+       bcma_pmu_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
 
 void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
                                u32 set)
 {
-       bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
-       bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_REGCTL_ADDR);
+       bcma_pmu_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
 
@@ -60,18 +60,18 @@ static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc)
 {
        u32 ilp_ctl, alp_hz;
 
-       if (!(bcma_cc_read32(cc, BCMA_CC_PMU_STAT) &
+       if (!(bcma_pmu_read32(cc, BCMA_CC_PMU_STAT) &
              BCMA_CC_PMU_STAT_EXT_LPO_AVAIL))
                return 0;
 
-       bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
-                       BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
+       bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
+                        BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
        usleep_range(1000, 2000);
 
-       ilp_ctl = bcma_cc_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
+       ilp_ctl = bcma_pmu_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
        ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK;
 
-       bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
 
        alp_hz = ilp_ctl * 32768 / 4;
        return (alp_hz + 50000) / 100000 * 100;
@@ -127,8 +127,8 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
                mask = (u32)~(BCMA_RES_4314_HT_AVAIL |
                              BCMA_RES_4314_MACPHY_CLK_AVAIL);
 
-               bcma_cc_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
-               bcma_cc_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
+               bcma_pmu_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
+               bcma_pmu_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
                bcma_wait_value(cc->core, BCMA_CLKCTLST,
                                BCMA_CLKCTLST_HAVEHT, 0, 20000);
                break;
@@ -140,7 +140,7 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
 
        /* Flush */
        if (cc->pmu.rev >= 2)
-               bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
+               bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
 
        /* TODO: Do we need to update OTP? */
 }
@@ -195,9 +195,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
 
        /* Set the resource masks. */
        if (min_msk)
-               bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
        if (max_msk)
-               bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
 
        /*
         * Add some delay; allow resources to come up and settle.
@@ -269,23 +269,33 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
 
 void bcma_pmu_early_init(struct bcma_drv_cc *cc)
 {
+       struct bcma_bus *bus = cc->core->bus;
        u32 pmucap;
 
-       pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
+       if (cc->core->id.rev >= 35 &&
+           cc->capabilities_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
+               cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU);
+               if (!cc->pmu.core)
+                       bcma_warn(bus, "Couldn't find expected PMU core");
+       }
+       if (!cc->pmu.core)
+               cc->pmu.core = cc->core;
+
+       pmucap = bcma_pmu_read32(cc, BCMA_CC_PMU_CAP);
        cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
 
-       bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
-                  cc->pmu.rev, pmucap);
+       bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
+                  pmucap);
 }
 
 void bcma_pmu_init(struct bcma_drv_cc *cc)
 {
        if (cc->pmu.rev == 1)
-               bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
-                             ~BCMA_CC_PMU_CTL_NOILPONW);
+               bcma_pmu_mask32(cc, BCMA_CC_PMU_CTL,
+                               ~BCMA_CC_PMU_CTL_NOILPONW);
        else
-               bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
-                            BCMA_CC_PMU_CTL_NOILPONW);
+               bcma_pmu_set32(cc, BCMA_CC_PMU_CTL,
+                              BCMA_CC_PMU_CTL_NOILPONW);
 
        bcma_pmu_pll_init(cc);
        bcma_pmu_resources_init(cc);
@@ -472,8 +482,8 @@ u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
 static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
                                         u32 value)
 {
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
 }
 
 void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
@@ -497,20 +507,20 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
                       bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
 
                /* RMW only the P1 divider */
-               bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+               bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
                                BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
-               tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+               tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
                tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
                tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
-               bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
 
                /* RMW only the int feedback divider */
-               bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+               bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
                                BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
-               tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+               tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
                tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
                tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
-               bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
 
                tmp = BCMA_CC_PMU_CTL_PLL_UPD;
                break;
@@ -646,7 +656,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
                break;
        }
 
-       tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
-       bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
+       tmp |= bcma_pmu_read32(cc, BCMA_CC_PMU_CTL);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_CTL, tmp);
 }
 EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
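
The bcma_pmu_read32()/bcma_pmu_write32() accessors used throughout this file are added elsewhere in the series; the point of the conversion is that on ChipCommon rev >= 35 with the AOB capability the PMU registers sit on a dedicated PMU core, so accesses go through cc->pmu.core (set up in bcma_pmu_early_init() above) instead of the ChipCommon core. A sketch of the assumed helpers, with set32/mask32/maskset32 variants built the same way (not copied from the commit):

	static inline u32 bcma_pmu_read32(struct bcma_drv_cc *cc, u16 offset)
	{
		return bcma_read32(cc->pmu.core, offset);
	}

	static inline void bcma_pmu_write32(struct bcma_drv_cc *cc, u16 offset,
					    u32 value)
	{
		bcma_write32(cc->pmu.core, offset, value);
	}
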
index 7e11ef4cb7dbed69bd52685cf03a177fe6a8a01a..04d706ca5f439be4576c8f78afb4b10a66b5dd3e 100644 (file)
@@ -38,6 +38,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
        { "M25P32", 0x15, 0x10000, 64, },
        { "M25P64", 0x16, 0x10000, 128, },
        { "M25FL128", 0x17, 0x10000, 256, },
+       { "MX25L25635F", 0x18, 0x10000, 512, },
        { NULL },
 };
 
index 98067f757fb0d9348fc8fb8f6fd25eb81e32bbf2..771a2a253440f736711bd996044d78b6ce3b723a 100644 (file)
@@ -192,6 +192,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        case BCMA_CHIP_ID_BCM4707:
        case BCMA_CHIP_ID_BCM5357:
        case BCMA_CHIP_ID_BCM53572:
+       case BCMA_CHIP_ID_BCM47094:
                chip->ngpio     = 32;
                break;
        default:
index 24424f3fef96d5784be6d5af59dbf40b3abc94e2..a40a203314db7278a1aaf81149d4463ad6381cc3 100644 (file)
@@ -337,12 +337,9 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
-       struct bcma_bus *bus = mcore->core->bus;
-
        if (mcore->early_setup_done)
                return;
 
-       bcma_chipco_serial_init(&bus->drv_cc);
        bcma_core_mips_flash_detect(mcore);
 
        mcore->early_setup_done = true;
index 0856189c065fd57826df7edab2dba3217d5e283c..cae5385cf4996c28353e55f0676f8aa9ba8ec1ac 100644 (file)
@@ -294,7 +294,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
index df806b9c549084fab8e61569718b2441485f17e4..4a2d1b235fb5af3e51ac8b044e6a18aed5753cec 100644 (file)
@@ -98,6 +98,9 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
        { BCMA_CORE_SHIM, "SHIM" },
        { BCMA_CORE_PCIE2, "PCIe Gen2" },
        { BCMA_CORE_ARM_CR4, "ARM CR4" },
+       { BCMA_CORE_GCI, "GCI" },
+       { BCMA_CORE_CMEM, "CNDS DDR2/3 memory controller" },
+       { BCMA_CORE_ARM_CA7, "ARM CA7" },
        { BCMA_CORE_DEFAULT, "Default" },
 };
 
@@ -315,6 +318,8 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
                switch (core->id.id) {
                case BCMA_CORE_4706_MAC_GBIT_COMMON:
                case BCMA_CORE_NS_CHIPCOMMON_B:
+               case BCMA_CORE_PMU:
+               case BCMA_CORE_GCI:
                /* Not used yet: case BCMA_CORE_OOB_ROUTER: */
                        break;
                default:
index fa893c3ec4087f39382f3dc83b968c9859f41cca..ebd641b85396d63a7a07a2039b008008d7f0d459 100644 (file)
@@ -497,6 +497,7 @@ static int ath3k_probe(struct usb_interface *intf,
        /* match device ID in ath3k blacklist table */
        if (!id->driver_info) {
                const struct usb_device_id *match;
+
                match = usb_match_id(intf, ath3k_blist_tbl);
                if (match)
                        id = match;
index 129d47bcc5fc8d1b9d0ef0e087bbf36cee1336be..9a92c072a485def0b2730340503d1984d5a6dc1d 100644 (file)
@@ -132,7 +132,7 @@ config SUNXI_RSB
          and AC100/AC200 ICs.
 
 config UNIPHIER_SYSTEM_BUS
-       bool "UniPhier System Bus driver"
+       tristate "UniPhier System Bus driver"
        depends on ARCH_UNIPHIER && OF
        default y
        help
index 25996e2561105ac615cf4cc99c468f7a8db0f3df..795c9d9c96a6d5ae08c036a221ae1f9a4b1f9c1b 100644 (file)
@@ -330,7 +330,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
                cmd = RSB_CMD_RD32;
                break;
        default:
-               dev_err(rsb->dev, "Invalid access width: %d\n", len);
+               dev_err(rsb->dev, "Invalid access width: %zd\n", len);
                return -EINVAL;
        }
 
@@ -372,7 +372,7 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
                cmd = RSB_CMD_WR32;
                break;
        default:
-               dev_err(rsb->dev, "Invalid access width: %d\n", len);
+               dev_err(rsb->dev, "Invalid access width: %zd\n", len);
                return -EINVAL;
        }
 
index 6575c0fe6a4ea3a1e3bc1bae99010eefd9d7ff94..c3cb76b363c63c54d67343dacf4c9ec20032a95f 100644 (file)
@@ -192,8 +192,10 @@ static int __init vexpress_config_init(void)
        /* Need the config devices early, before the "normal" devices... */
        for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
                err = vexpress_config_populate(node);
-               if (err)
+               if (err) {
+                       of_node_put(node);
                        break;
+               }
        }
 
        return err;
index 05755441250c1de8495725ea80253957b71abd07..fdced547ad5961ff722cfb438c0e33de9f5fabb2 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/delay.h>
 #include <linux/vmalloc.h>
 #include <asm/uninorth.h>
-#include <asm/pci-bridge.h>
 #include <asm/prom.h>
 #include <asm/pmac_feature.h>
 #include "agp.h"
index 8d895908d608cc985dcd3306f52225d6f5e4fc6a..f7d89387bd6260ae994fadf3923e5209f185da37 100644 (file)
@@ -372,6 +372,7 @@ config HW_RANDOM_XGENE
 config HW_RANDOM_STM32
        tristate "STMicroelectronics STM32 random number generator"
        depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
+       depends on HAS_IOMEM
        help
          This driver provides kernel-side support for the Random Number
          Generator hardware found on STM32 microcontrollers.
index 9fda22e3387e59030b4a16f4ab1712bb1555b0ce..7fddd8696211f0320011c964a88a37a16133c4ba 100644 (file)
@@ -68,6 +68,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/acpi.h>
 
 #ifdef CONFIG_PARISC
 #include <asm/hardware.h>      /* for register_parisc_driver() stuff */
@@ -2054,8 +2055,6 @@ static int hardcode_find_bmc(void)
 
 #ifdef CONFIG_ACPI
 
-#include <linux/acpi.h>
-
 /*
  * Once we get an ACPI failure, we don't try any more, because we go
  * through the tables sequentially.  Once we don't find a table, there
index ebce98033fbb76ea687d439b5842e707f62eeb80..5759d75780cf70b4d30becc7fc442dd8cad1e4d1 100644 (file)
@@ -177,6 +177,8 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
        GATE(0, "gpll_armclk", "gpll", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(0), 6, GFLAGS),
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        /*
         * Clock-Architecture Diagram 2
         */
@@ -187,6 +189,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(0), 8, GFLAGS),
        COMPOSITE_NOGATE(0, "ddrphy2x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
                        RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+       FACTOR(0, "ddrphy", "ddrphy2x", 0, 1, 2),
 
        COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
                        RK2928_CLKSEL_CON(1), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
@@ -263,6 +266,8 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
        COMPOSITE(0, "aclk_vcodec", mux_pll_src_3plls_p, 0,
                        RK2928_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 11, GFLAGS),
+       FACTOR_GATE(HCLK_VCODEC, "hclk_vcodec", "aclk_vcodec", 0, 1, 4,
+                       RK2928_CLKGATE_CON(3), 12, GFLAGS),
 
        COMPOSITE(0, "aclk_hvec", mux_pll_src_3plls_p, 0,
                        RK2928_CLKSEL_CON(20), 0, 2, MFLAGS, 2, 5, DFLAGS,
@@ -351,6 +356,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
        COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
                        RK2928_CLKSEL_CON(21), 9, 5, DFLAGS,
                        RK2928_CLKGATE_CON(2), 6, GFLAGS),
+       FACTOR(0, "sclk_macref_out", "hclk_peri_src", 0, 1, 2),
 
        MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
                        RK2928_CLKSEL_CON(31), 0, 1, MFLAGS),
@@ -376,11 +382,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
        GATE(ACLK_VIO, "aclk_vio", "aclk_disp1_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 13, GFLAGS),
        GATE(ACLK_LCDC, "aclk_lcdc", "aclk_disp1_pre", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
 
-       GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
+       GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 12, GFLAGS),
        GATE(HCLK_LCDC, "hclk_lcdc", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(9), 5, GFLAGS),
 
-       /* hclk_video gates */
-       GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(3), 12, GFLAGS),
 
        /* xin24m gates */
        GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK2928_CLKGATE_CON(10), 0, GFLAGS),
@@ -444,34 +448,11 @@ static void __init rk3036_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 
-       /* xin12m is created by an cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
        if (IS_ERR(clk))
                pr_warn("%s: could not register clock usb480m: %ld\n",
                        __func__, PTR_ERR(clk));
 
-       clk = clk_register_fixed_factor(NULL, "ddrphy", "ddrphy2x", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock ddrphy: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_vcodec_pre",
-                                       "aclk_vcodec", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "sclk_macref_out",
-                                       "hclk_peri_src", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock sclk_macref_out: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        rockchip_clk_register_plls(rk3036_pll_clks,
                                   ARRAY_SIZE(rk3036_pll_clks),
                                   RK3036_GRF_SOC_STATUS0);
index 7f7444cbf6fcc0e62ae85c195792edc10daca2f3..40bab39014915087e691257caf0b063b72fd04bf 100644 (file)
@@ -339,13 +339,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        INVERTER(0, "pclk_cif0", "pclkin_cif0",
                        RK2928_CLKSEL_CON(30), 8, IFLAGS),
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        /*
         * the 480m are generated inside the usb block from these clocks,
         * but they are also a source for the hsicphy clock.
         */
-       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(1), 5, GFLAGS),
-       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(1), 6, GFLAGS),
 
        COMPOSITE(0, "mac_src", mux_mac_p, 0,
@@ -605,7 +607,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
        GATE(SCLK_TIMER2, "timer2", "xin24m", 0,
                        RK2928_CLKGATE_CON(3), 2, GFLAGS),
 
-       COMPOSITE_NOMUX(0, "sclk_tsadc", "xin24m", 0,
+       COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0,
                        RK2928_CLKSEL_CON(34), 0, 16, DFLAGS,
                        RK2928_CLKGATE_CON(2), 15, GFLAGS),
 
@@ -662,11 +664,11 @@ static struct clk_div_table div_rk3188_aclk_core_t[] = {
        { /* sentinel */ },
 };
 
-PNAME(mux_hsicphy_p)           = { "sclk_otgphy0", "sclk_otgphy1",
+PNAME(mux_hsicphy_p)           = { "sclk_otgphy0_480m", "sclk_otgphy1_480m",
                                    "gpll", "cpll" };
 
 static struct rockchip_clk_branch rk3188_i2s0_fracmux __initdata =
-       MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+       MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, CLK_SET_RATE_PARENT,
                        RK2928_CLKSEL_CON(3), 8, 2, MFLAGS);
 
 static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
@@ -722,7 +724,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
        COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
                        RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
                        RK2928_CLKGATE_CON(0), 9, GFLAGS),
-       COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", 0,
+       COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", CLK_SET_RATE_PARENT,
                        RK2928_CLKSEL_CON(7), 0,
                        RK2928_CLKGATE_CON(0), 10, GFLAGS,
                        &rk3188_i2s0_fracmux),
@@ -748,12 +750,12 @@ static const char *const rk3188_critical_clocks[] __initconst = {
        "hclk_peri",
        "pclk_cpu",
        "pclk_peri",
+       "hclk_cpubus"
 };
 
 static void __init rk3188_common_clk_init(struct device_node *np)
 {
        void __iomem *reg_base;
-       struct clk *clk;
 
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
@@ -763,17 +765,6 @@ static void __init rk3188_common_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 
-       /* xin12m is created by an cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock usb480m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        rockchip_clk_register_branches(common_clk_branches,
                                  ARRAY_SIZE(common_clk_branches));
 
index 981a50205339609617c271556c9d3227c0b3bd2c..c515915850a1dc6a3bf8f0186fb9d13c2484ccaa 100644 (file)
@@ -187,7 +187,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(7), 1, GFLAGS),
        GATE(0, "ddrc", "ddrphy_pre", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(8), 5, GFLAGS),
-       GATE(0, "ddrphy", "ddrphy_pre", CLK_IGNORE_UNUSED,
+       FACTOR_GATE(0, "ddrphy", "ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
                        RK2928_CLKGATE_CON(7), 0, GFLAGS),
 
        /* PD_CORE */
@@ -240,13 +240,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
        COMPOSITE(0, "aclk_vpu_pre", mux_pll_src_4plls_p, 0,
                        RK2928_CLKSEL_CON(32), 5, 2, MFLAGS, 0, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 11, GFLAGS),
-       GATE(0, "hclk_vpu_src", "aclk_vpu_pre", 0,
+       FACTOR_GATE(0, "hclk_vpu_pre", "aclk_vpu_pre", 0, 1, 4,
                        RK2928_CLKGATE_CON(4), 4, GFLAGS),
 
        COMPOSITE(0, "aclk_rkvdec_pre", mux_pll_src_4plls_p, 0,
                        RK2928_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 2, GFLAGS),
-       GATE(0, "hclk_rkvdec_src", "aclk_rkvdec_pre", 0,
+       FACTOR_GATE(0, "hclk_rkvdec_pre", "aclk_rkvdec_pre", 0, 1, 4,
                        RK2928_CLKGATE_CON(4), 5, GFLAGS),
 
        COMPOSITE(0, "sclk_vdec_cabac", mux_pll_src_4plls_p, 0,
@@ -371,6 +371,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
        MUX(0, "dclk_vop", mux_dclk_vop_p, 0,
                        RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0,
                        RK2928_CLKSEL_CON(9), 15, 1, MFLAGS, 0, 7, DFLAGS,
                        RK2928_CLKGATE_CON(0), 3, GFLAGS),
@@ -605,13 +607,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
 
        /* PD_MMC */
        MMC(SCLK_SDMMC_DRV,    "sdmmc_drv",    "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
-       MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
+       MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
 
        MMC(SCLK_SDIO_DRV,     "sdio_drv",     "sclk_sdio",  RK3228_SDIO_CON0,  1),
-       MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  1),
+       MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  0),
 
        MMC(SCLK_EMMC_DRV,     "emmc_drv",     "sclk_emmc",  RK3228_EMMC_CON0,  1),
-       MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  1),
+       MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  0),
 };
 
 static const char *const rk3228_critical_clocks[] __initconst = {
@@ -624,7 +626,6 @@ static const char *const rk3228_critical_clocks[] __initconst = {
 static void __init rk3228_clk_init(struct device_node *np)
 {
        void __iomem *reg_base;
-       struct clk *clk;
 
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
@@ -634,29 +635,6 @@ static void __init rk3228_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 
-       /* xin12m is created by an cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                               __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "ddrphy_pre", "ddrphy4x", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock ddrphy_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_vpu_pre",
-                                       "hclk_vpu_src", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_vpu_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_rkvdec_pre",
-                                       "hclk_rkvdec_src", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_rkvdec_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        rockchip_clk_register_plls(rk3228_pll_clks,
                                   ARRAY_SIZE(rk3228_pll_clks),
                                   RK3228_GRF_SOC_STATUS0);
index 984fc187d12ecf6e80d23bb52d2868c75c0daeca..3cb72163a5122ba9ef7695f0c5ba5d679afa9bd7 100644 (file)
@@ -195,8 +195,8 @@ PNAME(mux_hsadcout_p)       = { "hsadc_src", "ext_hsadc" };
 PNAME(mux_edp_24m_p)   = { "ext_edp_24m", "xin24m" };
 PNAME(mux_tspout_p)    = { "cpll", "gpll", "npll", "xin27m" };
 
-PNAME(mux_usbphy480m_p)                = { "sclk_otgphy1", "sclk_otgphy2",
-                                   "sclk_otgphy0" };
+PNAME(mux_usbphy480m_p)                = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
+                                   "sclk_otgphy0_480m" };
 PNAME(mux_hsicphy480m_p)       = { "cpll", "gpll", "usbphy480m_src" };
 PNAME(mux_hsicphy12m_p)                = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
 
@@ -333,6 +333,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 7, GFLAGS),
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
                        RK3288_CLKSEL_CON(4), 15, 1, MFLAGS, 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(4), 1, GFLAGS),
@@ -399,12 +401,10 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
         */
        GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vdpu", 0,
                RK3288_CLKGATE_CON(9), 0, GFLAGS),
-       /*
-        * We introduce a virtul node of hclk_vodec_pre_v to split one clock
-        * struct with a gate and a fix divider into two node in software.
-        */
-       GATE(0, "hclk_vcodec_pre_v", "aclk_vdpu", 0,
+
+       FACTOR_GATE(0, "hclk_vcodec_pre", "aclk_vdpu", 0, 1, 4,
                RK3288_CLKGATE_CON(3), 10, GFLAGS),
+
        GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
                RK3288_CLKGATE_CON(9), 1, GFLAGS),
 
@@ -537,11 +537,11 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
                        RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK3288_CLKGATE_CON(4), 10, GFLAGS),
 
-       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 4, GFLAGS),
-       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 5, GFLAGS),
-       GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY2, "sclk_otgphy2", "xin24m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 6, GFLAGS),
        GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 7, GFLAGS),
@@ -888,24 +888,6 @@ static void __init rk3288_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS);
 
-       /* xin12m is created by an cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-
-       clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock usb480m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_vcodec_pre",
-                                       "hclk_vcodec_pre_v", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
        clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
        if (IS_ERR(clk))
index be0ede52226994a0ae7248c8098285e0ebfd4a86..31facd8426f754640a63b2984cf27aa7cb4d74d6 100644 (file)
@@ -121,7 +121,7 @@ PNAME(mux_i2s_2ch_p)                = { "i2s_2ch_src", "i2s_2ch_frac",
                                    "dummy", "xin12m" };
 PNAME(mux_spdif_8ch_p)         = { "spdif_8ch_pre", "spdif_8ch_frac",
                                    "ext_i2s", "xin12m" };
-PNAME(mux_edp_24m_p)           = { "dummy", "xin24m" };
+PNAME(mux_edp_24m_p)           = { "xin24m", "dummy" };
 PNAME(mux_vip_out_p)           = { "vip_src", "xin24m" };
 PNAME(mux_usbphy480m_p)                = { "usbotg_out", "xin24m" };
 PNAME(mux_hsic_usbphy480m_p)   = { "usbotg_out", "dummy" };
@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
        .core_reg = RK3368_CLKSEL_CON(0),
        .div_core_shift = 0,
        .div_core_mask = 0x1f,
-       .mux_core_shift = 15,
+       .mux_core_shift = 7,
 };
 
 static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
        }
 
 static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
-       RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
-       RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
-       RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
-       RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
-       RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
-       RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
-       RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
-       RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
-       RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
-       RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+       RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
+       RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
+       RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
+       RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
+       RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
+       RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
+       RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
+       RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
+       RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
+       RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
 };
 
 static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
-       RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
-       RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
-       RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
-       RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
-       RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
-       RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
-       RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
-       RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
-       RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
-       RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+       RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
+       RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
+       RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
+       RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
+       RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
+       RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
+       RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
+       RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
+       RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
+       RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
 };
 
 static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
@@ -248,6 +248,8 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
         * Clock-Architecture Diagram 2
         */
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        MUX(SCLK_USBPHY480M, "usbphy_480m", mux_usbphy480m_p, CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(13), 8, 1, MFLAGS),
 
@@ -299,7 +301,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        COMPOSITE_NOGATE_DIVTBL(0, "ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED,
                        RK3368_CLKSEL_CON(13), 4, 1, MFLAGS, 0, 2, DFLAGS, div_ddrphy_t),
 
-       GATE(0, "sclk_ddr", "ddrphy_div4", CLK_IGNORE_UNUSED,
+       FACTOR_GATE(0, "sclk_ddr", "ddrphy_src", CLK_IGNORE_UNUSED, 1, 4,
                        RK3368_CLKGATE_CON(6), 14, GFLAGS),
        GATE(0, "sclk_ddr4x", "ddrphy_src", CLK_IGNORE_UNUSED,
                        RK3368_CLKGATE_CON(6), 15, GFLAGS),
@@ -353,7 +355,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(32), 0,
                        RK3368_CLKGATE_CON(6), 5, GFLAGS),
-       COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
+       COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(31), 8, 2, MFLAGS,
                        RK3368_CLKGATE_CON(6), 6, GFLAGS),
        COMPOSITE(0, "i2s_2ch_src", mux_pll_src_cpll_gpll_p, 0,
@@ -362,7 +364,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        COMPOSITE_FRAC(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(54), 0,
                        RK3368_CLKGATE_CON(5), 14, GFLAGS),
-       COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, 0,
+       COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(53), 8, 2, MFLAGS,
                        RK3368_CLKGATE_CON(5), 15, GFLAGS),
 
@@ -384,18 +386,18 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
         * Clock-Architecture Diagram 3
         */
 
-       COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+       COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
                        RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK3368_CLKGATE_CON(4), 6, GFLAGS),
-       COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+       COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
                        RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
                        RK3368_CLKGATE_CON(4), 7, GFLAGS),
 
        /*
-        * We introduce a virtual node of hclk_vodec_pre_v to split one clock
-        * struct with a gate and a fix divider into two node in software.
+        * We use aclk_vdpu by default (GRF_SOC_CON0[7] setting in the system),
+        * so we ignore the mux and model the clock nodes as follows:
         */
-       GATE(0, "hclk_video_pre_v", "aclk_vdpu", 0,
+       FACTOR_GATE(0, "hclk_video_pre", "aclk_vdpu", 0, 1, 4,
                RK3368_CLKGATE_CON(4), 8, GFLAGS),
 
        COMPOSITE(0, "sclk_hevc_cabac_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
@@ -442,7 +444,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
                        RK3368_CLKGATE_CON(4), 13, GFLAGS),
        GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
-                       RK3368_CLKGATE_CON(5), 12, GFLAGS),
+                       RK3368_CLKGATE_CON(4), 12, GFLAGS),
 
        COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
                        RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
@@ -842,24 +844,6 @@ static void __init rk3368_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 
-       /* xin12m is created by a cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       /* ddrphy_div4 is created by a cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "ddrphy_div4", "ddrphy_src", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_video_pre",
-                                       "hclk_video_pre_v", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        /* Watchdog pclk is controlled by sgrf_soc_con3[7]. */
        clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
        if (IS_ERR(clk))
index d9a0b5d4d47f91137e78944de023b910972c4387..ab505247887013d8b5eac570c996e96627165f26 100644 (file)
@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
        if (gate_offset >= 0) {
                gate = kzalloc(sizeof(*gate), GFP_KERNEL);
                if (!gate)
-                       return ERR_PTR(-ENOMEM);
+                       goto err_gate;
 
                gate->flags = gate_flags;
                gate->reg = base + gate_offset;
@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
        if (div_width > 0) {
                div = kzalloc(sizeof(*div), GFP_KERNEL);
                if (!div)
-                       return ERR_PTR(-ENOMEM);
+                       goto err_div;
 
                div->flags = div_flags;
                div->reg = base + muxdiv_offset;
@@ -100,6 +100,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
                                     flags);
 
        return clk;
+err_div:
+       kfree(gate);
+err_gate:
+       kfree(mux);
+       return ERR_PTR(-ENOMEM);
 }
 
 struct rockchip_clk_frac {
@@ -260,6 +265,53 @@ static struct clk *rockchip_clk_register_frac_branch(const char *name,
        return clk;
 }
 
+static struct clk *rockchip_clk_register_factor_branch(const char *name,
+               const char *const *parent_names, u8 num_parents,
+               void __iomem *base, unsigned int mult, unsigned int div,
+               int gate_offset, u8 gate_shift, u8 gate_flags,
+               unsigned long flags, spinlock_t *lock)
+{
+       struct clk *clk;
+       struct clk_gate *gate = NULL;
+       struct clk_fixed_factor *fix = NULL;
+
+       /* without gate, register a simple factor clock */
+       if (gate_offset == 0) {
+               return clk_register_fixed_factor(NULL, name,
+                               parent_names[0], flags, mult,
+                               div);
+       }
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
+               return ERR_PTR(-ENOMEM);
+
+       gate->flags = gate_flags;
+       gate->reg = base + gate_offset;
+       gate->bit_idx = gate_shift;
+       gate->lock = lock;
+
+       fix = kzalloc(sizeof(*fix), GFP_KERNEL);
+       if (!fix) {
+               kfree(gate);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       fix->mult = mult;
+       fix->div = div;
+
+       clk = clk_register_composite(NULL, name, parent_names, num_parents,
+                                    NULL, NULL,
+                                    &fix->hw, &clk_fixed_factor_ops,
+                                    &gate->hw, &clk_gate_ops, flags);
+       if (IS_ERR(clk)) {
+               kfree(fix);
+               kfree(gate);
+       }
+
+       return clk;
+}
+
 static DEFINE_SPINLOCK(clk_lock);
 static struct clk **clk_table;
 static void __iomem *reg_base;
@@ -395,6 +447,14 @@ void __init rockchip_clk_register_branches(
                                reg_base + list->muxdiv_offset,
                                list->div_shift, list->div_flags, &clk_lock);
                        break;
+               case branch_factor:
+                       clk = rockchip_clk_register_factor_branch(
+                               list->name, list->parent_names,
+                               list->num_parents, reg_base,
+                               list->div_shift, list->div_width,
+                               list->gate_offset, list->gate_shift,
+                               list->gate_flags, flags, &clk_lock);
+                       break;
                }
 
                /* none of the cases above matched */
index ff8bd23a93ec2741ad00a40adbf0a462d836b82a..39c198bbcbee7be22f47b0c3f4f30bb297eea772 100644 (file)
@@ -254,6 +254,7 @@ enum rockchip_clk_branch_type {
        branch_gate,
        branch_mmc,
        branch_inverter,
+       branch_factor,
 };
 
 struct rockchip_clk_branch {
@@ -508,6 +509,33 @@ struct rockchip_clk_branch {
                .div_flags      = if,                           \
        }
 
+#define FACTOR(_id, cname, pname,  f, fm, fd)                  \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_factor,                \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .div_shift      = fm,                           \
+               .div_width      = fd,                           \
+       }
+
+#define FACTOR_GATE(_id, cname, pname,  f, fm, fd, go, gb, gf) \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_factor,                \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .div_shift      = fm,                           \
+               .div_width      = fd,                           \
+               .gate_offset    = go,                           \
+               .gate_shift     = gb,                           \
+               .gate_flags     = gf,                           \
+       }
+
 void rockchip_clk_init(struct device_node *np, void __iomem *base,
                       unsigned long nr_clks);
 struct regmap *rockchip_clk_get_grf(void);
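The two macros above describe nothing more than a fixed mult/div relation, optionally behind a gate bit; branch_factor entries are dispatched to rockchip_clk_register_factor_branch(), which registers either a plain fixed-factor clock or a fixed-factor plus gate composite. Note that the macros reuse the .div_shift/.div_width fields to carry mult and div. A minimal table fragment using them, lifted from the rk3288/rk3368 hunks above purely as an illustration (the array name is hypothetical):

static struct rockchip_clk_branch example_branches[] __initdata = {
	/* ungated fixed factor: xin12m = xin24m * 1 / 2 */
	FACTOR(0, "xin12m", "xin24m", 0, 1, 2),

	/* fixed factor 1/4 behind a gate bit */
	FACTOR_GATE(0, "hclk_video_pre", "aclk_vdpu", 0, 1, 4,
			RK3368_CLKGATE_CON(4), 8, GFLAGS),
};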
index 0481d5d673d68a718a8052762c66b78d482543eb..6b598c6a02136d64bee24e23bc0d61ec8d8733c6 100644 (file)
@@ -15,9 +15,9 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 
 #define SUNXI_OSC24M_GATE      0
 
@@ -61,7 +61,6 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
                goto err_free_gate;
 
        of_clk_add_provider(node, of_clk_src_simple_get, clk);
-       clk_register_clkdev(clk, clk_name, NULL);
 
        return;
 
index 1611b036421c20e76f438e13fb326111b24e5cda..3437f734c9bf1f7e2ba95051f2debbf71b3c3444 100644 (file)
@@ -17,7 +17,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
@@ -107,7 +106,6 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
                goto iounmap_reg;
 
        of_clk_add_provider(node, of_clk_src_simple_get, clk);
-       clk_register_clkdev(clk, clk_name, NULL);
 
        return;
 
index 59428dbd607a85d3ff15e3b648f529e1a575abf9..ddefe9668863b0ffbaebfc5e33c709c49c2486e2 100644 (file)
@@ -48,7 +48,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
        u32 reg;
        unsigned long rate;
        struct clk_factors *factors = to_clk_factors(hw);
-       struct clk_factors_config *config = factors->config;
+       const struct clk_factors_config *config = factors->config;
 
        /* Fetch the register value */
        reg = readl(factors->reg);
@@ -63,18 +63,28 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
        if (config->pwidth != SUNXI_FACTORS_NOT_APPLICABLE)
                p = FACTOR_GET(config->pshift, config->pwidth, reg);
 
-       /* Calculate the rate */
-       rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
+       if (factors->recalc) {
+               struct factors_request factors_req = {
+                       .parent_rate = parent_rate,
+                       .n = n,
+                       .k = k,
+                       .m = m,
+                       .p = p,
+               };
 
-       return rate;
-}
+               /* get mux details from mux clk structure */
+               if (factors->mux)
+                       factors_req.parent_index =
+                               (reg >> factors->mux->shift) &
+                               factors->mux->mask;
 
-static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
-                                  unsigned long *parent_rate)
-{
-       struct clk_factors *factors = to_clk_factors(hw);
-       factors->get_factors((u32 *)&rate, (u32)*parent_rate,
-                            NULL, NULL, NULL, NULL);
+               factors->recalc(&factors_req);
+
+               return factors_req.rate;
+       }
+
+       /* Calculate the rate */
+       rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
 
        return rate;
 }
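As a quick sanity check of the reworked recalc path above, plugging a hypothetical set of factors into the formula (illustrative values, not taken from any specific SoC, and assuming n_start = 0) gives:

/*
 * parent_rate = 24 MHz, n = 32, k = 1, m = 3, p = 0:
 *   rate = (24000000 * (32 + 0) * (1 + 1) >> 0) / (3 + 1)
 *        = 1536000000 / 4
 *        = 384000000
 */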
@@ -82,6 +92,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
 static int clk_factors_determine_rate(struct clk_hw *hw,
                                      struct clk_rate_request *req)
 {
+       struct clk_factors *factors = to_clk_factors(hw);
        struct clk_hw *parent, *best_parent = NULL;
        int i, num_parents;
        unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
@@ -89,6 +100,10 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
        /* find the parent that can help provide the fastest rate <= rate */
        num_parents = clk_hw_get_num_parents(hw);
        for (i = 0; i < num_parents; i++) {
+               struct factors_request factors_req = {
+                       .rate = req->rate,
+                       .parent_index = i,
+               };
                parent = clk_hw_get_parent_by_index(hw, i);
                if (!parent)
                        continue;
@@ -97,8 +112,9 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
                else
                        parent_rate = clk_hw_get_rate(parent);
 
-               child_rate = clk_factors_round_rate(hw, req->rate,
-                                                   &parent_rate);
+               factors_req.parent_rate = parent_rate;
+               factors->get_factors(&factors_req);
+               child_rate = factors_req.rate;
 
                if (child_rate <= req->rate && child_rate > best_child_rate) {
                        best_parent = parent;
@@ -120,13 +136,16 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
 static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
 {
-       u8 n = 0, k = 0, m = 0, p = 0;
+       struct factors_request req = {
+               .rate = rate,
+               .parent_rate = parent_rate,
+       };
        u32 reg;
        struct clk_factors *factors = to_clk_factors(hw);
-       struct clk_factors_config *config = factors->config;
+       const struct clk_factors_config *config = factors->config;
        unsigned long flags = 0;
 
-       factors->get_factors((u32 *)&rate, (u32)parent_rate, &n, &k, &m, &p);
+       factors->get_factors(&req);
 
        if (factors->lock)
                spin_lock_irqsave(factors->lock, flags);
@@ -135,10 +154,10 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
        reg = readl(factors->reg);
 
        /* Set up the new factors - macros do not do anything if width is 0 */
-       reg = FACTOR_SET(config->nshift, config->nwidth, reg, n);
-       reg = FACTOR_SET(config->kshift, config->kwidth, reg, k);
-       reg = FACTOR_SET(config->mshift, config->mwidth, reg, m);
-       reg = FACTOR_SET(config->pshift, config->pwidth, reg, p);
+       reg = FACTOR_SET(config->nshift, config->nwidth, reg, req.n);
+       reg = FACTOR_SET(config->kshift, config->kwidth, reg, req.k);
+       reg = FACTOR_SET(config->mshift, config->mwidth, reg, req.m);
+       reg = FACTOR_SET(config->pshift, config->pwidth, reg, req.p);
 
        /* Apply them now */
        writel(reg, factors->reg);
@@ -155,7 +174,6 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
 static const struct clk_ops clk_factors_ops = {
        .determine_rate = clk_factors_determine_rate,
        .recalc_rate = clk_factors_recalc_rate,
-       .round_rate = clk_factors_round_rate,
        .set_rate = clk_factors_set_rate,
 };
 
@@ -172,7 +190,7 @@ struct clk *sunxi_factors_register(struct device_node *node,
        struct clk_hw *mux_hw = NULL;
        const char *clk_name = node->name;
        const char *parents[FACTORS_MAX_PARENTS];
-       int i = 0;
+       int ret, i = 0;
 
        /* if we have a mux, we will have >1 parents */
        i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS);
@@ -188,21 +206,22 @@ struct clk *sunxi_factors_register(struct device_node *node,
 
        factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
        if (!factors)
-               return NULL;
+               goto err_factors;
 
        /* set up factors properties */
        factors->reg = reg;
        factors->config = data->table;
        factors->get_factors = data->getter;
+       factors->recalc = data->recalc;
        factors->lock = lock;
 
        /* Add a gate if this factor clock can be gated */
        if (data->enable) {
                gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-               if (!gate) {
-                       kfree(factors);
-                       return NULL;
-               }
+               if (!gate)
+                       goto err_gate;
+
+               factors->gate = gate;
 
                /* set up gate properties */
                gate->reg = reg;
@@ -214,11 +233,10 @@ struct clk *sunxi_factors_register(struct device_node *node,
        /* Add a mux if this factor clock can be muxed */
        if (data->mux) {
                mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
-               if (!mux) {
-                       kfree(factors);
-                       kfree(gate);
-                       return NULL;
-               }
+               if (!mux)
+                       goto err_mux;
+
+               factors->mux = mux;
 
                /* set up gate properties */
                mux->reg = reg;
@@ -233,11 +251,44 @@ struct clk *sunxi_factors_register(struct device_node *node,
                        mux_hw, &clk_mux_ops,
                        &factors->hw, &clk_factors_ops,
                        gate_hw, &clk_gate_ops, 0);
+       if (IS_ERR(clk))
+               goto err_register;
 
-       if (!IS_ERR(clk)) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
-       }
+       ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (ret)
+               goto err_provider;
 
        return clk;
+
+err_provider:
+       /* TODO: The composite clock stuff will leak a bit here. */
+       clk_unregister(clk);
+err_register:
+       kfree(mux);
+err_mux:
+       kfree(gate);
+err_gate:
+       kfree(factors);
+err_factors:
+       return NULL;
+}
+
+void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
+{
+       struct clk_hw *hw = __clk_get_hw(clk);
+       struct clk_factors *factors;
+       const char *name;
+
+       if (!hw)
+               return;
+
+       factors = to_clk_factors(hw);
+       name = clk_hw_get_name(hw);
+
+       of_clk_del_provider(node);
+       /* TODO: The composite clock stuff will leak a bit here. */
+       clk_unregister(clk);
+       kfree(factors->mux);
+       kfree(factors->gate);
+       kfree(factors);
 }
index 171085ab5513e4f724017d05d6f91929262a4f78..1e63c5b2d5f4cb6866127100be2274a7095641cd 100644 (file)
@@ -2,7 +2,6 @@
 #define __MACH_SUNXI_CLK_FACTORS_H
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/spinlock.h>
 
 #define SUNXI_FACTORS_NOT_APPLICABLE   (0)
@@ -19,21 +18,36 @@ struct clk_factors_config {
        u8 n_start;
 };
 
+struct factors_request {
+       unsigned long rate;
+       unsigned long parent_rate;
+       u8 parent_index;
+       u8 n;
+       u8 k;
+       u8 m;
+       u8 p;
+};
+
 struct factors_data {
        int enable;
        int mux;
        int muxmask;
-       struct clk_factors_config *table;
-       void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
+       const struct clk_factors_config *table;
+       void (*getter)(struct factors_request *req);
+       void (*recalc)(struct factors_request *req);
        const char *name;
 };
 
 struct clk_factors {
        struct clk_hw hw;
        void __iomem *reg;
-       struct clk_factors_config *config;
-       void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
+       const struct clk_factors_config *config;
+       void (*get_factors)(struct factors_request *req);
+       void (*recalc)(struct factors_request *req);
        spinlock_t *lock;
+       /* for cleanup */
+       struct clk_mux *mux;
+       struct clk_gate *gate;
 };
 
 struct clk *sunxi_factors_register(struct device_node *node,
@@ -41,4 +55,6 @@ struct clk *sunxi_factors_register(struct device_node *node,
                                   spinlock_t *lock,
                                   void __iomem *reg);
 
+void sunxi_factors_unregister(struct device_node *node, struct clk *clk);
+
 #endif
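To show how the new interface above is meant to be used, here is a minimal sketch of a getter written against struct factors_request, together with its factors_data hookup. All names are hypothetical, and the clock is assumed to be a simple divide-only one with rate = parent_rate / (m + 1) and a 4-bit m field:

/* Hypothetical divide-only clock: rate = parent_rate / (m + 1) */
static void example_get_factors(struct factors_request *req)
{
	u8 div;

	if (req->rate > req->parent_rate)
		req->rate = req->parent_rate;

	div = DIV_ROUND_UP(req->parent_rate, req->rate);
	if (div > 16)
		div = 16;

	req->rate = req->parent_rate / div;
	req->m = div - 1;
}

static const struct clk_factors_config example_config = {
	.mshift = 0,
	.mwidth = 4,
};

static const struct factors_data example_data = {
	.table	= &example_config,
	.getter	= example_get_factors,
};

Such a factors_data would then be handed to sunxi_factors_register() together with the clock's registers and a spinlock, exactly as the mod0 and AR100 code below already does.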
index d167e1efb92761a0f223a6054787587d58b11152..b38d71cec74c78300fce175de198cbd3fcb12623 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_a10_get_mod0_factors(struct factors_request *req)
 {
        u8 div, calcm, calcp;
 
        /* These clocks can only divide, so we will never be able to achieve
         * frequencies higher than the parent frequency */
-       if (*freq > parent_rate)
-               *freq = parent_rate;
+       if (req->rate > req->parent_rate)
+               req->rate = req->parent_rate;
 
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        if (div < 16)
                calcp = 0;
@@ -51,18 +51,13 @@ static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
 
        calcm = DIV_ROUND_UP(div, 1 << calcp);
 
-       *freq = (parent_rate >> calcp) / calcm;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm - 1;
-       *p = calcp;
+       req->rate = (req->parent_rate >> calcp) / calcm;
+       req->m = calcm - 1;
+       req->p = calcp;
 }
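Worked example for the helper above, with illustrative numbers: asking for 1 MHz from a 24 MHz parent cannot be done with the 4-bit m divider alone, so the power-of-two pre-divider kicks in.

/*
 * rate = 1 MHz, parent_rate = 24 MHz
 *   div   = DIV_ROUND_UP(24000000, 1000000) = 24
 *   calcp = 1                         (24 >= 16, but 24 / 2 < 16)
 *   calcm = DIV_ROUND_UP(24, 1 << 1) = 12
 *   rate  = (24000000 >> 1) / 12 = 1000000, so m = 11, p = 1
 */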
 
 /* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun4i_a10_mod0_config = {
+static const struct clk_factors_config sun4i_a10_mod0_config = {
        .mshift = 0,
        .mwidth = 4,
        .pshift = 16,
index f4da52b5ca0e838d37531a4cccf1e2d1941180f5..2cfc5a8a55340c4131b77443f4ac0f142f543be6 100644 (file)
@@ -130,6 +130,8 @@ CLK_OF_DECLARE(sun8i_a23_apb2, "allwinner,sun8i-a23-apb2-gates-clk",
               sunxi_simple_gates_init);
 CLK_OF_DECLARE(sun8i_a33_ahb1, "allwinner,sun8i-a33-ahb1-gates-clk",
               sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a83t_apb0, "allwinner,sun8i-a83t-apb0-gates-clk",
+              sunxi_simple_gates_init);
 CLK_OF_DECLARE(sun9i_a80_ahb0, "allwinner,sun9i-a80-ahb0-gates-clk",
               sunxi_simple_gates_init);
 CLK_OF_DECLARE(sun9i_a80_ahb1, "allwinner,sun9i-a80-ahb1-gates-clk",
index 23d042aabb4f7724d2cf1602b8a2b1f4028a8c41..68021fa5ecd9af5026a941940e0496329b567d5a 100644 (file)
@@ -9,7 +9,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -87,7 +86,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
                                                      clk_parent, 0, reg, i,
                                                      0, NULL);
                WARN_ON(IS_ERR(clk_data->clks[i]));
-               clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
 
                j++;
        }
index 20887686bdbee09fe7fbc90963f179ed45c651ab..84a187e55360fce909cdb5564aeffa423193d84c 100644 (file)
  *
  */
 
+#include <linux/bitops.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/spinlock.h>
 
-#define SUN6I_AR100_MAX_PARENTS                4
-#define SUN6I_AR100_SHIFT_MASK         0x3
-#define SUN6I_AR100_SHIFT_MAX          SUN6I_AR100_SHIFT_MASK
-#define SUN6I_AR100_SHIFT_SHIFT                4
-#define SUN6I_AR100_DIV_MASK           0x1f
-#define SUN6I_AR100_DIV_MAX            (SUN6I_AR100_DIV_MASK + 1)
-#define SUN6I_AR100_DIV_SHIFT          8
-#define SUN6I_AR100_MUX_MASK           0x3
-#define SUN6I_AR100_MUX_SHIFT          16
-
-struct ar100_clk {
-       struct clk_hw hw;
-       void __iomem *reg;
-};
-
-static inline struct ar100_clk *to_ar100_clk(struct clk_hw *hw)
-{
-       return container_of(hw, struct ar100_clk, hw);
-}
-
-static unsigned long ar100_recalc_rate(struct clk_hw *hw,
-                                      unsigned long parent_rate)
-{
-       struct ar100_clk *clk = to_ar100_clk(hw);
-       u32 val = readl(clk->reg);
-       int shift = (val >> SUN6I_AR100_SHIFT_SHIFT) & SUN6I_AR100_SHIFT_MASK;
-       int div = (val >> SUN6I_AR100_DIV_SHIFT) & SUN6I_AR100_DIV_MASK;
-
-       return (parent_rate >> shift) / (div + 1);
-}
-
-static int ar100_determine_rate(struct clk_hw *hw,
-                               struct clk_rate_request *req)
-{
-       int nparents = clk_hw_get_num_parents(hw);
-       long best_rate = -EINVAL;
-       int i;
-
-       req->best_parent_hw = NULL;
-
-       for (i = 0; i < nparents; i++) {
-               unsigned long parent_rate;
-               unsigned long tmp_rate;
-               struct clk_hw *parent;
-               unsigned long div;
-               int shift;
-
-               parent = clk_hw_get_parent_by_index(hw, i);
-               parent_rate = clk_hw_get_rate(parent);
-               div = DIV_ROUND_UP(parent_rate, req->rate);
-
-               /*
-                * The AR100 clk contains 2 divisors:
-                * - one power of 2 divisor
-                * - one regular divisor
-                *
-                * First check if we can safely shift (or divide by a power
-                * of 2) without losing precision on the requested rate.
-                */
-               shift = ffs(div) - 1;
-               if (shift > SUN6I_AR100_SHIFT_MAX)
-                       shift = SUN6I_AR100_SHIFT_MAX;
-
-               div >>= shift;
-
-               /*
-                * Then if the divisor is still bigger than what the HW
-                * actually supports, use a bigger shift (or power of 2
-                * divider) value and accept to lose some precision.
-                */
-               while (div > SUN6I_AR100_DIV_MAX) {
-                       shift++;
-                       div >>= 1;
-                       if (shift > SUN6I_AR100_SHIFT_MAX)
-                               break;
-               }
-
-               /*
-                * If the shift value (or power of 2 divider) is bigger
-                * than what the HW actually support, skip this parent.
-                */
-               if (shift > SUN6I_AR100_SHIFT_MAX)
-                       continue;
-
-               tmp_rate = (parent_rate >> shift) / div;
-               if (!req->best_parent_hw || tmp_rate > best_rate) {
-                       req->best_parent_hw = parent;
-                       req->best_parent_rate = parent_rate;
-                       best_rate = tmp_rate;
-               }
-       }
-
-       if (best_rate < 0)
-               return best_rate;
-
-       req->rate = best_rate;
-
-       return 0;
-}
-
-static int ar100_set_parent(struct clk_hw *hw, u8 index)
-{
-       struct ar100_clk *clk = to_ar100_clk(hw);
-       u32 val = readl(clk->reg);
-
-       if (index >= SUN6I_AR100_MAX_PARENTS)
-               return -EINVAL;
-
-       val &= ~(SUN6I_AR100_MUX_MASK << SUN6I_AR100_MUX_SHIFT);
-       val |= (index << SUN6I_AR100_MUX_SHIFT);
-       writel(val, clk->reg);
-
-       return 0;
-}
+#include "clk-factors.h"
 
-static u8 ar100_get_parent(struct clk_hw *hw)
-{
-       struct ar100_clk *clk = to_ar100_clk(hw);
-       return (readl(clk->reg) >> SUN6I_AR100_MUX_SHIFT) &
-              SUN6I_AR100_MUX_MASK;
-}
-
-static int ar100_set_rate(struct clk_hw *hw, unsigned long rate,
-                         unsigned long parent_rate)
+/**
+ * sun6i_get_ar100_factors - Calculates factors p, m for AR100
+ *
+ * AR100 rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+static void sun6i_get_ar100_factors(struct factors_request *req)
 {
-       unsigned long div = parent_rate / rate;
-       struct ar100_clk *clk = to_ar100_clk(hw);
-       u32 val = readl(clk->reg);
+       unsigned long div;
        int shift;
 
-       if (parent_rate % rate)
-               return -EINVAL;
+       /* clock only divides */
+       if (req->rate > req->parent_rate)
+               req->rate = req->parent_rate;
 
-       shift = ffs(div) - 1;
-       if (shift > SUN6I_AR100_SHIFT_MAX)
-               shift = SUN6I_AR100_SHIFT_MAX;
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
-       div >>= shift;
+       if (div < 32)
+               shift = 0;
+       else if (div >> 1 < 32)
+               shift = 1;
+       else if (div >> 2 < 32)
+               shift = 2;
+       else
+               shift = 3;
 
-       if (div > SUN6I_AR100_DIV_MAX)
-               return -EINVAL;
+       div >>= shift;
 
-       val &= ~((SUN6I_AR100_SHIFT_MASK << SUN6I_AR100_SHIFT_SHIFT) |
-                (SUN6I_AR100_DIV_MASK << SUN6I_AR100_DIV_SHIFT));
-       val |= (shift << SUN6I_AR100_SHIFT_SHIFT) |
-              (div << SUN6I_AR100_DIV_SHIFT);
-       writel(val, clk->reg);
+       if (div > 32)
+               div = 32;
 
-       return 0;
+       req->rate = (req->parent_rate >> shift) / div;
+       req->m = div - 1;
+       req->p = shift;
 }
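Worked example of the AR100 factor selection above (the rates are purely illustrative):

/*
 * rate = 12 MHz, parent_rate = 600 MHz
 *   div   = DIV_ROUND_UP(600000000, 12000000) = 50
 *   shift = 1                         (50 >= 32, but 50 >> 1 = 25 < 32)
 *   div >>= shift                     -> 25, below the limit of 32
 *   rate  = (600000000 >> 1) / 25 = 12000000, so m = 24, p = 1
 */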
 
-static struct clk_ops ar100_ops = {
-       .recalc_rate = ar100_recalc_rate,
-       .determine_rate = ar100_determine_rate,
-       .set_parent = ar100_set_parent,
-       .get_parent = ar100_get_parent,
-       .set_rate = ar100_set_rate,
+static const struct clk_factors_config sun6i_ar100_config = {
+       .mwidth = 5,
+       .mshift = 8,
+       .pwidth = 2,
+       .pshift = 4,
 };
 
+static const struct factors_data sun6i_ar100_data = {
+       .mux = 16,
+       .muxmask = GENMASK(1, 0),
+       .table = &sun6i_ar100_config,
+       .getter = sun6i_get_ar100_factors,
+};
+
+static DEFINE_SPINLOCK(sun6i_ar100_lock);
+
 static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
 {
-       const char *parents[SUN6I_AR100_MAX_PARENTS];
        struct device_node *np = pdev->dev.of_node;
-       const char *clk_name = np->name;
-       struct clk_init_data init;
-       struct ar100_clk *ar100;
        struct resource *r;
+       void __iomem *reg;
        struct clk *clk;
-       int nparents;
-
-       ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL);
-       if (!ar100)
-               return -ENOMEM;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ar100->reg = devm_ioremap_resource(&pdev->dev, r);
-       if (IS_ERR(ar100->reg))
-               return PTR_ERR(ar100->reg);
+       reg = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(reg))
+               return PTR_ERR(reg);
 
-       nparents = of_clk_get_parent_count(np);
-       if (nparents > SUN6I_AR100_MAX_PARENTS)
-               nparents = SUN6I_AR100_MAX_PARENTS;
-
-       of_clk_parent_fill(np, parents, nparents);
+       clk = sunxi_factors_register(np, &sun6i_ar100_data, &sun6i_ar100_lock,
+                                    reg);
+       if (!clk)
+               return -ENOMEM;
 
-       of_property_read_string(np, "clock-output-names", &clk_name);
+       platform_set_drvdata(pdev, clk);
 
-       init.name = clk_name;
-       init.ops = &ar100_ops;
-       init.parent_names = parents;
-       init.num_parents = nparents;
-       init.flags = 0;
+       return 0;
+}
 
-       ar100->hw.init = &init;
+static int sun6i_a31_ar100_clk_remove(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct clk *clk = platform_get_drvdata(pdev);
 
-       clk = clk_register(&pdev->dev, &ar100->hw);
-       if (IS_ERR(clk))
-               return PTR_ERR(clk);
+       sunxi_factors_unregister(np, clk);
 
-       return of_clk_add_provider(np, of_clk_src_simple_get, clk);
+       return 0;
 }
 
 static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
@@ -227,6 +113,7 @@ static struct platform_driver sun6i_a31_ar100_clk_driver = {
                .of_match_table = sun6i_a31_ar100_clk_dt_ids,
        },
        .probe = sun6i_a31_ar100_clk_probe,
+       .remove = sun6i_a31_ar100_clk_remove,
 };
 module_platform_driver(sun6i_a31_ar100_clk_driver);
 
index e32d18ba252be91ad308c66f10828a5ff7adcf36..63fdb790df2938ba1f1c35df96020926820ad065 100644 (file)
@@ -17,7 +17,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -110,3 +109,5 @@ err_unmap:
 
 CLK_OF_DECLARE(sun8i_h3_bus_gates, "allwinner,sun8i-h3-bus-gates-clk",
               sun8i_h3_bus_gates_init);
+CLK_OF_DECLARE(sun8i_a83t_bus_gates, "allwinner,sun8i-a83t-bus-gates-clk",
+              sun8i_h3_bus_gates_init);
index bf117a636d2397f6a7059f55962134cade4b3cc0..3aaa9cbef791a910d14965b6858d91067da94091 100644 (file)
  */
 
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/of_address.h>
 
-#include "clk-factors.h"
+#define SUN8I_MBUS_ENABLE      31
+#define SUN8I_MBUS_MUX_SHIFT   24
+#define SUN8I_MBUS_MUX_MASK    0x3
+#define SUN8I_MBUS_DIV_SHIFT   0
+#define SUN8I_MBUS_DIV_WIDTH   3
+#define SUN8I_MBUS_MAX_PARENTS 4
 
-/**
- * sun8i_a23_get_mbus_factors() - calculates m factor for MBUS clocks
- * MBUS rate is calculated as follows
- * rate = parent_rate / (m + 1);
- */
+static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
 
-static void sun8i_a23_get_mbus_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void __init sun8i_a23_mbus_setup(struct device_node *node)
 {
-       u8 div;
-
-       /*
-        * These clocks can only divide, so we will never be able to
-        * achieve frequencies higher than the parent frequency
-        */
-       if (*freq > parent_rate)
-               *freq = parent_rate;
-
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       int num_parents = of_clk_get_parent_count(node);
+       const char *parents[num_parents];
+       const char *clk_name = node->name;
+       struct resource res;
+       struct clk_divider *div;
+       struct clk_gate *gate;
+       struct clk_mux *mux;
+       struct clk *clk;
+       void __iomem *reg;
+       int err;
 
-       if (div > 8)
-               div = 8;
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (!reg) {
+               pr_err("Could not get registers for sun8i-mbus-clk\n");
+               return;
+       }
 
-       *freq = parent_rate / div;
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               goto err_unmap;
 
-       /* we were called to round the frequency, we can now return */
-       if (m == NULL)
-               return;
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               goto err_free_div;
 
-       *m = div - 1;
-}
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
+               goto err_free_mux;
 
-static struct clk_factors_config sun8i_a23_mbus_config = {
-       .mshift = 0,
-       .mwidth = 3,
-};
+       of_property_read_string(node, "clock-output-names", &clk_name);
+       of_clk_parent_fill(node, parents, num_parents);
 
-static const struct factors_data sun8i_a23_mbus_data __initconst = {
-       .enable = 31,
-       .mux = 24,
-       .muxmask = BIT(1) | BIT(0),
-       .table = &sun8i_a23_mbus_config,
-       .getter = sun8i_a23_get_mbus_factors,
-};
+       gate->reg = reg;
+       gate->bit_idx = SUN8I_MBUS_ENABLE;
+       gate->lock = &sun8i_a23_mbus_lock;
 
-static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
+       div->reg = reg;
+       div->shift = SUN8I_MBUS_DIV_SHIFT;
+       div->width = SUN8I_MBUS_DIV_WIDTH;
+       div->lock = &sun8i_a23_mbus_lock;
 
-static void __init sun8i_a23_mbus_setup(struct device_node *node)
-{
-       struct clk *mbus;
-       void __iomem *reg;
+       mux->reg = reg;
+       mux->shift = SUN8I_MBUS_MUX_SHIFT;
+       mux->mask = SUN8I_MBUS_MUX_MASK;
+       mux->lock = &sun8i_a23_mbus_lock;
 
-       reg = of_iomap(node, 0);
-       if (!reg) {
-               pr_err("Could not get registers for a23-mbus-clk\n");
-               return;
-       }
+       clk = clk_register_composite(NULL, clk_name, parents, num_parents,
+                                    &mux->hw, &clk_mux_ops,
+                                    &div->hw, &clk_divider_ops,
+                                    &gate->hw, &clk_gate_ops,
+                                    0);
+       if (IS_ERR(clk))
+               goto err_free_gate;
 
-       mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
-                                     &sun8i_a23_mbus_lock, reg);
+       err = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (err)
+               goto err_unregister_clk;
 
        /* The MBUS clocks needs to be always enabled */
-       __clk_get(mbus);
-       clk_prepare_enable(mbus);
+       __clk_get(clk);
+       clk_prepare_enable(clk);
+
+       return;
+
+err_unregister_clk:
+       /* TODO: The composite clock stuff will leak a bit here. */
+       clk_unregister(clk);
+err_free_gate:
+       kfree(gate);
+err_free_mux:
+       kfree(mux);
+err_free_div:
+       kfree(div);
+err_unmap:
+       iounmap(reg);
+       of_address_to_resource(node, 0, &res);
+       release_mem_region(res.start, resource_size(&res));
 }
 CLK_OF_DECLARE(sun8i_a23_mbus, "allwinner,sun8i-a23-mbus-clk", sun8i_a23_mbus_setup);
index 6c4c98324d3cc6800bace1b4a918707e2b4cc53f..43f014f85803109aa5cca03dbe045d05a7a4b120 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
  * p and m are named div1 and div2 in Allwinner's SDK
  */
 
-static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
+static void sun9i_a80_get_pll4_factors(struct factors_request *req)
 {
        int n;
        int m = 1;
        int p = 1;
 
        /* Normalize value to a 6 MHz multiple (24 MHz / 4) */
-       n = DIV_ROUND_UP(*freq, 6000000);
+       n = DIV_ROUND_UP(req->rate, 6000000);
 
        /* If n is too large switch to steps of 12 MHz */
        if (n > 255) {
@@ -60,18 +60,13 @@ static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
        else if (n < 12)
                n = 12;
 
-       *freq = ((24000000 * n) >> p) / (m + 1);
-
-       /* we were called to round the frequency, we can now return */
-       if (n_ret == NULL)
-               return;
-
-       *n_ret = n;
-       *m_ret = m;
-       *p_ret = p;
+       req->rate = ((24000000 * n) >> p) / (m + 1);
+       req->n = n;
+       req->m = m;
+       req->p = p;
 }
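Worked example for the PLL4 factors above, assuming the requested rate is modest enough that the elided n > 255 fallback branches never fire (values are illustrative):

/*
 * rate = 960 MHz
 *   n = DIV_ROUND_UP(960000000, 6000000) = 160      (within 12..255, kept)
 *   m = 1, p = 1                                    (the defaults set above)
 *   rate = ((24000000 * 160) >> 1) / (1 + 1) = 960000000
 */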
 
-static struct clk_factors_config sun9i_a80_pll4_config = {
+static const struct clk_factors_config sun9i_a80_pll4_config = {
        .mshift = 18,
        .mwidth = 1,
        .nshift = 8,
@@ -111,30 +106,24 @@ CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_se
  * rate = parent_rate / (m + 1);
  */
 
-static void sun9i_a80_get_gt_factors(u32 *freq, u32 parent_rate,
-                                    u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_gt_factors(struct factors_request *req)
 {
        u32 div;
 
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        /* maximum divider is 4 */
        if (div > 4)
                div = 4;
 
-       *freq = parent_rate / div;
-
-       /* we were called to round the frequency, we can now return */
-       if (!m)
-               return;
-
-       *m = div;
+       req->rate = req->parent_rate / div;
+       req->m = div;
 }
 
-static struct clk_factors_config sun9i_a80_gt_config = {
+static const struct clk_factors_config sun9i_a80_gt_config = {
        .mshift = 0,
        .mwidth = 2,
 };
@@ -176,30 +165,24 @@ CLK_OF_DECLARE(sun9i_a80_gt, "allwinner,sun9i-a80-gt-clk", sun9i_a80_gt_setup);
  * rate = parent_rate >> p;
  */
 
-static void sun9i_a80_get_ahb_factors(u32 *freq, u32 parent_rate,
-                                     u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_ahb_factors(struct factors_request *req)
 {
        u32 _p;
 
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
-       _p = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+       _p = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
 
        /* maximum p is 3 */
        if (_p > 3)
                _p = 3;
 
-       *freq = parent_rate >> _p;
-
-       /* we were called to round the frequency, we can now return */
-       if (!p)
-               return;
-
-       *p = _p;
+       req->rate = req->parent_rate >> _p;
+       req->p = _p;
 }
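The AHB divider above is power-of-two only, so the helper simply picks the smallest shift that does not overshoot the requested rate; for example (illustrative values):

/*
 * rate = 30 MHz, parent_rate = 120 MHz
 *   _p   = order_base_2(DIV_ROUND_UP(120000000, 30000000)) = order_base_2(4) = 2
 *   rate = 120000000 >> 2 = 30000000, so p = 2
 */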
 
-static struct clk_factors_config sun9i_a80_ahb_config = {
+static const struct clk_factors_config sun9i_a80_ahb_config = {
        .pshift = 0,
        .pwidth = 2,
 };
@@ -262,34 +245,25 @@ CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_se
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sun9i_a80_get_apb1_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_apb1_factors(struct factors_request *req)
 {
        u32 div;
-       u8 calcm, calcp;
 
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        /* Highest possible divider is 256 (p = 3, m = 31) */
        if (div > 256)
                div = 256;
 
-       calcp = order_base_2(div);
-       calcm = (parent_rate >> calcp) - 1;
-       *freq = (parent_rate >> calcp) / (calcm + 1);
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm;
-       *p = calcp;
+       req->p = order_base_2(div);
+       req->m = (req->parent_rate >> req->p) - 1;
+       req->rate = (req->parent_rate >> req->p) / (req->m + 1);
 }
 
-static struct clk_factors_config sun9i_a80_apb1_config = {
+static const struct clk_factors_config sun9i_a80_apb1_config = {
        .mshift = 0,
        .mwidth = 5,
        .pshift = 16,
index 5ba2188ee99ccaa1f806113073c2e21b17813e5f..49ce2830489b6a75af2ba1281e8ed47469d5f8b4 100644 (file)
 
 static DEFINE_SPINLOCK(clk_lock);
 
-/**
- * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
- */
-
-#define SUN6I_AHB1_MAX_PARENTS         4
-#define SUN6I_AHB1_MUX_PARENT_PLL6     3
-#define SUN6I_AHB1_MUX_SHIFT           12
-/* un-shifted mask is what mux_clk expects */
-#define SUN6I_AHB1_MUX_MASK            0x3
-#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
-                                        SUN6I_AHB1_MUX_MASK)
-
-#define SUN6I_AHB1_DIV_SHIFT           4
-#define SUN6I_AHB1_DIV_MASK            (0x3 << SUN6I_AHB1_DIV_SHIFT)
-#define SUN6I_AHB1_DIV_GET(reg)                ((reg & SUN6I_AHB1_DIV_MASK) >> \
-                                               SUN6I_AHB1_DIV_SHIFT)
-#define SUN6I_AHB1_DIV_SET(reg, div)   ((reg & ~SUN6I_AHB1_DIV_MASK) | \
-                                               (div << SUN6I_AHB1_DIV_SHIFT))
-#define SUN6I_AHB1_PLL6_DIV_SHIFT      6
-#define SUN6I_AHB1_PLL6_DIV_MASK       (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
-#define SUN6I_AHB1_PLL6_DIV_GET(reg)   ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
-                                               SUN6I_AHB1_PLL6_DIV_SHIFT)
-#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
-                                               (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
-
-struct sun6i_ahb1_clk {
-       struct clk_hw hw;
-       void __iomem *reg;
-};
-
-#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
-
-static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
-                                               unsigned long parent_rate)
-{
-       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
-       unsigned long rate;
-       u32 reg;
-
-       /* Fetch the register value */
-       reg = readl(ahb1->reg);
-
-       /* apply pre-divider first if parent is pll6 */
-       if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
-               parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
-
-       /* clk divider */
-       rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
-
-       return rate;
-}
-
-static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
-                                u8 parent, unsigned long parent_rate)
-{
-       u8 div, calcp, calcm = 1;
-
-       /*
-        * clock can only divide, so we will never be able to achieve
-        * frequencies higher than the parent frequency
-        */
-       if (parent_rate && rate > parent_rate)
-               rate = parent_rate;
-
-       div = DIV_ROUND_UP(parent_rate, rate);
-
-       /* calculate pre-divider if parent is pll6 */
-       if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
-               if (div < 4)
-                       calcp = 0;
-               else if (div / 2 < 4)
-                       calcp = 1;
-               else if (div / 4 < 4)
-                       calcp = 2;
-               else
-                       calcp = 3;
-
-               calcm = DIV_ROUND_UP(div, 1 << calcp);
-       } else {
-               calcp = __roundup_pow_of_two(div);
-               calcp = calcp > 3 ? 3 : calcp;
-       }
-
-       /* we were asked to pass back divider values */
-       if (divp) {
-               *divp = calcp;
-               *pre_divp = calcm - 1;
-       }
-
-       return (parent_rate / calcm) >> calcp;
-}
-
-static int sun6i_ahb1_clk_determine_rate(struct clk_hw *hw,
-                                        struct clk_rate_request *req)
-{
-       struct clk_hw *parent, *best_parent = NULL;
-       int i, num_parents;
-       unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
-
-       /* find the parent that can help provide the fastest rate <= rate */
-       num_parents = clk_hw_get_num_parents(hw);
-       for (i = 0; i < num_parents; i++) {
-               parent = clk_hw_get_parent_by_index(hw, i);
-               if (!parent)
-                       continue;
-               if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
-                       parent_rate = clk_hw_round_rate(parent, req->rate);
-               else
-                       parent_rate = clk_hw_get_rate(parent);
-
-               child_rate = sun6i_ahb1_clk_round(req->rate, NULL, NULL, i,
-                                                 parent_rate);
-
-               if (child_rate <= req->rate && child_rate > best_child_rate) {
-                       best_parent = parent;
-                       best = parent_rate;
-                       best_child_rate = child_rate;
-               }
-       }
-
-       if (!best_parent)
-               return -EINVAL;
-
-       req->best_parent_hw = best_parent;
-       req->best_parent_rate = best;
-       req->rate = best_child_rate;
-
-       return 0;
-}
-
-static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
-                                  unsigned long parent_rate)
-{
-       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
-       unsigned long flags;
-       u8 div, pre_div, parent;
-       u32 reg;
-
-       spin_lock_irqsave(&clk_lock, flags);
-
-       reg = readl(ahb1->reg);
-
-       /* need to know which parent is used to apply pre-divider */
-       parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
-       sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
-
-       reg = SUN6I_AHB1_DIV_SET(reg, div);
-       reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
-       writel(reg, ahb1->reg);
-
-       spin_unlock_irqrestore(&clk_lock, flags);
-
-       return 0;
-}
-
-static const struct clk_ops sun6i_ahb1_clk_ops = {
-       .determine_rate = sun6i_ahb1_clk_determine_rate,
-       .recalc_rate    = sun6i_ahb1_clk_recalc_rate,
-       .set_rate       = sun6i_ahb1_clk_set_rate,
-};
-
-static void __init sun6i_ahb1_clk_setup(struct device_node *node)
-{
-       struct clk *clk;
-       struct sun6i_ahb1_clk *ahb1;
-       struct clk_mux *mux;
-       const char *clk_name = node->name;
-       const char *parents[SUN6I_AHB1_MAX_PARENTS];
-       void __iomem *reg;
-       int i;
-
-       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (IS_ERR(reg))
-               return;
-
-       /* we have a mux, we will have >1 parents */
-       i = of_clk_parent_fill(node, parents, SUN6I_AHB1_MAX_PARENTS);
-       of_property_read_string(node, "clock-output-names", &clk_name);
-
-       ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
-       if (!ahb1)
-               return;
-
-       mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
-       if (!mux) {
-               kfree(ahb1);
-               return;
-       }
-
-       /* set up clock properties */
-       mux->reg = reg;
-       mux->shift = SUN6I_AHB1_MUX_SHIFT;
-       mux->mask = SUN6I_AHB1_MUX_MASK;
-       mux->lock = &clk_lock;
-       ahb1->reg = reg;
-
-       clk = clk_register_composite(NULL, clk_name, parents, i,
-                                    &mux->hw, &clk_mux_ops,
-                                    &ahb1->hw, &sun6i_ahb1_clk_ops,
-                                    NULL, NULL, 0);
-
-       if (!IS_ERR(clk)) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
-       }
-}
-CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
-
 /* Maximum number of parents our clocks have */
 #define SUNXI_MAX_PARENTS      5
 
@@ -246,49 +38,45 @@ CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_se
  * parent_rate is always 24Mhz
  */
 
-static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_pll1_factors(struct factors_request *req)
 {
        u8 div;
 
        /* Normalize value to a 6M multiple */
-       div = *freq / 6000000;
-       *freq = 6000000 * div;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
+       div = req->rate / 6000000;
+       req->rate = 6000000 * div;
 
        /* m is always zero for pll1 */
-       *m = 0;
+       req->m = 0;
 
        /* k is 1 only on these cases */
-       if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
-               *k = 1;
+       if (req->rate >= 768000000 || req->rate == 42000000 ||
+                       req->rate == 54000000)
+               req->k = 1;
        else
-               *k = 0;
+               req->k = 0;
 
        /* p will be 3 for divs under 10 */
        if (div < 10)
-               *p = 3;
+               req->p = 3;
 
        /* p will be 2 for divs between 10 - 20 and odd divs under 32 */
        else if (div < 20 || (div < 32 && (div & 1)))
-               *p = 2;
+               req->p = 2;
 
        /* p will be 1 for even divs under 32, divs under 40 and odd pairs
         * of divs between 40-62 */
        else if (div < 40 || (div < 64 && (div & 2)))
-               *p = 1;
+               req->p = 1;
 
        /* any other entries have p = 0 */
        else
-               *p = 0;
+               req->p = 0;
 
        /* calculate a suitable n based on k and p */
-       div <<= *p;
-       div /= (*k + 1);
-       *n = div / 4;
+       div <<= req->p;
+       div /= (req->k + 1);
+       req->n = div / 4;
 }
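
A minimal userspace sketch (not part of the patch) of the factor selection done by the reworked sun4i_get_pll1_factors() above, assuming the usual A10 PLL1 relation rate = (parent_rate * n * (k + 1) >> p) / (m + 1):

#include <stdio.h>

int main(void)
{
        unsigned long parent = 24000000, rate = 1008000000;
        unsigned int div = rate / 6000000;      /* normalize to 6 MHz steps */
        unsigned int m = 0, k, p, n;

        rate = 6000000UL * div;
        k = (rate >= 768000000 || rate == 42000000 || rate == 54000000);

        if (div < 10)
                p = 3;
        else if (div < 20 || (div < 32 && (div & 1)))
                p = 2;
        else if (div < 40 || (div < 64 && (div & 2)))
                p = 1;
        else
                p = 0;

        n = ((div << p) / (k + 1)) / 4;

        /* 1008 MHz -> n=21 k=1 m=0 p=0, i.e. 24 MHz * 21 * 2 */
        printf("n=%u k=%u m=%u p=%u -> %lu Hz\n", n, k, m, p,
               ((parent * n * (k + 1)) >> p) / (m + 1));
        return 0;
}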
 
 /**
@@ -297,15 +85,14 @@ static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
  * rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
  * parent_rate should always be 24MHz
  */
-static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun6i_a31_get_pll1_factors(struct factors_request *req)
 {
        /*
         * We can operate only on MHz, this will make our life easier
         * later.
         */
-       u32 freq_mhz = *freq / 1000000;
-       u32 parent_freq_mhz = parent_rate / 1000000;
+       u32 freq_mhz = req->rate / 1000000;
+       u32 parent_freq_mhz = req->parent_rate / 1000000;
 
        /*
         * Round down the frequency to the closest multiple of either
@@ -319,28 +106,20 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
        else
                freq_mhz = round_freq_16;
 
-       *freq = freq_mhz * 1000000;
-
-       /*
-        * If the factors pointer are null, we were just called to
-        * round down the frequency.
-        * Exit.
-        */
-       if (n == NULL)
-               return;
+       req->rate = freq_mhz * 1000000;
 
        /* If the frequency is a multiple of 32 MHz, k is always 3 */
        if (!(freq_mhz % 32))
-               *k = 3;
+               req->k = 3;
        /* If the frequency is a multiple of 9 MHz, k is always 2 */
        else if (!(freq_mhz % 9))
-               *k = 2;
+               req->k = 2;
        /* If the frequency is a multiple of 8 MHz, k is always 1 */
        else if (!(freq_mhz % 8))
-               *k = 1;
+               req->k = 1;
        /* Otherwise, we don't use the k factor */
        else
-               *k = 0;
+               req->k = 0;
 
        /*
         * If the frequency is a multiple of 2 but not a multiple of
@@ -351,27 +130,28 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
         * somehow relates to this frequency.
         */
        if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
-               *m = 2;
+               req->m = 2;
        /*
         * If the frequency is a multiple of 6MHz, but the factor is
         * odd, m will be 3
         */
        else if ((freq_mhz / 6) & 1)
-               *m = 3;
+               req->m = 3;
        /* Otherwise, we end up with m = 1 */
        else
-               *m = 1;
+               req->m = 1;
 
        /* Calculate n thanks to the above factors we already got */
-       *n = freq_mhz * (*m + 1) / ((*k + 1) * parent_freq_mhz) - 1;
+       req->n = freq_mhz * (req->m + 1) / ((req->k + 1) * parent_freq_mhz)
+                - 1;
 
        /*
         * If n end up being outbound, and that we can still decrease
         * m, do it.
         */
-       if ((*n + 1) > 31 && (*m + 1) > 1) {
-               *n = (*n + 1) / 2 - 1;
-               *m = (*m + 1) / 2 - 1;
+       if ((req->n + 1) > 31 && (req->m + 1) > 1) {
+               req->n = (req->n + 1) / 2 - 1;
+               req->m = (req->m + 1) / 2 - 1;
        }
 }
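
A small check, in the same spirit and again outside the patch, that the k/m/n picked by the new sun6i_a31_get_pll1_factors() satisfy the relation quoted above (rate = parent_rate * (n + 1) * (k + 1) / (m + 1)); the initial rounding to a 16/24 MHz multiple is skipped by starting from an already-valid 1008 MHz:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int parent_mhz = 24, freq_mhz = 1008;
        unsigned int k, m, n;

        if (!(freq_mhz % 32))
                k = 3;
        else if (!(freq_mhz % 9))
                k = 2;
        else if (!(freq_mhz % 8))
                k = 1;
        else
                k = 0;

        if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
                m = 2;
        else if ((freq_mhz / 6) & 1)
                m = 3;
        else
                m = 1;

        n = freq_mhz * (m + 1) / ((k + 1) * parent_mhz) - 1;
        if (n + 1 > 31 && m + 1 > 1) {  /* same out-of-range fixup as above */
                n = (n + 1) / 2 - 1;
                m = (m + 1) / 2 - 1;
        }

        /* 1008 MHz -> n=27 k=2 m=1: 24 * 28 * 3 / 2 = 1008 */
        assert(parent_mhz * (n + 1) * (k + 1) / (m + 1) == freq_mhz);
        printf("n=%u k=%u m=%u\n", n, k, m);
        return 0;
}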
 
@@ -382,45 +162,41 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
  * parent_rate is always 24Mhz
  */
 
-static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun8i_a23_get_pll1_factors(struct factors_request *req)
 {
        u8 div;
 
        /* Normalize value to a 6M multiple */
-       div = *freq / 6000000;
-       *freq = 6000000 * div;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
+       div = req->rate / 6000000;
+       req->rate = 6000000 * div;
 
        /* m is always zero for pll1 */
-       *m = 0;
+       req->m = 0;
 
        /* k is 1 only on these cases */
-       if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
-               *k = 1;
+       if (req->rate >= 768000000 || req->rate == 42000000 ||
+                       req->rate == 54000000)
+               req->k = 1;
        else
-               *k = 0;
+               req->k = 0;
 
        /* p will be 2 for divs under 20 and odd divs under 32 */
        if (div < 20 || (div < 32 && (div & 1)))
-               *p = 2;
+               req->p = 2;
 
        /* p will be 1 for even divs under 32, divs under 40 and odd pairs
         * of divs between 40-62 */
        else if (div < 40 || (div < 64 && (div & 2)))
-               *p = 1;
+               req->p = 1;
 
        /* any other entries have p = 0 */
        else
-               *p = 0;
+               req->p = 0;
 
        /* calculate a suitable n based on k and p */
-       div <<= *p;
-       div /= (*k + 1);
-       *n = div / 4 - 1;
+       div <<= req->p;
+       div /= (req->k + 1);
+       req->n = div / 4 - 1;
 }
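
The A23 getter differs from the A10 one above only in programming n - 1; together with the .n_start = 1 entry in sun8i_a23_pll1_config further down, the effective multiplier is still (n + 1). A quick sketch (not part of the patch) of what gets written for 1008 MHz, taking k = 1 and p = 0 as selected by the rules above:

#include <stdio.h>

int main(void)
{
        unsigned int div = 1008000000 / 6000000;        /* 168 */
        unsigned int k = 1, p = 0;
        unsigned int n = ((div << p) / (k + 1)) / 4 - 1;

        printf("n=%u -> %u MHz\n", n, 24 * (n + 1) * (k + 1));
        return 0;
}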
 
 /**
@@ -430,56 +206,55 @@ static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
  * parent_rate is always 24Mhz
  */
 
-static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_pll5_factors(struct factors_request *req)
 {
        u8 div;
 
        /* Normalize value to a parent_rate multiple (24M) */
-       div = *freq / parent_rate;
-       *freq = parent_rate * div;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
+       div = req->rate / req->parent_rate;
+       req->rate = req->parent_rate * div;
 
        if (div < 31)
-               *k = 0;
+               req->k = 0;
        else if (div / 2 < 31)
-               *k = 1;
+               req->k = 1;
        else if (div / 3 < 31)
-               *k = 2;
+               req->k = 2;
        else
-               *k = 3;
+               req->k = 3;
 
-       *n = DIV_ROUND_UP(div, (*k+1));
+       req->n = DIV_ROUND_UP(div, (req->k + 1));
 }
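
The PLL5 getter only picks n and k, so (assuming the usual A10 PLL5 relation rate = parent_rate * n * (k + 1)) the achieved rate is a straight multiple of the 24 MHz parent. A short sketch, outside the patch, for an 864 MHz request:

#include <stdio.h>

#define DIV_ROUND_UP(a, b)      (((a) + (b) - 1) / (b))

int main(void)
{
        unsigned long parent = 24000000, rate = 864000000;
        unsigned int div = rate / parent, k, n;

        if (div < 31)
                k = 0;
        else if (div / 2 < 31)
                k = 1;
        else if (div / 3 < 31)
                k = 2;
        else
                k = 3;

        n = DIV_ROUND_UP(div, k + 1);

        /* 864 MHz -> div=36 k=1 n=18: 24 MHz * 18 * 2 */
        printf("k=%u n=%u -> %lu Hz\n", k, n, parent * n * (k + 1));
        return 0;
}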
 
 /**
- * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6x2
- * PLL6x2 rate is calculated as follows
- * rate = parent_rate * (n + 1) * (k + 1)
+ * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6
+ * PLL6 rate is calculated as follows
+ * rate = parent_rate * (n + 1) * (k + 1) / 2
  * parent_rate is always 24Mhz
  */
 
-static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun6i_a31_get_pll6_factors(struct factors_request *req)
 {
        u8 div;
 
        /* Normalize value to a parent_rate multiple (24M) */
-       div = *freq / parent_rate;
-       *freq = parent_rate * div;
+       div = req->rate / (req->parent_rate / 2);
+       req->rate = (req->parent_rate / 2) * div;
 
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
+       req->k = div / 32;
+       if (req->k > 3)
+               req->k = 3;
 
-       *k = div / 32;
-       if (*k > 3)
-               *k = 3;
+       req->n = DIV_ROUND_UP(div, (req->k + 1)) - 1;
+}
+
+static void sun6i_a31_pll6_recalc(struct factors_request *req)
+{
+       req->rate = req->parent_rate;
 
-       *n = DIV_ROUND_UP(div, (*k+1)) - 1;
+       req->rate *= req->n + 1;
+       req->rate *= req->k + 1;
+       req->rate /= 2;
 }
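
The PLL6 getter now divides against half the parent rate, and the new sun6i_a31_pll6_recalc() applies the same "/ 2" on the way back, matching the corrected comment above. A sketch of both directions (not part of the patch) for a 600 MHz request:

#include <stdio.h>

#define DIV_ROUND_UP(a, b)      (((a) + (b) - 1) / (b))

int main(void)
{
        unsigned long parent = 24000000, rate = 600000000;
        unsigned int div = rate / (parent / 2), k, n;

        k = div / 32;
        if (k > 3)
                k = 3;
        n = DIV_ROUND_UP(div, k + 1) - 1;

        /* 600 MHz -> div=50 k=1 n=24; recalc: 24 MHz * 25 * 2 / 2 */
        printf("n=%u k=%u -> %lu Hz\n", n, k,
               parent * (n + 1) * (k + 1) / 2);
        return 0;
}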
 
 /**
@@ -488,37 +263,94 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
  * rate = parent_rate >> p
  */
 
-static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun5i_a13_get_ahb_factors(struct factors_request *req)
 {
        u32 div;
 
        /* divide only */
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
        /*
         * user manual says valid speed is 8k ~ 276M, but tests show it
         * can work at speeds up to 300M, just after reparenting to pll6
         */
-       if (*freq < 8000)
-               *freq = 8000;
-       if (*freq > 300000000)
-               *freq = 300000000;
+       if (req->rate < 8000)
+               req->rate = 8000;
+       if (req->rate > 300000000)
+               req->rate = 300000000;
 
-       div = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+       div = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
 
        /* p = 0 ~ 3 */
        if (div > 3)
                div = 3;
 
-       *freq = parent_rate >> div;
+       req->rate = req->parent_rate >> div;
 
-       /* we were called to round the frequency, we can now return */
-       if (p == NULL)
-               return;
+       req->p = div;
+}
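
Since this divider can only shift, requests that are not an exact power-of-two fraction of the parent get rounded down; a minimal sketch (not part of the patch, order_base_2() open-coded) for a 200 MHz request from a 600 MHz parent:

#include <stdio.h>

#define DIV_ROUND_UP(a, b)      (((a) + (b) - 1) / (b))

int main(void)
{
        unsigned long parent = 600000000, rate = 200000000;
        unsigned int want = DIV_ROUND_UP(parent, rate), p = 0;

        while ((1u << p) < want)        /* order_base_2(want) */
                p++;
        if (p > 3)                      /* p is a two-bit field */
                p = 3;

        /* 200 MHz requested -> p=2 -> 150 MHz achieved */
        printf("p=%u -> %lu Hz\n", p, parent >> p);
        return 0;
}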
+
+#define SUN6I_AHB1_PARENT_PLL6 3
+
+/**
+ * sun6i_a31_get_ahb_factors() - calculates m, p factors for AHB
+ * AHB rate is calculated as follows
+ * rate = parent_rate >> p
+ *
+ * if parent is pll6, then
+ * parent_rate = pll6 rate / (m + 1)
+ */
+
+static void sun6i_get_ahb1_factors(struct factors_request *req)
+{
+       u8 div, calcp, calcm = 1;
+
+       /*
+        * clock can only divide, so we will never be able to achieve
+        * frequencies higher than the parent frequency
+        */
+       if (req->parent_rate && req->rate > req->parent_rate)
+               req->rate = req->parent_rate;
+
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
+
+       /* calculate pre-divider if parent is pll6 */
+       if (req->parent_index == SUN6I_AHB1_PARENT_PLL6) {
+               if (div < 4)
+                       calcp = 0;
+               else if (div / 2 < 4)
+                       calcp = 1;
+               else if (div / 4 < 4)
+                       calcp = 2;
+               else
+                       calcp = 3;
 
-       *p = div;
+               calcm = DIV_ROUND_UP(div, 1 << calcp);
+       } else {
+               calcp = __roundup_pow_of_two(div);
+               calcp = calcp > 3 ? 3 : calcp;
+       }
+
+       req->rate = (req->parent_rate / calcm) >> calcp;
+       req->p = calcp;
+       req->m = calcm - 1;
+}
+
+/**
+ * sun6i_ahb1_recalc() - calculates AHB clock rate from m, p factors and
+ *                      parent index
+ */
+static void sun6i_ahb1_recalc(struct factors_request *req)
+{
+       req->rate = req->parent_rate;
+
+       /* apply pre-divider first if parent is pll6 */
+       if (req->parent_index == SUN6I_AHB1_PARENT_PLL6)
+               req->rate /= req->m + 1;
+
+       /* clk divider */
+       req->rate >>= req->p;
 }
 
 /**
@@ -527,39 +359,34 @@ static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_apb1_factors(struct factors_request *req)
 {
        u8 calcm, calcp;
+       int div;
 
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
-       parent_rate = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        /* Invalid rate! */
-       if (parent_rate > 32)
+       if (div > 32)
                return;
 
-       if (parent_rate <= 4)
+       if (div <= 4)
                calcp = 0;
-       else if (parent_rate <= 8)
+       else if (div <= 8)
                calcp = 1;
-       else if (parent_rate <= 16)
+       else if (div <= 16)
                calcp = 2;
        else
                calcp = 3;
 
-       calcm = (parent_rate >> calcp) - 1;
+       calcm = (req->parent_rate >> calcp) - 1;
 
-       *freq = (parent_rate >> calcp) / (calcm + 1);
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm;
-       *p = calcp;
+       req->rate = (req->parent_rate >> calcp) / (calcm + 1);
+       req->m = calcm;
+       req->p = calcp;
 }
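
A quick numeric check of the APB1 relation quoted above (rate = (parent_rate >> p) / (m + 1)), not part of the patch: a 24 MHz parent with p = 1 and m = 5, for example, gives 2 MHz.

#include <stdio.h>

int main(void)
{
        unsigned long parent = 24000000;
        unsigned int p = 1, m = 5;

        printf("%lu Hz\n", (parent >> p) / (m + 1));    /* 2000000 */
        return 0;
}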
 
 
@@ -571,17 +398,16 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
-                                     u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun7i_a20_get_out_factors(struct factors_request *req)
 {
        u8 div, calcm, calcp;
 
        /* These clocks can only divide, so we will never be able to achieve
         * frequencies higher than the parent frequency */
-       if (*freq > parent_rate)
-               *freq = parent_rate;
+       if (req->rate > req->parent_rate)
+               req->rate = req->parent_rate;
 
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        if (div < 32)
                calcp = 0;
@@ -594,21 +420,16 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
 
        calcm = DIV_ROUND_UP(div, 1 << calcp);
 
-       *freq = (parent_rate >> calcp) / calcm;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm - 1;
-       *p = calcp;
+       req->rate = (req->parent_rate >> calcp) / calcm;
+       req->m = calcm - 1;
+       req->p = calcp;
 }
 
 /**
  * sunxi_factors_clk_setup() - Setup function for factor clocks
  */
 
-static struct clk_factors_config sun4i_pll1_config = {
+static const struct clk_factors_config sun4i_pll1_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
@@ -619,7 +440,7 @@ static struct clk_factors_config sun4i_pll1_config = {
        .pwidth = 2,
 };
 
-static struct clk_factors_config sun6i_a31_pll1_config = {
+static const struct clk_factors_config sun6i_a31_pll1_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
@@ -629,7 +450,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
        .n_start = 1,
 };
 
-static struct clk_factors_config sun8i_a23_pll1_config = {
+static const struct clk_factors_config sun8i_a23_pll1_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
@@ -641,14 +462,14 @@ static struct clk_factors_config sun8i_a23_pll1_config = {
        .n_start = 1,
 };
 
-static struct clk_factors_config sun4i_pll5_config = {
+static const struct clk_factors_config sun4i_pll5_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
        .kwidth = 2,
 };
 
-static struct clk_factors_config sun6i_a31_pll6_config = {
+static const struct clk_factors_config sun6i_a31_pll6_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
@@ -656,12 +477,19 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
        .n_start = 1,
 };
 
-static struct clk_factors_config sun5i_a13_ahb_config = {
+static const struct clk_factors_config sun5i_a13_ahb_config = {
+       .pshift = 4,
+       .pwidth = 2,
+};
+
+static const struct clk_factors_config sun6i_ahb1_config = {
+       .mshift = 6,
+       .mwidth = 2,
        .pshift = 4,
        .pwidth = 2,
 };
 
-static struct clk_factors_config sun4i_apb1_config = {
+static const struct clk_factors_config sun4i_apb1_config = {
        .mshift = 0,
        .mwidth = 5,
        .pshift = 16,
@@ -669,7 +497,7 @@ static struct clk_factors_config sun4i_apb1_config = {
 };
 
 /* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun7i_a20_out_config = {
+static const struct clk_factors_config sun7i_a20_out_config = {
        .mshift = 8,
        .mwidth = 5,
        .pshift = 20,
@@ -718,7 +546,7 @@ static const struct factors_data sun6i_a31_pll6_data __initconst = {
        .enable = 31,
        .table = &sun6i_a31_pll6_config,
        .getter = sun6i_a31_get_pll6_factors,
-       .name = "pll6x2",
+       .recalc = sun6i_a31_pll6_recalc,
 };
 
 static const struct factors_data sun5i_a13_ahb_data __initconst = {
@@ -728,6 +556,14 @@ static const struct factors_data sun5i_a13_ahb_data __initconst = {
        .getter = sun5i_a13_get_ahb_factors,
 };
 
+static const struct factors_data sun6i_ahb1_data __initconst = {
+       .mux = 12,
+       .muxmask = BIT(1) | BIT(0),
+       .table = &sun6i_ahb1_config,
+       .getter = sun6i_get_ahb1_factors,
+       .recalc = sun6i_ahb1_recalc,
+};
+
 static const struct factors_data sun4i_apb1_data __initconst = {
        .mux = 24,
        .muxmask = BIT(1) | BIT(0),
@@ -758,6 +594,68 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
        return sunxi_factors_register(node, data, &clk_lock, reg);
 }
 
+static void __init sun4i_pll1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun4i_pll1_data);
+}
+CLK_OF_DECLARE(sun4i_pll1, "allwinner,sun4i-a10-pll1-clk",
+              sun4i_pll1_clk_setup);
+
+static void __init sun6i_pll1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun6i_a31_pll1_data);
+}
+CLK_OF_DECLARE(sun6i_pll1, "allwinner,sun6i-a31-pll1-clk",
+              sun6i_pll1_clk_setup);
+
+static void __init sun8i_pll1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun8i_a23_pll1_data);
+}
+CLK_OF_DECLARE(sun8i_pll1, "allwinner,sun8i-a23-pll1-clk",
+              sun8i_pll1_clk_setup);
+
+static void __init sun7i_pll4_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun7i_a20_pll4_data);
+}
+CLK_OF_DECLARE(sun7i_pll4, "allwinner,sun7i-a20-pll4-clk",
+              sun7i_pll4_clk_setup);
+
+static void __init sun6i_pll6_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun6i_a31_pll6_data);
+}
+CLK_OF_DECLARE(sun6i_pll6, "allwinner,sun6i-a31-pll6-clk",
+              sun6i_pll6_clk_setup);
+
+static void __init sun5i_ahb_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun5i_a13_ahb_data);
+}
+CLK_OF_DECLARE(sun5i_ahb, "allwinner,sun5i-a13-ahb-clk",
+              sun5i_ahb_clk_setup);
+
+static void __init sun6i_ahb1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun6i_ahb1_data);
+}
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk",
+              sun6i_ahb1_clk_setup);
+
+static void __init sun4i_apb1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun4i_apb1_data);
+}
+CLK_OF_DECLARE(sun4i_apb1, "allwinner,sun4i-a10-apb1-clk",
+              sun4i_apb1_clk_setup);
+
+static void __init sun7i_out_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun7i_a20_out_data);
+}
+CLK_OF_DECLARE(sun7i_out, "allwinner,sun7i-a20-out-clk",
+              sun7i_out_clk_setup);
 
 
 /**
@@ -782,8 +680,8 @@ static const struct mux_data sun8i_h3_ahb2_mux_data __initconst = {
        .shift = 0,
 };
 
-static void __init sunxi_mux_clk_setup(struct device_node *node,
-                                      struct mux_data *data)
+static struct clk * __init sunxi_mux_clk_setup(struct device_node *node,
+                                              const struct mux_data *data)
 {
        struct clk *clk;
        const char *clk_name = node->name;
@@ -794,19 +692,60 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
        reg = of_iomap(node, 0);
 
        i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS);
-       of_property_read_string(node, "clock-output-names", &clk_name);
+       if (of_property_read_string(node, "clock-output-names", &clk_name)) {
+               pr_warn("%s: could not read clock-output-names for \"%s\"\n",
+                       __func__, clk_name);
+               goto out_unmap;
+       }
 
        clk = clk_register_mux(NULL, clk_name, parents, i,
                               CLK_SET_RATE_PARENT, reg,
                               data->shift, SUNXI_MUX_GATE_WIDTH,
                               0, &clk_lock);
 
-       if (clk) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
+       if (IS_ERR(clk)) {
+               pr_warn("%s: failed to register mux clock %s: %ld\n", __func__,
+                       clk_name, PTR_ERR(clk));
+               goto out_unmap;
        }
+
+       of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+       return clk;
+
+out_unmap:
+       iounmap(reg);
+       return NULL;
 }
 
+static void __init sun4i_cpu_clk_setup(struct device_node *node)
+{
+       struct clk *clk;
+
+       clk = sunxi_mux_clk_setup(node, &sun4i_cpu_mux_data);
+       if (!clk)
+               return;
+
+       /* Protect CPU clock */
+       __clk_get(clk);
+       clk_prepare_enable(clk);
+}
+CLK_OF_DECLARE(sun4i_cpu, "allwinner,sun4i-a10-cpu-clk",
+              sun4i_cpu_clk_setup);
+
+static void __init sun6i_ahb1_mux_clk_setup(struct device_node *node)
+{
+       sunxi_mux_clk_setup(node, &sun6i_a31_ahb1_mux_data);
+}
+CLK_OF_DECLARE(sun6i_ahb1_mux, "allwinner,sun6i-a31-ahb1-mux-clk",
+              sun6i_ahb1_mux_clk_setup);
+
+static void __init sun8i_ahb2_clk_setup(struct device_node *node)
+{
+       sunxi_mux_clk_setup(node, &sun8i_h3_ahb2_mux_data);
+}
+CLK_OF_DECLARE(sun8i_ahb2, "allwinner,sun8i-h3-ahb2-clk",
+              sun8i_ahb2_clk_setup);
 
 
 /**
@@ -865,7 +804,7 @@ static const struct div_data sun4i_apb0_data __initconst = {
 };
 
 static void __init sunxi_divider_clk_setup(struct device_node *node,
-                                          struct div_data *data)
+                                          const struct div_data *data)
 {
        struct clk *clk;
        const char *clk_name = node->name;
@@ -882,12 +821,38 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
                                         reg, data->shift, data->width,
                                         data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
                                         data->table, &clk_lock);
-       if (clk) {
+       if (clk)
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
-       }
 }
 
+static void __init sun4i_ahb_clk_setup(struct device_node *node)
+{
+       sunxi_divider_clk_setup(node, &sun4i_ahb_data);
+}
+CLK_OF_DECLARE(sun4i_ahb, "allwinner,sun4i-a10-ahb-clk",
+              sun4i_ahb_clk_setup);
+
+static void __init sun4i_apb0_clk_setup(struct device_node *node)
+{
+       sunxi_divider_clk_setup(node, &sun4i_apb0_data);
+}
+CLK_OF_DECLARE(sun4i_apb0, "allwinner,sun4i-a10-apb0-clk",
+              sun4i_apb0_clk_setup);
+
+static void __init sun4i_axi_clk_setup(struct device_node *node)
+{
+       sunxi_divider_clk_setup(node, &sun4i_axi_data);
+}
+CLK_OF_DECLARE(sun4i_axi, "allwinner,sun4i-a10-axi-clk",
+              sun4i_axi_clk_setup);
+
+static void __init sun8i_axi_clk_setup(struct device_node *node)
+{
+       sunxi_divider_clk_setup(node, &sun8i_a23_axi_data);
+}
+CLK_OF_DECLARE(sun8i_axi, "allwinner,sun8i-a23-axi-clk",
+              sun8i_axi_clk_setup);
+
 
 
 /**
@@ -955,15 +920,6 @@ static const struct divs_data pll6_divs_data __initconst = {
        }
 };
 
-static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
-       .factors = &sun6i_a31_pll6_data,
-       .ndivs = 2,
-       .div = {
-               { .fixed = 2 }, /* normal output */
-               { .self = 1 }, /* base factor clock, 2x */
-       }
-};
-
 /**
  * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
  *
@@ -975,8 +931,8 @@ static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
  *           |________________________|
  */
 
-static void __init sunxi_divs_clk_setup(struct device_node *node,
-                                       struct divs_data *data)
+static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
+                                                const struct divs_data *data)
 {
        struct clk_onecell_data *clk_data;
        const char *parent;
@@ -1003,7 +959,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
 
        clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
        if (!clk_data)
-               return;
+               return NULL;
 
        clks = kcalloc(ndivs, sizeof(*clks), GFP_KERNEL);
        if (!clks)
@@ -1081,7 +1037,6 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
                                                 clkflags);
 
                WARN_ON(IS_ERR(clk_data->clks[i]));
-               clk_register_clkdev(clks[i], clk_name, NULL);
        }
 
        /* Adjust to the real max */
@@ -1089,7 +1044,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
 
        of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 
-       return;
+       return clks;
 
 free_gate:
        kfree(gate);
@@ -1097,130 +1052,28 @@ free_clks:
        kfree(clks);
 free_clkdata:
        kfree(clk_data);
+       return NULL;
 }
 
-
-
-/* Matches for factors clocks */
-static const struct of_device_id clk_factors_match[] __initconst = {
-       {.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,},
-       {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
-       {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
-       {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
-       {.compatible = "allwinner,sun5i-a13-ahb-clk", .data = &sun5i_a13_ahb_data,},
-       {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
-       {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
-       {}
-};
-
-/* Matches for divider clocks */
-static const struct of_device_id clk_div_match[] __initconst = {
-       {.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,},
-       {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
-       {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
-       {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
-       {}
-};
-
-/* Matches for divided outputs */
-static const struct of_device_id clk_divs_match[] __initconst = {
-       {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
-       {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
-       {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_divs_data,},
-       {}
-};
-
-/* Matches for mux clocks */
-static const struct of_device_id clk_mux_match[] __initconst = {
-       {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
-       {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
-       {.compatible = "allwinner,sun8i-h3-ahb2-clk", .data = &sun8i_h3_ahb2_mux_data,},
-       {}
-};
-
-
-static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match,
-                                             void *function)
-{
-       struct device_node *np;
-       const struct div_data *data;
-       const struct of_device_id *match;
-       void (*setup_function)(struct device_node *, const void *) = function;
-
-       for_each_matching_node_and_match(np, clk_match, &match) {
-               data = match->data;
-               setup_function(np, data);
-       }
-}
-
-static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
-{
-       unsigned int i;
-
-       /* Register divided output clocks */
-       of_sunxi_table_clock_setup(clk_divs_match, sunxi_divs_clk_setup);
-
-       /* Register factor clocks */
-       of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
-
-       /* Register divider clocks */
-       of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
-
-       /* Register mux clocks */
-       of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
-
-       /* Protect the clocks that needs to stay on */
-       for (i = 0; i < nclocks; i++) {
-               struct clk *clk = clk_get(NULL, clocks[i]);
-
-               if (!IS_ERR(clk))
-                       clk_prepare_enable(clk);
-       }
-}
-
-static const char *sun4i_a10_critical_clocks[] __initdata = {
-       "pll5_ddr",
-};
-
-static void __init sun4i_a10_init_clocks(struct device_node *node)
+static void __init sun4i_pll5_clk_setup(struct device_node *node)
 {
-       sunxi_init_clocks(sun4i_a10_critical_clocks,
-                         ARRAY_SIZE(sun4i_a10_critical_clocks));
-}
-CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks);
+       struct clk **clks;
 
-static const char *sun5i_critical_clocks[] __initdata = {
-       "cpu",
-       "pll5_ddr",
-};
+       clks = sunxi_divs_clk_setup(node, &pll5_divs_data);
+       if (!clks)
+               return;
 
-static void __init sun5i_init_clocks(struct device_node *node)
-{
-       sunxi_init_clocks(sun5i_critical_clocks,
-                         ARRAY_SIZE(sun5i_critical_clocks));
+       /* Protect PLL5_DDR */
+       __clk_get(clks[0]);
+       clk_prepare_enable(clks[0]);
 }
-CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sun5i_init_clocks);
-CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sun5i_init_clocks);
-CLK_OF_DECLARE(sun5i_r8_clk_init, "allwinner,sun5i-r8", sun5i_init_clocks);
-CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
+CLK_OF_DECLARE(sun4i_pll5, "allwinner,sun4i-a10-pll5-clk",
+              sun4i_pll5_clk_setup);
 
-static const char *sun6i_critical_clocks[] __initdata = {
-       "cpu",
-};
-
-static void __init sun6i_init_clocks(struct device_node *node)
+static void __init sun4i_pll6_clk_setup(struct device_node *node)
 {
-       sunxi_init_clocks(sun6i_critical_clocks,
-                         ARRAY_SIZE(sun6i_critical_clocks));
+       sunxi_divs_clk_setup(node, &pll6_divs_data);
 }
-CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
-CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_h3_clk_init, "allwinner,sun8i-h3", sun6i_init_clocks);
+CLK_OF_DECLARE(sun4i_pll6, "allwinner,sun4i-a10-pll6-clk",
+              sun4i_pll6_clk_setup);
 
-static void __init sun9i_init_clocks(struct device_node *node)
-{
-       sunxi_init_clocks(NULL, 0);
-}
-CLK_OF_DECLARE(sun9i_a80_clk_init, "allwinner,sun9i-a80", sun9i_init_clocks);
index 67b8e38f4ee96d1dfec6a1230f0258ab65285980..5432b1c198a4e2fc77825d21f9382eee30b2249c 100644 (file)
@@ -216,6 +216,18 @@ static void __init sun8i_a23_usb_setup(struct device_node *node)
 }
 CLK_OF_DECLARE(sun8i_a23_usb, "allwinner,sun8i-a23-usb-clk", sun8i_a23_usb_setup);
 
+static const struct usb_clk_data sun8i_h3_usb_clk_data __initconst = {
+       .clk_mask =  BIT(19) | BIT(18) | BIT(17) | BIT(16) |
+                    BIT(11) | BIT(10) | BIT(9) | BIT(8),
+       .reset_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+};
+
+static void __init sun8i_h3_usb_setup(struct device_node *node)
+{
+       sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
+}
+CLK_OF_DECLARE(sun8i_h3_usb, "allwinner,sun8i-h3-usb-clk", sun8i_h3_usb_setup);
+
 static const struct usb_clk_data sun9i_a80_usb_mod_data __initconst = {
        .clk_mask = BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1),
        .reset_mask = BIT(19) | BIT(18) | BIT(17),
@@ -243,15 +255,3 @@ static void __init sun9i_a80_usb_phy_setup(struct device_node *node)
        sunxi_usb_clk_setup(node, &sun9i_a80_usb_phy_data, &a80_usb_phy_lock);
 }
 CLK_OF_DECLARE(sun9i_a80_usb_phy, "allwinner,sun9i-a80-usb-phy-clk", sun9i_a80_usb_phy_setup);
-
-static const struct usb_clk_data sun8i_h3_usb_clk_data __initconst = {
-       .clk_mask =  BIT(19) | BIT(18) | BIT(17) | BIT(16) |
-                    BIT(11) | BIT(10) | BIT(9) | BIT(8),
-       .reset_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
-};
-
-static void __init sun8i_h3_usb_setup(struct device_node *node)
-{
-       sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
-}
-CLK_OF_DECLARE(sun8i_h3_usb, "allwinner,sun8i-h3-usb-clk", sun8i_h3_usb_setup);
index e1fe8f35d45c47c553997845657e09d3b71ca9a1..74e7544f861ba083f63f1d5aa886a7a90b0f295f 100644 (file)
@@ -450,8 +450,10 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
                struct emc_timing *timing = tegra->timings + (i++);
 
                err = load_one_timing_from_dt(tegra, timing, child);
-               if (err)
+               if (err) {
+                       of_node_put(child);
                        return err;
+               }
 
                timing->ram_code = ram_code;
        }
@@ -499,9 +501,9 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
                 * fuses until the apbmisc driver is loaded.
                 */
                err = load_timings_from_dt(tegra, node, node_ram_code);
+               of_node_put(node);
                if (err)
                        return ERR_PTR(err);
-               of_node_put(node);
                break;
        }
 
index 19ce0738ee764bb13e9dfde5ca542fd2cf472e9a..62ea38187b715814ac5d7b5265cbd4cd934a22c3 100644 (file)
@@ -11,6 +11,7 @@ enum clk_id {
        tegra_clk_afi,
        tegra_clk_amx,
        tegra_clk_amx1,
+       tegra_clk_apb2ape,
        tegra_clk_apbdma,
        tegra_clk_apbif,
        tegra_clk_ape,
index a534bfab30b39ee53c81a89c3a5c5b1d2979144e..6ac3f843e7caa3a8abb6513a0eb9fc2c829d3a39 100644 (file)
 #define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
                                PLLE_SS_CNTL_SSC_BYP)
 #define PLLE_SS_MAX_MASK 0x1ff
-#define PLLE_SS_MAX_VAL 0x25
+#define PLLE_SS_MAX_VAL_TEGRA114 0x25
+#define PLLE_SS_MAX_VAL_TEGRA210 0x21
 #define PLLE_SS_INC_MASK (0xff << 16)
 #define PLLE_SS_INC_VAL (0x1 << 16)
 #define PLLE_SS_INCINTRV_MASK (0x3f << 24)
-#define PLLE_SS_INCINTRV_VAL (0x20 << 24)
+#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
+#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
 #define PLLE_SS_COEFFICIENTS_MASK \
        (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
-#define PLLE_SS_COEFFICIENTS_VAL \
-       (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL)
+#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
+       (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
+        PLLE_SS_INCINTRV_VAL_TEGRA114)
+#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
+       (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
+        PLLE_SS_INCINTRV_VAL_TEGRA210)
 
 #define PLLE_AUX_PLLP_SEL      BIT(2)
 #define PLLE_AUX_USE_LOCKDET   BIT(3)
@@ -880,7 +886,7 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
 static int clk_plle_enable(struct clk_hw *hw)
 {
        struct tegra_clk_pll *pll = to_clk_pll(hw);
-       unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+       unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
        struct tegra_clk_pll_freq_table sel;
        u32 val;
        int err;
@@ -1378,7 +1384,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        u32 val;
        int ret;
        unsigned long flags = 0;
-       unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+       unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
 
        if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
                return -EINVAL;
@@ -1401,7 +1407,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        val |= PLLE_MISC_IDDQ_SW_CTRL;
        val &= ~PLLE_MISC_IDDQ_SW_VALUE;
        val |= PLLE_MISC_PLLE_PTS;
-       val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+       val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
        pll_writel_misc(val, pll);
        udelay(5);
 
@@ -1428,7 +1434,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        val = pll_readl(PLLE_SS_CTRL, pll);
        val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
        val &= ~PLLE_SS_COEFFICIENTS_MASK;
-       val |= PLLE_SS_COEFFICIENTS_VAL;
+       val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
        pll_writel(val, PLLE_SS_CTRL, pll);
        val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
        pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
        struct tegra_clk_pll *pll = to_clk_pll(hw);
        struct tegra_clk_pll_freq_table sel;
        u32 val;
-       int ret;
+       int ret = 0;
        unsigned long flags = 0;
-       unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+       unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
 
        if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
                return -EINVAL;
@@ -2022,22 +2028,20 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
        if (pll->lock)
                spin_lock_irqsave(pll->lock, flags);
 
+       val = pll_readl(pll->params->aux_reg, pll);
+       if (val & PLLE_AUX_SEQ_ENABLE)
+               goto out;
+
        val = pll_readl_base(pll);
        val &= ~BIT(30); /* Disable lock override */
        pll_writel_base(val, pll);
 
-       val = pll_readl(pll->params->aux_reg, pll);
-       val |= PLLE_AUX_ENABLE_SWCTL;
-       val &= ~PLLE_AUX_SEQ_ENABLE;
-       pll_writel(val, pll->params->aux_reg, pll);
-       udelay(1);
-
        val = pll_readl_misc(pll);
        val |= PLLE_MISC_LOCK_ENABLE;
        val |= PLLE_MISC_IDDQ_SW_CTRL;
        val &= ~PLLE_MISC_IDDQ_SW_VALUE;
        val |= PLLE_MISC_PLLE_PTS;
-       val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+       val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
        pll_writel_misc(val, pll);
        udelay(5);
 
@@ -2067,7 +2071,7 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
        val = pll_readl(PLLE_SS_CTRL, pll);
        val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
        val &= ~PLLE_SS_COEFFICIENTS_MASK;
-       val |= PLLE_SS_COEFFICIENTS_VAL;
+       val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
        pll_writel(val, PLLE_SS_CTRL, pll);
        val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
        pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@ static void clk_plle_tegra210_disable(struct clk_hw *hw)
        if (pll->lock)
                spin_lock_irqsave(pll->lock, flags);
 
+       /* If PLLE HW sequencer is enabled, SW should not disable PLLE */
+       val = pll_readl(pll->params->aux_reg, pll);
+       if (val & PLLE_AUX_SEQ_ENABLE)
+               goto out;
+
        val = pll_readl_base(pll);
        val &= ~PLLE_BASE_ENABLE;
        pll_writel_base(val, pll);
 
+       val = pll_readl(pll->params->aux_reg, pll);
+       val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
+       pll_writel(val, pll->params->aux_reg, pll);
+
        val = pll_readl_misc(pll);
        val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
        pll_writel_misc(val, pll);
        udelay(1);
 
+out:
        if (pll->lock)
                spin_unlock_irqrestore(pll->lock, flags);
 }
index 6ad381a888a6176801494c60bcc4f057f23ebe82..ea2b9cbf9e70b0c10204d5d34dc96ab929d36427 100644 (file)
@@ -773,7 +773,7 @@ static struct tegra_periph_init_data periph_clks[] = {
        XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
        XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
        MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
-       MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
+       MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
        MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
        MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
        MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@ static struct tegra_periph_init_data periph_clks[] = {
        NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
        MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
        MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
-       MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c),
+       I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
        MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
        MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
        MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@ static struct tegra_periph_init_data gate_clks[] = {
        GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
        GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
        GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
+       GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
 };
 
 static struct tegra_periph_init_data div_clks[] = {
index 4559a20e3af6e424e52c7b08930cd7664965fea2..474de0f0c26d80b0f9b20ff845575ce049165038 100644 (file)
@@ -67,7 +67,7 @@ static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
                                         "pll_p", "pll_p_out4", "unused",
                                         "unused", "pll_x", "pll_x_out0" };
 
-const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
+static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
        .gen = gen4,
        .sclk_parents = sclk_parents,
        .cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@ static const char *cclk_lp_parents_gen5[] = { "clk_m", "unused", "clk_32k", "unu
                                        "unused", "unused", "unused", "unused",
                                        "dfllCPU_out" };
 
-const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
+static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
        .gen = gen5,
        .sclk_parents = sclk_parents_gen5,
        .cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
        *dt_clk = clk;
 }
 
-void __init tegra_super_clk_init(void __iomem *clk_base,
+static void __init tegra_super_clk_init(void __iomem *clk_base,
                                void __iomem *pmc_base,
                                struct tegra_clk *tegra_clks,
                                struct tegra_clk_pll_params *params,
index 58514c44ea830c476b8b23606e962ecf8ef8ef7a..637041fd53ad11b95cd305952cb0a80eaf0d3b61 100644 (file)
@@ -59,8 +59,8 @@
 #define PLLC3_MISC3 0x50c
 
 #define PLLM_BASE 0x90
-#define PLLM_MISC0 0x9c
 #define PLLM_MISC1 0x98
+#define PLLM_MISC2 0x9c
 #define PLLP_BASE 0xa0
 #define PLLP_MISC0 0xac
 #define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
 #define PLLC4_MISC0 0x5a8
 #define PLLC4_OUT 0x5e4
 #define PLLMB_BASE 0x5e8
-#define PLLMB_MISC0 0x5ec
+#define PLLMB_MISC1 0x5ec
 #define PLLA1_BASE 0x6a4
 #define PLLA1_MISC0 0x6a8
 #define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@ static unsigned long tegra210_input_freq[] = {
 };
 
 static const char *mux_pllmcp_clkm[] = {
-       "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
+       "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
+       "pll_p",
 };
 #define mux_pllmcp_clkm_idx NULL
 
@@ -367,12 +368,12 @@ static const char *mux_pllmcp_clkm[] = {
 /* PLLMB */
 #define PLLMB_BASE_LOCK                        (1 << 27)
 
-#define PLLMB_MISC0_LOCK_OVERRIDE      (1 << 18)
-#define PLLMB_MISC0_IDDQ               (1 << 17)
-#define PLLMB_MISC0_LOCK_ENABLE                (1 << 16)
+#define PLLMB_MISC1_LOCK_OVERRIDE      (1 << 18)
+#define PLLMB_MISC1_IDDQ               (1 << 17)
+#define PLLMB_MISC1_LOCK_ENABLE                (1 << 16)
 
-#define PLLMB_MISC0_DEFAULT_VALUE      0x00030000
-#define PLLMB_MISC0_WRITE_MASK         0x0007ffff
+#define PLLMB_MISC1_DEFAULT_VALUE      0x00030000
+#define PLLMB_MISC1_WRITE_MASK         0x0007ffff
 
 /* PLLP */
 #define PLLP_BASE_OVERRIDE             (1 << 28)
@@ -457,7 +458,8 @@ static void pllcx_check_defaults(struct tegra_clk_pll_params *params)
                        PLLCX_MISC3_WRITE_MASK);
 }
 
-void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
+static void tegra210_pllcx_set_defaults(const char *name,
+                                       struct tegra_clk_pll *pllcx)
 {
        pllcx->params->defaults_set = true;
 
@@ -482,22 +484,22 @@ void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
        udelay(1);
 }
 
-void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
 {
        tegra210_pllcx_set_defaults("PLL_C", pllcx);
 }
 
-void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
 {
        tegra210_pllcx_set_defaults("PLL_C2", pllcx);
 }
 
-void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
 {
        tegra210_pllcx_set_defaults("PLL_C3", pllcx);
 }
 
-void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
+static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
 {
        tegra210_pllcx_set_defaults("PLL_A1", pllcx);
 }
@@ -507,7 +509,7 @@ void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
  * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
  * Fractional SDM is allowed to provide exact audio rates.
  */
-void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
+static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
 {
        u32 mask;
        u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@ void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
  * PLLD
  * PLL with fractional SDM.
  */
-void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
+static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
 {
        u32 val;
        u32 mask = 0xffff;
@@ -698,7 +700,7 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
        udelay(1);
 }
 
-void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
+static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
 {
        plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
                        PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@ void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
                        PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
 }
 
-void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
+static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
 {
        plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
                        PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@ void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
  * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
  * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
  */
-void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
+static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
 {
        plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
 }
@@ -728,7 +730,7 @@ void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
  * PLLRE
  * VCO is exposed to the clock tree directly along with post-divider output
  */
-void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
+static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
 {
        u32 mask;
        u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@ static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b)
 {
        unsigned long input_rate;
 
-       if (!IS_ERR_OR_NULL(hw->clk)) {
+       /* cf rate */
+       if (!IS_ERR_OR_NULL(hw->clk))
                input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
-               /* cf rate */
-               input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
-       } else {
+       else
                input_rate = 38400000;
-       }
+
+       input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
 
        switch (input_rate) {
        case 12000000:
@@ -841,7 +843,7 @@ static void pllx_check_defaults(struct tegra_clk_pll *pll)
                        PLLX_MISC5_WRITE_MASK);
 }
 
-void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
+static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
 {
        u32 val;
        u32 step_a, step_b;
@@ -901,7 +903,7 @@ void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
 }
 
 /* PLLMB */
-void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
+static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
 {
        u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
 
@@ -914,15 +916,15 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
                 * PLL is ON: check if defaults already set, then set those
                 * that can be updated in flight.
                 */
-               val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ);
-               mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE;
+               val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
+               mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
                _pll_misc_chk_default(clk_base, pllmb->params, 0, val,
-                               ~mask & PLLMB_MISC0_WRITE_MASK);
+                               ~mask & PLLMB_MISC1_WRITE_MASK);
 
                /* Enable lock detect */
                val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
                val &= ~mask;
-               val |= PLLMB_MISC0_DEFAULT_VALUE & mask;
+               val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
                writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
                udelay(1);
 
@@ -930,7 +932,7 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
        }
 
        /* set IDDQ, enable lock detect */
-       writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE,
+       writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
                        clk_base + pllmb->params->ext_misc_reg[0]);
        udelay(1);
 }
@@ -960,7 +962,7 @@ static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled)
                        ~mask & PLLP_MISC1_WRITE_MASK);
 }
 
-void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
+static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
 {
        u32 mask;
        u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@ static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control)
                        ~mask & PLLU_MISC1_WRITE_MASK);
 }
 
-void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
+static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
 {
        u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
 
@@ -1212,8 +1214,9 @@ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
        cfg->m *= PLL_SDM_COEFF;
 }
 
-unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
-                                         unsigned long parent_rate)
+static unsigned long
+tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
+                           unsigned long parent_rate)
 {
        unsigned long vco_min = params->vco_min;
 
@@ -1386,7 +1389,7 @@ static struct tegra_clk_pll_params pll_c_params = {
        .mdiv_default = 3,
        .div_nmp = &pllc_nmp,
        .freq_table = pll_cx_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = _pllc_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1425,7 +1428,7 @@ static struct tegra_clk_pll_params pll_c2_params = {
        .ext_misc_reg[2] = PLLC2_MISC2,
        .ext_misc_reg[3] = PLLC2_MISC3,
        .freq_table = pll_cx_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = _pllc2_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1455,7 +1458,7 @@ static struct tegra_clk_pll_params pll_c3_params = {
        .ext_misc_reg[2] = PLLC3_MISC2,
        .ext_misc_reg[3] = PLLC3_MISC3,
        .freq_table = pll_cx_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = _pllc3_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1505,7 +1508,6 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
        .base_reg = PLLC4_BASE,
        .misc_reg = PLLC4_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .max_p = PLL_QLIN_PDIV_MAX,
        .ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
        .div_nmp = &pllss_nmp,
        .freq_table = pll_c4_vco_freq_table,
        .set_defaults = tegra210_pllc4_set_defaults,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE |
-                TEGRA_PLL_VCO_OUT,
+       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
 
@@ -1559,15 +1560,15 @@ static struct tegra_clk_pll_params pll_m_params = {
        .vco_min = 800000000,
        .vco_max = 1866000000,
        .base_reg = PLLM_BASE,
-       .misc_reg = PLLM_MISC1,
+       .misc_reg = PLLM_MISC2,
        .lock_mask = PLL_BASE_LOCK,
        .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
        .lock_delay = 300,
-       .iddq_reg = PLLM_MISC0,
+       .iddq_reg = PLLM_MISC2,
        .iddq_bit_idx = PLLM_IDDQ_BIT,
        .max_p = PLL_QLIN_PDIV_MAX,
-       .ext_misc_reg[0] = PLLM_MISC0,
-       .ext_misc_reg[0] = PLLM_MISC1,
+       .ext_misc_reg[0] = PLLM_MISC2,
+       .ext_misc_reg[1] = PLLM_MISC1,
        .round_p_to_pdiv = pll_qlin_p_to_pdiv,
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
        .div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@ static struct tegra_clk_pll_params pll_mb_params = {
        .vco_min = 800000000,
        .vco_max = 1866000000,
        .base_reg = PLLMB_BASE,
-       .misc_reg = PLLMB_MISC0,
+       .misc_reg = PLLMB_MISC1,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
        .lock_delay = 300,
-       .iddq_reg = PLLMB_MISC0,
+       .iddq_reg = PLLMB_MISC1,
        .iddq_bit_idx = PLLMB_IDDQ_BIT,
        .max_p = PLL_QLIN_PDIV_MAX,
-       .ext_misc_reg[0] = PLLMB_MISC0,
+       .ext_misc_reg[0] = PLLMB_MISC1,
        .round_p_to_pdiv = pll_qlin_p_to_pdiv,
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
        .div_nmp = &pllm_nmp,
        .freq_table = pll_m_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = tegra210_pllmb_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1671,7 +1671,6 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
        .base_reg = PLLRE_BASE,
        .misc_reg = PLLRE_MISC0,
        .lock_mask = PLLRE_MISC_LOCK,
-       .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .max_p = PLL_QLIN_PDIV_MAX,
        .ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
        .div_nmp = &pllre_nmp,
        .freq_table = pll_re_vco_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC |
-                TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
+       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
        .set_defaults = tegra210_pllre_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1712,7 +1710,6 @@ static struct tegra_clk_pll_params pll_p_params = {
        .base_reg = PLLP_BASE,
        .misc_reg = PLLP_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .iddq_reg = PLLP_MISC0,
        .iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@ static struct tegra_clk_pll_params pll_p_params = {
        .div_nmp = &pllp_nmp,
        .freq_table = pll_p_freq_table,
        .fixed_rate = 408000000,
-       .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK |
-                TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
+       .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
        .set_defaults = tegra210_pllp_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1750,7 +1746,7 @@ static struct tegra_clk_pll_params pll_a1_params = {
        .ext_misc_reg[2] = PLLA1_MISC2,
        .ext_misc_reg[3] = PLLA1_MISC3,
        .freq_table = pll_cx_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = _plla1_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1787,7 +1783,6 @@ static struct tegra_clk_pll_params pll_a_params = {
        .base_reg = PLLA_BASE,
        .misc_reg = PLLA_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .round_p_to_pdiv = pll_qlin_p_to_pdiv,
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@ static struct tegra_clk_pll_params pll_a_params = {
        .ext_misc_reg[1] = PLLA_MISC1,
        .ext_misc_reg[2] = PLLA_MISC2,
        .freq_table = pll_a_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW |
-                TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
        .set_defaults = tegra210_plla_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
        .set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@ static struct tegra_clk_pll_params pll_d_params = {
        .base_reg = PLLD_BASE,
        .misc_reg = PLLD_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
        .lock_delay = 1000,
        .iddq_reg = PLLD_MISC0,
        .iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@ static struct tegra_clk_pll_params pll_d_params = {
        .ext_misc_reg[0] = PLLD_MISC0,
        .ext_misc_reg[1] = PLLD_MISC1,
        .freq_table = pll_d_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .mdiv_default = 1,
        .set_defaults = tegra210_plld_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@ static struct tegra_clk_pll_params pll_d2_params = {
        .base_reg = PLLD2_BASE,
        .misc_reg = PLLD2_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .iddq_reg = PLLD2_BASE,
        .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
        .mdiv_default = 1,
        .freq_table = tegra210_pll_d2_freq_table,
        .set_defaults = tegra210_plld2_set_defaults,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
        .set_gain = tegra210_clk_pll_set_gain,
        .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@ static struct tegra_clk_pll_params pll_dp_params = {
        .base_reg = PLLDP_BASE,
        .misc_reg = PLLDP_MISC,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .iddq_reg = PLLDP_BASE,
        .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@ static struct tegra_clk_pll_params pll_dp_params = {
        .mdiv_default = 1,
        .freq_table = pll_dp_freq_table,
        .set_defaults = tegra210_plldp_set_defaults,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
        .set_gain = tegra210_clk_pll_set_gain,
        .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
        .base_reg = PLLU_BASE,
        .misc_reg = PLLU_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
        .lock_delay = 1000,
        .iddq_reg = PLLU_MISC0,
        .iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
        .div_nmp = &pllu_nmp,
        .freq_table = pll_u_freq_table,
-       .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE |
-                TEGRA_PLL_VCO_OUT,
+       .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
        .set_defaults = tegra210_pllu_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -2218,6 +2207,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
        [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
        [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
        [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
+       [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
 
        /* PLLU_VCO */
        val = readl(clk_base + pll_u_vco_params.base_reg);
-       val &= ~BIT(24); /* disable PLLU_OVERRIDE */
+       val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
        writel(val, clk_base + pll_u_vco_params.base_reg);
 
        clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
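
The change just above swaps a bare BIT(24) for the named PLLU_BASE_OVERRIDE macro; the value is unchanged, only the intent becomes visible at the call site. A sketch of the same idea, assuming the macro definition sits with the other PLLU register bits:

#include <linux/bitops.h>
#include <linux/io.h>

#define PLLU_BASE_OVERRIDE      BIT(24)         /* software override of PLLU setup */

static void pllu_release_override(void __iomem *base_reg)
{
        u32 val = readl(base_reg);

        val &= ~PLLU_BASE_OVERRIDE;     /* let hardware manage PLLU again */
        writel(val, base_reg);
}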
@@ -2738,8 +2728,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
        { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
        { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
-       { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
-       { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
        { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
        { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
        { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
index 56777f04d2d96381593b4344ac3306bb5dcafbb4..33db7406c0e274e8c39c7b0d71509a117c324022 100644 (file)
@@ -30,6 +30,8 @@ config CLKSRC_MMIO
 config DIGICOLOR_TIMER
        bool "Digicolor timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       depends on HAS_IOMEM
        help
          Enables the support for the digicolor timer driver.
 
@@ -55,6 +57,7 @@ config ARMADA_370_XP_TIMER
        bool "Armada 370 and XP timer driver" if COMPILE_TEST
        depends on ARM
        select CLKSRC_OF
+       select CLKSRC_MMIO
        help
          Enables the support for the Armada 370 and XP timer driver.
 
@@ -76,6 +79,7 @@ config ORION_TIMER
 config SUN4I_TIMER
        bool "Sun4i timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        select CLKSRC_MMIO
        help
          Enables support for the Sun4i timer.
@@ -89,6 +93,7 @@ config SUN5I_HSTIMER
 
 config TEGRA_TIMER
        bool "Tegra timer driver" if COMPILE_TEST
+       select CLKSRC_MMIO
        depends on ARM
        help
          Enables support for the Tegra driver.
@@ -96,6 +101,7 @@ config TEGRA_TIMER
 config VT8500_TIMER
        bool "VT8500 timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        help
          Enables support for the VT8500 driver.
 
@@ -131,6 +137,7 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
 config CLKSRC_DBX500_PRCMU
        bool "Clocksource PRCMU Timer" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        help
          Use the always on PRCMU Timer as clocksource
 
@@ -248,6 +255,7 @@ config CLKSRC_EXYNOS_MCT
 config CLKSRC_SAMSUNG_PWM
        bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        help
          This is a new clocksource driver for the PWM timer found in
          Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
@@ -257,12 +265,14 @@ config CLKSRC_SAMSUNG_PWM
 config FSL_FTM_TIMER
        bool "Freescale FlexTimer Module driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        select CLKSRC_MMIO
        help
          Support for Freescale FlexTimer Module (FTM) timer.
 
 config VF_PIT_TIMER
        bool
+       select CLKSRC_MMIO
        help
          Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
 
@@ -360,6 +370,7 @@ config CLKSRC_TANGO_XTAL
 config CLKSRC_PXA
        bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        select CLKSRC_MMIO
        help
          This enables OST0 support available on PXA and SA-11x0
@@ -394,6 +405,7 @@ config CLKSRC_ST_LPC
        bool "Low power clocksource found in the LPC" if COMPILE_TEST
        select CLKSRC_OF if OF
        depends on HAS_IOMEM
+       select CLKSRC_MMIO
        help
          Enable this option to use the Low Power controller timer
          as clocksource.
index 6ee91401918eba99a33c67b554da853747a0c5fa..4da2af9694a23b267cebdfa52da624e9f43b61fe 100644 (file)
@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
 
        __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
        __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-       clk_disable(tcd->clk);
+       if (!clockevent_state_detached(d))
+               clk_disable(tcd->clk);
 
        return 0;
 }
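
The guard added above only drops the timer clock when the clockevent has actually been programmed; shutting down a device that is still in the DETACHED state would otherwise unbalance the clk enable count. A sketch of that check, with a hypothetical helper name:

#include <linux/clk.h>
#include <linux/clockchips.h>

/* Drop the timer clock only if the clockevent left the DETACHED state,
 * i.e. only if the clock was enabled for it in the first place. */
static void timer_clk_shutdown(struct clock_event_device *d, struct clk *clk)
{
        if (!clockevent_state_detached(d))
                clk_disable(clk);
}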
index 9bc37c437874a6ba185799680a093598cb213948..0ca74d07005830cec5abaf92a84c59af17325616 100644 (file)
@@ -142,15 +142,16 @@ static int allocate_resources(int cpu, struct device **cdev,
 
 try_again:
        cpu_reg = regulator_get_optional(cpu_dev, reg);
-       if (IS_ERR(cpu_reg)) {
+       ret = PTR_ERR_OR_ZERO(cpu_reg);
+       if (ret) {
                /*
                 * If cpu's regulator supply node is present, but regulator is
                 * not yet registered, we should try deferring probe.
                 */
-               if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+               if (ret == -EPROBE_DEFER) {
                        dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
                                cpu);
-                       return -EPROBE_DEFER;
+                       return ret;
                }
 
                /* Try with "cpu-supply" */
@@ -159,18 +160,16 @@ try_again:
                        goto try_again;
                }
 
-               dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
-                       cpu, PTR_ERR(cpu_reg));
+               dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
        }
 
        cpu_clk = clk_get(cpu_dev, NULL);
-       if (IS_ERR(cpu_clk)) {
+       ret = PTR_ERR_OR_ZERO(cpu_clk);
+       if (ret) {
                /* put regulator */
                if (!IS_ERR(cpu_reg))
                        regulator_put(cpu_reg);
 
-               ret = PTR_ERR(cpu_clk);
-
                /*
                 * If cpu's clk node is present, but clock is not yet
                 * registered, we should try deferring probe.
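
The rework above converts the ERR_PTR into a plain error code once, via PTR_ERR_OR_ZERO(), and then branches on it: a supply that is described in DT but not yet registered defers the probe, anything else just means the platform has no CPU regulator. A condensed sketch of that flow (the helper name and supply id are illustrative):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int get_cpu_supply(struct device *dev, struct regulator **out)
{
        struct regulator *reg = regulator_get_optional(dev, "cpu");
        int ret = PTR_ERR_OR_ZERO(reg);

        if (ret == -EPROBE_DEFER)
                return ret;             /* described but not registered yet */

        if (ret) {
                *out = NULL;            /* no supply: run without voltage scaling */
                return 0;
        }

        *out = reg;
        return 0;
}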
index c35e7da1ed7a185fd95d0f0ae7f7b2961d235a0f..34b17447e0d19ee6fbeec91135f697a3beed653f 100644 (file)
@@ -48,11 +48,11 @@ static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
                                          bool active)
 {
        do {
-               policy = list_next_entry(policy, policy_list);
-
                /* No more policies in the list */
-               if (&policy->policy_list == &cpufreq_policy_list)
+               if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
                        return NULL;
+
+               policy = list_next_entry(policy, policy_list);
        } while (!suitable_policy(policy, active));
 
        return policy;
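
The reordering above checks for the end of the list before stepping the iterator, so the cursor can never come to rest on the list head itself and be dereferenced as a policy. The same guard in isolation, on a made-up element type:

#include <linux/list.h>

struct item {
        struct list_head node;
        int value;
};

/* Return the next element, or NULL if 'pos' is already the last one. */
static struct item *next_item(struct item *pos, struct list_head *head)
{
        if (list_is_last(&pos->node, head))
                return NULL;

        return list_next_entry(pos, node);
}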
@@ -959,6 +959,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
        return cpufreq_add_dev_symlink(policy);
 }
 
+__weak struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return NULL;
+}
+
 static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
        struct cpufreq_governor *gov = NULL;
@@ -968,11 +973,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
 
        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(policy->last_governor);
-       if (gov)
+       if (gov) {
                pr_debug("Restoring governor %s for cpu %d\n",
                                policy->governor->name, policy->cpu);
-       else
-               gov = CPUFREQ_DEFAULT_GOVERNOR;
+       } else {
+               gov = cpufreq_default_governor();
+               if (!gov)
+                       return -ENODATA;
+       }
 
        new_policy.governor = gov;
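
cpufreq_default_governor() above is deliberately __weak: the core provides a stub returning NULL, and whichever governor is built in as the default supplies the strong symbol that replaces it at link time (the later hunks for conservative, ondemand, performance, powersave and userspace all do exactly that, and cpufreq_fallback_governor() follows the same scheme). A minimal sketch of the pattern with placeholder names, split across two translation units:

#include <linux/compiler.h>

/* core.c: weak stub, used when nothing overrides it */
struct my_governor { const char *name; };

struct my_governor * __weak my_default_governor(void)
{
        return NULL;
}

/* governor.c (separate object file): the strong definition wins at link time */
static struct my_governor perf = { .name = "performance" };

struct my_governor *my_default_governor(void)
{
        return &perf;
}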
 
@@ -1920,21 +1928,16 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
+__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
+{
+       return NULL;
+}
+
 static int __cpufreq_governor(struct cpufreq_policy *policy,
                                        unsigned int event)
 {
        int ret;
 
-       /* Only must be defined when default governor is known to have latency
-          restrictions, like e.g. conservative or ondemand.
-          That this is the case is already ensured in Kconfig
-       */
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
-       struct cpufreq_governor *gov = &cpufreq_gov_performance;
-#else
-       struct cpufreq_governor *gov = NULL;
-#endif
-
        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;
@@ -1948,12 +1951,14 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
-               if (!gov)
-                       return -EINVAL;
-               else {
+               struct cpufreq_governor *gov = cpufreq_fallback_governor();
+
+               if (gov) {
                        pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
                                policy->governor->name, gov->name);
                        policy->governor = gov;
+               } else {
+                       return -EINVAL;
                }
        }
 
index 606ad74abe6e8b248b15a142c4f3fca7241c64be..8504a70a47857ff21a39a401d29c86cde40a50a4 100644 (file)
@@ -26,10 +26,7 @@ static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
 static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event);
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_conservative = {
+static struct cpufreq_governor cpufreq_gov_conservative = {
        .name                   = "conservative",
        .governor               = cs_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
@@ -399,6 +396,11 @@ MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_conservative;
+}
+
 fs_initcall(cpufreq_gov_dbs_init);
 #else
 module_init(cpufreq_gov_dbs_init);
index bab3a514ec128254d8cff0ccdeaf02f427625d06..e0d111024d4840e8078fc0553c9ef7dabed71447 100644 (file)
@@ -387,16 +387,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
        if (!have_governor_per_policy())
                cdata->gdbs_data = dbs_data;
 
+       policy->governor_data = dbs_data;
+
        ret = sysfs_create_group(get_governor_parent_kobj(policy),
                                 get_sysfs_attr(dbs_data));
        if (ret)
                goto reset_gdbs_data;
 
-       policy->governor_data = dbs_data;
-
        return 0;
 
 reset_gdbs_data:
+       policy->governor_data = NULL;
+
        if (!have_governor_per_policy())
                cdata->gdbs_data = NULL;
        cdata->exit(dbs_data, !policy->governor->initialized);
@@ -417,16 +419,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
        if (!cdbs->shared || cdbs->shared->policy)
                return -EBUSY;
 
-       policy->governor_data = NULL;
        if (!--dbs_data->usage_count) {
                sysfs_remove_group(get_governor_parent_kobj(policy),
                                   get_sysfs_attr(dbs_data));
 
+               policy->governor_data = NULL;
+
                if (!have_governor_per_policy())
                        cdata->gdbs_data = NULL;
 
                cdata->exit(dbs_data, policy->governor->initialized == 1);
                kfree(dbs_data);
+       } else {
+               policy->governor_data = NULL;
        }
 
        free_common_dbs_info(policy, cdata);
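
The hunks above move the policy->governor_data assignment ahead of sysfs_create_group() and clear it again on every teardown path: once the attribute group is visible, a concurrent show/store can dereference governor_data, so the pointer must be valid before the group exists and must outlive it. A compressed sketch of that ordering (only the cpufreq and sysfs types are real; the helper is illustrative):

#include <linux/cpufreq.h>
#include <linux/sysfs.h>

static int governor_attach(struct cpufreq_policy *policy, struct kobject *kobj,
                           const struct attribute_group *grp, void *dbs_data)
{
        int ret;

        policy->governor_data = dbs_data;       /* valid before sysfs can call us */

        ret = sysfs_create_group(kobj, grp);
        if (ret)
                policy->governor_data = NULL;   /* roll back on failure */

        return ret;
}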
index eae51070c03427573708fe2bd67a081ccc2ffe82..929e193ac1c19d7601a207659ed788a72df0a3d4 100644 (file)
@@ -31,9 +31,7 @@ static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
 
 static struct od_ops od_ops;
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
 static struct cpufreq_governor cpufreq_gov_ondemand;
-#endif
 
 static unsigned int default_powersave_bias;
 
@@ -554,6 +552,19 @@ static struct common_dbs_data od_dbs_cdata = {
        .mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
 };
 
+static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+               unsigned int event)
+{
+       return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
+}
+
+static struct cpufreq_governor cpufreq_gov_ondemand = {
+       .name                   = "ondemand",
+       .governor               = od_cpufreq_governor_dbs,
+       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+       .owner                  = THIS_MODULE,
+};
+
 static void od_set_powersave_bias(unsigned int powersave_bias)
 {
        struct cpufreq_policy *policy;
@@ -605,22 +616,6 @@ void od_unregister_powersave_bias_handler(void)
 }
 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
 
-static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
-               unsigned int event)
-{
-       return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
-}
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
-       .name                   = "ondemand",
-       .governor               = od_cpufreq_governor_dbs,
-       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
-       .owner                  = THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
        return cpufreq_register_governor(&cpufreq_gov_ondemand);
@@ -638,6 +633,11 @@ MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_ondemand;
+}
+
 fs_initcall(cpufreq_gov_dbs_init);
 #else
 module_init(cpufreq_gov_dbs_init);
index cf117deb39b1f45c53ade61086236eb888d24a71..af9f4b96f5a8e1ae1fd622ce05f9b198b86ed599 100644 (file)
@@ -33,10 +33,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
        return 0;
 }
 
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_performance = {
+static struct cpufreq_governor cpufreq_gov_performance = {
        .name           = "performance",
        .governor       = cpufreq_governor_performance,
        .owner          = THIS_MODULE,
@@ -52,6 +49,19 @@ static void __exit cpufreq_gov_performance_exit(void)
        cpufreq_unregister_governor(&cpufreq_gov_performance);
 }
 
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_performance;
+}
+#endif
+#ifndef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+struct cpufreq_governor *cpufreq_fallback_governor(void)
+{
+       return &cpufreq_gov_performance;
+}
+#endif
+
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
 MODULE_LICENSE("GPL");
index e3b874c235eada5d353f4b02831140c6a244af68..b8b400232a7451fe392f767ed7aaecf6600c680d 100644 (file)
@@ -33,10 +33,7 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
        return 0;
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_powersave = {
+static struct cpufreq_governor cpufreq_gov_powersave = {
        .name           = "powersave",
        .governor       = cpufreq_governor_powersave,
        .owner          = THIS_MODULE,
@@ -57,6 +54,11 @@ MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_powersave;
+}
+
 fs_initcall(cpufreq_gov_powersave_init);
 #else
 module_init(cpufreq_gov_powersave_init);
index 4dbf1db16aca0e5d29d44b002a6d4ad0cae6d139..4d16f45ee1daf3e64e23c97a9cec8b7a4ea0fc0f 100644 (file)
@@ -89,10 +89,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
        return rc;
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_userspace = {
+static struct cpufreq_governor cpufreq_gov_userspace = {
        .name           = "userspace",
        .governor       = cpufreq_governor_userspace,
        .store_setspeed = cpufreq_set,
@@ -116,6 +113,11 @@ MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_userspace;
+}
+
 fs_initcall(cpufreq_gov_userspace_init);
 #else
 module_init(cpufreq_gov_userspace_init);
index 547890fd9572179eba4c6ce81caf5bbc8b06b947..1bbc10a54c59c8a7cf6e82b06607f44c7988f3fb 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/of.h>
 #include <linux/reboot.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
+#include <trace/events/power.h>
 
 #include <asm/cputhreads.h>
 #include <asm/firmware.h>
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
 static bool rebooting, throttled, occ_reset;
+static unsigned int *core_to_chip_map;
+
+static const char * const throttle_reason[] = {
+       "No throttling",
+       "Power Cap",
+       "Processor Over Temperature",
+       "Power Supply Failure",
+       "Over Current",
+       "OCC Reset"
+};
 
 static struct chip {
        unsigned int id;
        bool throttled;
+       bool restore;
+       u8 throttle_reason;
        cpumask_t mask;
        struct work_struct throttle;
-       bool restore;
 } *chips;
 
 static int nr_chips;
@@ -312,13 +325,14 @@ static inline unsigned int get_nominal_index(void)
 static void powernv_cpufreq_throttle_check(void *data)
 {
        unsigned int cpu = smp_processor_id();
+       unsigned int chip_id = core_to_chip_map[cpu_core_index_of_thread(cpu)];
        unsigned long pmsr;
        int pmsr_pmax, i;
 
        pmsr = get_pmspr(SPRN_PMSR);
 
        for (i = 0; i < nr_chips; i++)
-               if (chips[i].id == cpu_to_chip_id(cpu))
+               if (chips[i].id == chip_id)
                        break;
 
        /* Check for Pmax Capping */
@@ -328,17 +342,17 @@ static void powernv_cpufreq_throttle_check(void *data)
                        goto next;
                chips[i].throttled = true;
                if (pmsr_pmax < powernv_pstate_info.nominal)
-                       pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
-                               cpu, chips[i].id, pmsr_pmax,
-                               powernv_pstate_info.nominal);
-               else
-                       pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
-                               cpu, chips[i].id, pmsr_pmax,
-                               powernv_pstate_info.max);
+                       pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+                                    cpu, chips[i].id, pmsr_pmax,
+                                    powernv_pstate_info.nominal);
+               trace_powernv_throttle(chips[i].id,
+                                     throttle_reason[chips[i].throttle_reason],
+                                     pmsr_pmax);
        } else if (chips[i].throttled) {
                chips[i].throttled = false;
-               pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
-                       chips[i].id, pmsr_pmax);
+               trace_powernv_throttle(chips[i].id,
+                                     throttle_reason[chips[i].throttle_reason],
+                                     pmsr_pmax);
        }
 
        /* Check if Psafe_mode_active is set in PMSR. */
@@ -356,7 +370,7 @@ next:
 
        if (throttled) {
                pr_info("PMSR = %16lx\n", pmsr);
-               pr_crit("CPU Frequency could be throttled\n");
+               pr_warn("CPU Frequency could be throttled\n");
        }
 }
 
@@ -423,18 +437,19 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
 {
        struct chip *chip = container_of(work, struct chip, throttle);
        unsigned int cpu;
-       cpumask_var_t mask;
+       cpumask_t mask;
 
-       smp_call_function_any(&chip->mask,
+       get_online_cpus();
+       cpumask_and(&mask, &chip->mask, cpu_online_mask);
+       smp_call_function_any(&mask,
                              powernv_cpufreq_throttle_check, NULL, 0);
 
        if (!chip->restore)
-               return;
+               goto out;
 
        chip->restore = false;
-       cpumask_copy(mask, &chip->mask);
-       for_each_cpu_and(cpu, mask, cpu_online_mask) {
-               int index, tcpu;
+       for_each_cpu(cpu, &mask) {
+               int index;
                struct cpufreq_policy policy;
 
                cpufreq_get_policy(&policy, cpu);
@@ -442,20 +457,12 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
                                               policy.cur,
                                               CPUFREQ_RELATION_C, &index);
                powernv_cpufreq_target_index(&policy, index);
-               for_each_cpu(tcpu, policy.cpus)
-                       cpumask_clear_cpu(tcpu, mask);
+               cpumask_andnot(&mask, &mask, policy.cpus);
        }
+out:
+       put_online_cpus();
 }
 
-static char throttle_reason[][30] = {
-                                       "No throttling",
-                                       "Power Cap",
-                                       "Processor Over Temperature",
-                                       "Power Supply Failure",
-                                       "Over Current",
-                                       "OCC Reset"
-                                    };
-
 static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
                                   unsigned long msg_type, void *_msg)
 {
@@ -481,7 +488,7 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
                 */
                if (!throttled) {
                        throttled = true;
-                       pr_crit("CPU frequency is throttled for duration\n");
+                       pr_warn("CPU frequency is throttled for duration\n");
                }
 
                break;
@@ -505,23 +512,18 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
                        return 0;
                }
 
-               if (omsg.throttle_status &&
+               for (i = 0; i < nr_chips; i++)
+                       if (chips[i].id == omsg.chip)
+                               break;
+
+               if (omsg.throttle_status >= 0 &&
                    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
-                       pr_info("OCC: Chip %u Pmax reduced due to %s\n",
-                               (unsigned int)omsg.chip,
-                               throttle_reason[omsg.throttle_status]);
-               else if (!omsg.throttle_status)
-                       pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
-                               throttle_reason[omsg.throttle_status]);
-               else
-                       return 0;
+                       chips[i].throttle_reason = omsg.throttle_status;
 
-               for (i = 0; i < nr_chips; i++)
-                       if (chips[i].id == omsg.chip) {
-                               if (!omsg.throttle_status)
-                                       chips[i].restore = true;
-                               schedule_work(&chips[i].throttle);
-                       }
+               if (!omsg.throttle_status)
+                       chips[i].restore = true;
+
+               schedule_work(&chips[i].throttle);
        }
        return 0;
 }
@@ -556,29 +558,41 @@ static int init_chip_info(void)
        unsigned int chip[256];
        unsigned int cpu, i;
        unsigned int prev_chip_id = UINT_MAX;
+       cpumask_t cpu_mask;
+       int ret = -ENOMEM;
+
+       core_to_chip_map = kcalloc(cpu_nr_cores(), sizeof(unsigned int),
+                                  GFP_KERNEL);
+       if (!core_to_chip_map)
+               goto out;
 
-       for_each_possible_cpu(cpu) {
+       cpumask_copy(&cpu_mask, cpu_possible_mask);
+       for_each_cpu(cpu, &cpu_mask) {
                unsigned int id = cpu_to_chip_id(cpu);
 
                if (prev_chip_id != id) {
                        prev_chip_id = id;
                        chip[nr_chips++] = id;
                }
+               core_to_chip_map[cpu_core_index_of_thread(cpu)] = id;
+               cpumask_andnot(&cpu_mask, &cpu_mask, cpu_sibling_mask(cpu));
        }
 
-       chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+       chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
        if (!chips)
-               return -ENOMEM;
+               goto free_chip_map;
 
        for (i = 0; i < nr_chips; i++) {
                chips[i].id = chip[i];
-               chips[i].throttled = false;
                cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
                INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
-               chips[i].restore = false;
        }
 
        return 0;
+free_chip_map:
+       kfree(core_to_chip_map);
+out:
+       return ret;
 }
 
 static int __init powernv_cpufreq_init(void)
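
init_chip_info() above visits exactly one hardware thread per core: it starts from cpu_possible_mask and removes each visited core's sibling threads from the working mask, recording the chip id per core index on the way. A sketch of just that walk, reusing the powerpc helpers that appear in the hunk:

#include <linux/cpumask.h>
#include <asm/cputhreads.h>

/* Fill core_to_chip[core index] with the chip id, touching one thread per
 * core; cpu_to_chip_id() and cpu_sibling_mask() are the powerpc helpers
 * used above. */
static void map_cores_to_chips(unsigned int *core_to_chip)
{
        cpumask_t todo;
        unsigned int cpu;

        cpumask_copy(&todo, cpu_possible_mask);
        for_each_cpu(cpu, &todo) {
                core_to_chip[cpu_core_index_of_thread(cpu)] = cpu_to_chip_id(cpu);
                cpumask_andnot(&todo, &todo, cpu_sibling_mask(cpu));
        }
}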
@@ -612,6 +626,8 @@ static void __exit powernv_cpufreq_exit(void)
        unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
        opal_message_notifier_unregister(OPAL_MSG_OCC,
                                         &powernv_cpufreq_opal_nb);
+       kfree(chips);
+       kfree(core_to_chip_map);
        cpufreq_unregister_driver(&powernv_cpufreq_driver);
 }
 module_exit(powernv_cpufreq_exit);
index 1d99c97defa9204f52ad8605fd5f25c1d2ad0540..096377232747652f99d8a89e0681da7134b716fe 100644 (file)
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
        }
 }
 #else
-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
 {
        return 0;
 }
index 051a8a8224cd7ade8b846e62fc27d0733ba8f73b..a145b319d1717a996fb193dc3f382a22030d5327 100644 (file)
@@ -576,10 +576,8 @@ static struct cpufreq_driver s5pv210_driver = {
        .get            = cpufreq_generic_get,
        .init           = s5pv210_cpu_init,
        .name           = "s5pv210",
-#ifdef CONFIG_PM
        .suspend        = cpufreq_generic_suspend,
        .resume         = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
-#endif
 };
 
 static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
index 344058f8501a2c2ee888189950b79f615e815a02..d5657d50ac4044d5da886050e822517fe1134b6f 100644 (file)
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
 
 #define CPUIDLE_COUPLED_NOT_IDLE       (-1)
 
-static DEFINE_MUTEX(cpuidle_coupled_lock);
 static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
 
 /*
index 046423b0c5ca22aad3455101d3cafae7fc4dda87..f996efc56605a3d4739ae0bcac83099fd65b0751 100644 (file)
@@ -153,7 +153,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * be frozen safely.
         */
        index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
-       if (index >= 0)
+       if (index > 0)
                enter_freeze_proper(drv, dev, index);
 
        return index;
index 949af89cdac3de55c0c03c9a78d0e4ad1ae268cb..0751035b2cb047e3f26bea709bf1d1a9d910a3a6 100644 (file)
@@ -394,7 +394,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 {
        int err;
 
-       err = clk_prepare_enable(dd->iclk);
+       err = clk_enable(dd->iclk);
        if (err)
                return err;
 
@@ -424,7 +424,7 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 
        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
 
-       clk_disable_unprepare(dd->iclk);
+       clk_disable(dd->iclk);
        return 0;
 }
 
@@ -442,7 +442,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
 
 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 {
-       clk_disable_unprepare(dd->iclk);
+       clk_disable(dd->iclk);
        dd->flags &= ~AES_FLAGS_BUSY;
 
        if (dd->is_async)
@@ -2085,10 +2085,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
                goto res_err;
        }
 
-       err = atmel_aes_hw_version_init(aes_dd);
+       err = clk_prepare(aes_dd->iclk);
        if (err)
                goto res_err;
 
+       err = atmel_aes_hw_version_init(aes_dd);
+       if (err)
+               goto iclk_unprepare;
+
        atmel_aes_get_cap(aes_dd);
 
        err = atmel_aes_buff_init(aes_dd);
@@ -2121,6 +2125,8 @@ err_algs:
 err_aes_dma:
        atmel_aes_buff_cleanup(aes_dd);
 err_aes_buff:
+iclk_unprepare:
+       clk_unprepare(aes_dd->iclk);
 res_err:
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
@@ -2149,6 +2155,8 @@ static int atmel_aes_remove(struct platform_device *pdev)
        atmel_aes_dma_cleanup(aes_dd);
        atmel_aes_buff_cleanup(aes_dd);
 
+       clk_unprepare(aes_dd->iclk);
+
        return 0;
 }
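
The atmel-aes changes above (and the atmel-sha ones that follow) split clock handling in two: clk_prepare(), which may sleep, happens once in probe and is undone in remove, while the request paths that can run in atomic context only call clk_enable()/clk_disable(). A sketch of the resulting pairing:

#include <linux/clk.h>

static int hw_probe_clock(struct clk *iclk)
{
        return clk_prepare(iclk);       /* may sleep: done once at probe time */
}

static int hw_start(struct clk *iclk)
{
        return clk_enable(iclk);        /* safe in atomic context */
}

static void hw_stop(struct clk *iclk)
{
        clk_disable(iclk);              /* matches hw_start() */
}

static void hw_remove_clock(struct clk *iclk)
{
        clk_unprepare(iclk);            /* matches hw_probe_clock() */
}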
 
index 63b09e01075c9a5c34abc1c294e562909f4cf550..f8407dc7dd38a306991851152c932fe06ac6ddc9 100644 (file)
@@ -846,7 +846,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
        dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
                        SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
 
-       clk_disable_unprepare(dd->iclk);
+       clk_disable(dd->iclk);
 
        if (req->base.complete)
                req->base.complete(&req->base, err);
@@ -859,7 +859,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
 {
        int err;
 
-       err = clk_prepare_enable(dd->iclk);
+       err = clk_enable(dd->iclk);
        if (err)
                return err;
 
@@ -886,7 +886,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
        dev_info(dd->dev,
                        "version: 0x%x\n", dd->hw_version);
 
-       clk_disable_unprepare(dd->iclk);
+       clk_disable(dd->iclk);
 }
 
 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1536,6 +1536,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
                goto res_err;
        }
 
+       err = clk_prepare(sha_dd->iclk);
+       if (err)
+               goto res_err;
+
        atmel_sha_hw_version_init(sha_dd);
 
        atmel_sha_get_cap(sha_dd);
@@ -1547,12 +1551,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
                        if (IS_ERR(pdata)) {
                                dev_err(&pdev->dev, "platform data not available\n");
                                err = PTR_ERR(pdata);
-                               goto res_err;
+                               goto iclk_unprepare;
                        }
                }
                if (!pdata->dma_slave) {
                        err = -ENXIO;
-                       goto res_err;
+                       goto iclk_unprepare;
                }
                err = atmel_sha_dma_init(sha_dd, pdata);
                if (err)
@@ -1583,6 +1587,8 @@ err_algs:
        if (sha_dd->caps.has_dma)
                atmel_sha_dma_cleanup(sha_dd);
 err_sha_dma:
+iclk_unprepare:
+       clk_unprepare(sha_dd->iclk);
 res_err:
        tasklet_kill(&sha_dd->queue_task);
        tasklet_kill(&sha_dd->done_task);
@@ -1611,12 +1617,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
        if (sha_dd->caps.has_dma)
                atmel_sha_dma_cleanup(sha_dd);
 
-       iounmap(sha_dd->io_base);
-
-       clk_put(sha_dd->iclk);
-
-       if (sha_dd->irq >= 0)
-               free_irq(sha_dd->irq, sha_dd);
+       clk_unprepare(sha_dd->iclk);
 
        return 0;
 }
index 0643e3366e3309de88a03e687a2d5353f5715a22..c0656e7f37b5993672002a8a192dc1c9dcf0c4bd 100644 (file)
@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
                return -ENOMEM;
 
        dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
-       if (!dma->cache_pool)
+       if (!dma->padding_pool)
                return -ENOMEM;
 
        cesa->dma = dma;
index c50a247be2e0365d4cac7ccc8ce6bbb33f671354..0cb259c59916ac0cadd4a3c3f7e5d4ae7fa2a1d8 100644 (file)
@@ -496,6 +496,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
        caps->src_addr_widths = device->src_addr_widths;
        caps->dst_addr_widths = device->dst_addr_widths;
        caps->directions = device->directions;
+       caps->max_burst = device->max_burst;
        caps->residue_granularity = device->residue_granularity;
        caps->descriptor_reuse = device->descriptor_reuse;
 
index 4c30fdd092b3b1e5b7e6050b7e7d1afee6c11e57..358f9689a3f5ace77d3dfd601f1873b880704060 100644 (file)
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
 
        /* Haswell */
        { PCI_VDEVICE(INTEL, 0x9c60) },
+
+       /* Broadwell */
+       { PCI_VDEVICE(INTEL, 0x9ce0) },
+
        { }
 };
 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
index 241ff2b1402bf95572d053c09616414ba5eb2044..0a50c18d85b8bbfb9c5574f61ebba50d57f2d617 100644 (file)
@@ -150,7 +150,7 @@ enum dw_dma_msize {
 #define DWC_CTLL_DST_INC       (0<<7)          /* DAR update/not */
 #define DWC_CTLL_DST_DEC       (1<<7)
 #define DWC_CTLL_DST_FIX       (2<<7)
-#define DWC_CTLL_SRC_INC       (0<<7)          /* SAR update/not */
+#define DWC_CTLL_SRC_INC       (0<<9)          /* SAR update/not */
 #define DWC_CTLL_SRC_DEC       (1<<9)
 #define DWC_CTLL_SRC_FIX       (2<<9)
 #define DWC_CTLL_DST_MSIZE(n)  ((n)<<11)       /* burst, #elements */
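
The one-line fix above corrects a pasted shift: DWC_CTLL_SRC_INC encoded its value with the destination field's shift (bit 7) instead of the source field's (bit 9). One hypothetical way to make such macro families harder to get wrong, not the driver's actual definitions, is to derive every value from a single named shift:

#define CTLL_DST_SHIFT          7
#define CTLL_SRC_SHIFT          9

#define CTLL_DST_INC            (0 << CTLL_DST_SHIFT)
#define CTLL_DST_DEC            (1 << CTLL_DST_SHIFT)
#define CTLL_DST_FIX            (2 << CTLL_DST_SHIFT)

#define CTLL_SRC_INC            (0 << CTLL_SRC_SHIFT)   /* bit 9, not bit 7 */
#define CTLL_SRC_DEC            (1 << CTLL_SRC_SHIFT)
#define CTLL_SRC_FIX            (2 << CTLL_SRC_SHIFT)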
index d92d655494068992f5a689ff83604158115bee4d..e3d7fcb69b4c2e4ffc4221c8ea8ec4e360648fd7 100644 (file)
 #define GET_NUM_REGN(x)                ((x & 0x300000) >> 20) /* bits 20-21 */
 #define CHMAP_EXIST            BIT(24)
 
+/* CCSTAT register */
+#define EDMA_CCSTAT_ACTV       BIT(4)
+
 /*
  * Max of 20 segments per channel to conserve PaRAM slots
  * Also note that MAX_NR_SG should be at least the no. of periods
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
        spin_unlock_irqrestore(&echan->vchan.lock, flags);
 }
 
+/*
+ * This limit exists to avoid a possible infinite loop when waiting for proof
+ * that a particular transfer is completed. This limit can be hit if there
+ * are large bursts to/from slow devices or the CPU is never able to catch
+ * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
+ * RX-FIFO, as many as 55 loops have been seen.
+ */
+#define EDMA_MAX_TR_WAIT_LOOPS 1000
+
 static u32 edma_residue(struct edma_desc *edesc)
 {
        bool dst = edesc->direction == DMA_DEV_TO_MEM;
+       int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
+       struct edma_chan *echan = edesc->echan;
        struct edma_pset *pset = edesc->pset;
        dma_addr_t done, pos;
        int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
         * We always read the dst/src position from the first RamPar
         * pset. That's the one which is active now.
         */
-       pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
+       pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+
+       /*
+        * "pos" may represent a transfer request that is still being
+        * processed by the EDMACC or EDMATC. We will busy wait until
+        * any one of the situations occurs:
+        *   1. the DMA hardware is idle
+        *   2. a new transfer request is setup
+        *   3. we hit the loop limit
+        */
+       while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
+               /* check if a new transfer request is setup */
+               if (edma_get_position(echan->ecc,
+                                     echan->slot[0], dst) != pos) {
+                       break;
+               }
+
+               if (!--loop_count) {
+                       dev_dbg_ratelimited(echan->vchan.chan.device->dev,
+                               "%s: timeout waiting for PaRAM update\n",
+                               __func__);
+                       break;
+               }
+
+               cpu_relax();
+       }
 
        /*
         * Cyclic is simple. Just subtract pset[0].addr from pos.
index 57ff46284f159eabca5c9b6b28320414605d150f..21f08cc3352b97fbf047cd8661a2191e1988e0f6 100644 (file)
@@ -421,23 +421,25 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
                        desc->size);
        }
 
-       switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
-       case M2P_INTERRUPT_STALL:
-               /* Disable interrupts */
-               control = readl(edmac->regs + M2P_CONTROL);
-               control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
-               m2p_set_control(edmac, control);
-
-               return INTERRUPT_DONE;
-
-       case M2P_INTERRUPT_NFB:
-               if (ep93xx_dma_advance_active(edmac))
-                       m2p_fill_desc(edmac);
+       /*
+        * Even the latest E2 silicon revision sometimes asserts the STALL interrupt
+        * instead of NFB. Therefore we treat them equally, based on the
+        * amount of data we still have to transfer.
+        */
+       if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
+               return INTERRUPT_UNKNOWN;
 
+       if (ep93xx_dma_advance_active(edmac)) {
+               m2p_fill_desc(edmac);
                return INTERRUPT_NEXT_BUFFER;
        }
 
-       return INTERRUPT_UNKNOWN;
+       /* Disable interrupts */
+       control = readl(edmac->regs + M2P_CONTROL);
+       control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+       m2p_set_control(edmac, control);
+
+       return INTERRUPT_DONE;
 }
 
 /*
index 1d5df2ef148b16d3c379a11e14a7da5283f9d5b8..21539d5c54c3d5c2d2dc3244650691bf414cf879 100644 (file)
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
                        return;
        }
 
+       spin_lock_bh(&ioat_chan->cleanup_lock);
+
+       /* handle the no-actives case */
+       if (!ioat_ring_active(ioat_chan)) {
+               spin_lock_bh(&ioat_chan->prep_lock);
+               check_active(ioat_chan);
+               spin_unlock_bh(&ioat_chan->prep_lock);
+               spin_unlock_bh(&ioat_chan->cleanup_lock);
+               return;
+       }
+
        /* if we haven't made progress and we have already
         * acknowledged a pending completion once, then be more
         * forceful with a restart
         */
-       spin_lock_bh(&ioat_chan->cleanup_lock);
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);
        else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+               u32 chanerr;
+
+               chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+               dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
+               dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+                        status, chanerr);
+               dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
+                        ioat_ring_active(ioat_chan));
+
                spin_lock_bh(&ioat_chan->prep_lock);
                ioat_restart_channel(ioat_chan);
                spin_unlock_bh(&ioat_chan->prep_lock);
                spin_unlock_bh(&ioat_chan->cleanup_lock);
                return;
-       } else {
+       } else
                set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
-               mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-       }
-
 
-       if (ioat_ring_active(ioat_chan))
-               mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-       else {
-               spin_lock_bh(&ioat_chan->prep_lock);
-               check_active(ioat_chan);
-               spin_unlock_bh(&ioat_chan->prep_lock);
-       }
+       mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
        spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
index 6bb4a13a8fbd2f4306179384fc0afb23bf4d2d31..243421af888f09fedb65682eaeb51aea8efc85cb 100644 (file)
@@ -26,7 +26,7 @@
 #include "hw.h"
 #include "dma.h"
 
-#define MAX_SCF        1024
+#define MAX_SCF        256
 
 /* provide a lookup table for setting the source address in the base or
  * extended descriptor of an xor or pq descriptor
index 17ee758b419ffbf6e10722fbcac95a08a96e4892..1b0453b9e32df946eab574746055fc5a80389a9e 100644 (file)
@@ -33,6 +33,9 @@
 #define PL330_MAX_CHAN         8
 #define PL330_MAX_IRQS         32
 #define PL330_MAX_PERI         32
+#define PL330_MAX_BURST         16
+
+#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0)
 
 enum pl330_cachectrl {
        CCTRL0,         /* Noncacheable and nonbufferable */
@@ -488,6 +491,17 @@ struct pl330_dmac {
        /* Peripheral channels connected to this DMAC */
        unsigned int num_peripherals;
        struct dma_pl330_chan *peripherals; /* keep at end */
+       int quirks;
+};
+
+static struct pl330_of_quirks {
+       char *quirk;
+       int id;
+} of_quirks[] = {
+       {
+               .quirk = "arm,pl330-broken-no-flushp",
+               .id = PL330_QUIRK_BROKEN_NO_FLUSHP,
+       }
 };
 
 struct dma_pl330_desc {
@@ -1137,47 +1151,68 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
        return off;
 }
 
-static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
-               const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
+                                u8 buf[], const struct _xfer_spec *pxs,
+                                int cyc)
 {
        int off = 0;
+       enum pl330_cond cond;
+
+       if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+               cond = BURST;
+       else
+               cond = (pxs->desc->rqcfg.brst_len == 1) ? SINGLE : BURST;
 
        while (cyc--) {
-               off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
-               off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+               off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
+               off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
                off += _emit_ST(dry_run, &buf[off], ALWAYS);
-               off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+
+               if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+                       off += _emit_FLUSHP(dry_run, &buf[off],
+                                           pxs->desc->peri);
        }
 
        return off;
 }
 
-static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
-               const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_memtodev(struct pl330_dmac *pl330,
+                                unsigned dry_run, u8 buf[],
+                                const struct _xfer_spec *pxs, int cyc)
 {
        int off = 0;
+       enum pl330_cond cond;
+
+       if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+               cond = BURST;
+       else
+               cond = (pxs->desc->rqcfg.brst_len == 1) ? SINGLE : BURST;
+
 
        while (cyc--) {
-               off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+               off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
                off += _emit_LD(dry_run, &buf[off], ALWAYS);
-               off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
-               off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+               off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
+
+               if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+                       off += _emit_FLUSHP(dry_run, &buf[off],
+                                           pxs->desc->peri);
        }
 
        return off;
 }
 
-static int _bursts(unsigned dry_run, u8 buf[],
+static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs, int cyc)
 {
        int off = 0;
 
        switch (pxs->desc->rqtype) {
        case DMA_MEM_TO_DEV:
-               off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
+               off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
                break;
        case DMA_DEV_TO_MEM:
-               off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
+               off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
                break;
        case DMA_MEM_TO_MEM:
                off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
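
All three quirk sites above reduce to the same decision: if the controller's FLUSHP instruction is broken, always drive the peripheral with BURST requests and skip FLUSHP entirely; otherwise fall back to SINGLE only when the configured burst length is 1. A sketch of that selection (SINGLE and BURST are values of the driver-private enum pl330_cond, and the quirk flag is the one parsed from the DT in the probe hunk further down):

static enum pl330_cond pl330_pick_cond(unsigned int quirks, unsigned int brst_len)
{
        if (quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
                return BURST;           /* never pair SINGLE with a broken FLUSHP */

        return brst_len == 1 ? SINGLE : BURST;
}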
@@ -1191,7 +1226,7 @@ static int _bursts(unsigned dry_run, u8 buf[],
 }
 
 /* Returns bytes consumed and updates bursts */
-static inline int _loop(unsigned dry_run, u8 buf[],
+static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
                unsigned long *bursts, const struct _xfer_spec *pxs)
 {
        int cyc, cycmax, szlp, szlpend, szbrst, off;
@@ -1199,7 +1234,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
        struct _arg_LPEND lpend;
 
        if (*bursts == 1)
-               return _bursts(dry_run, buf, pxs, 1);
+               return _bursts(pl330, dry_run, buf, pxs, 1);
 
        /* Max iterations possible in DMALP is 256 */
        if (*bursts >= 256*256) {
@@ -1217,7 +1252,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
        }
 
        szlp = _emit_LP(1, buf, 0, 0);
-       szbrst = _bursts(1, buf, pxs, 1);
+       szbrst = _bursts(pl330, 1, buf, pxs, 1);
 
        lpend.cond = ALWAYS;
        lpend.forever = false;
@@ -1249,7 +1284,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
        off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
        ljmp1 = off;
 
-       off += _bursts(dry_run, &buf[off], pxs, cyc);
+       off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);
 
        lpend.cond = ALWAYS;
        lpend.forever = false;
@@ -1272,8 +1307,9 @@ static inline int _loop(unsigned dry_run, u8 buf[],
        return off;
 }
 
-static inline int _setup_loops(unsigned dry_run, u8 buf[],
-               const struct _xfer_spec *pxs)
+static inline int _setup_loops(struct pl330_dmac *pl330,
+                              unsigned dry_run, u8 buf[],
+                              const struct _xfer_spec *pxs)
 {
        struct pl330_xfer *x = &pxs->desc->px;
        u32 ccr = pxs->ccr;
@@ -1282,15 +1318,16 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[],
 
        while (bursts) {
                c = bursts;
-               off += _loop(dry_run, &buf[off], &c, pxs);
+               off += _loop(pl330, dry_run, &buf[off], &c, pxs);
                bursts -= c;
        }
 
        return off;
 }
 
-static inline int _setup_xfer(unsigned dry_run, u8 buf[],
-               const struct _xfer_spec *pxs)
+static inline int _setup_xfer(struct pl330_dmac *pl330,
+                             unsigned dry_run, u8 buf[],
+                             const struct _xfer_spec *pxs)
 {
        struct pl330_xfer *x = &pxs->desc->px;
        int off = 0;
@@ -1301,7 +1338,7 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
        off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
 
        /* Setup Loop(s) */
-       off += _setup_loops(dry_run, &buf[off], pxs);
+       off += _setup_loops(pl330, dry_run, &buf[off], pxs);
 
        return off;
 }
@@ -1310,8 +1347,9 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
  * A req is a sequence of one or more xfer units.
  * Returns the number of bytes taken to setup the MC for the req.
  */
-static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
-               unsigned index, struct _xfer_spec *pxs)
+static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
+                     struct pl330_thread *thrd, unsigned index,
+                     struct _xfer_spec *pxs)
 {
        struct _pl330_req *req = &thrd->req[index];
        struct pl330_xfer *x;
@@ -1328,7 +1366,7 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
        if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
                return -EINVAL;
 
-       off += _setup_xfer(dry_run, &buf[off], pxs);
+       off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
 
        /* DMASEV peripheral/event */
        off += _emit_SEV(dry_run, &buf[off], thrd->ev);
@@ -1422,7 +1460,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
        xs.desc = desc;
 
        /* First dry run to check if req is acceptable */
-       ret = _setup_req(1, thrd, idx, &xs);
+       ret = _setup_req(pl330, 1, thrd, idx, &xs);
        if (ret < 0)
                goto xfer_exit;
 
@@ -1436,7 +1474,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
        /* Hook the request */
        thrd->lstenq = idx;
        thrd->req[idx].desc = desc;
-       _setup_req(0, thrd, idx, &xs);
+       _setup_req(pl330, 0, thrd, idx, &xs);
 
        ret = 0;
 
@@ -2560,7 +2598,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 
                desc->rqtype = direction;
                desc->rqcfg.brst_size = pch->burst_sz;
-               desc->rqcfg.brst_len = 1;
+               desc->rqcfg.brst_len = pch->burst_len;
                desc->bytes_requested = period_len;
                fill_px(&desc->px, dst, src, period_len);
 
@@ -2705,7 +2743,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                }
 
                desc->rqcfg.brst_size = pch->burst_sz;
-               desc->rqcfg.brst_len = 1;
+               desc->rqcfg.brst_len = pch->burst_len;
                desc->rqtype = direction;
                desc->bytes_requested = sg_dma_len(sg);
        }
@@ -2781,6 +2819,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        struct resource *res;
        int i, ret, irq;
        int num_chan;
+       struct device_node *np = adev->dev.of_node;
 
        pdat = dev_get_platdata(&adev->dev);
 
@@ -2800,6 +2839,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
 
+       /* get quirk */
+       for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
+               if (of_property_read_bool(np, of_quirks[i].quirk))
+                       pl330->quirks |= of_quirks[i].id;
+
        res = &adev->res;
        pl330->base = devm_ioremap_resource(&adev->dev, res);
        if (IS_ERR(pl330->base))
@@ -2895,6 +2939,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+       pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
+                        1 : PL330_MAX_BURST);
 
        ret = dma_async_device_register(pd);
        if (ret) {
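
For context on the pl330 changes above: pl330_probe() now walks an of_quirks[]
table, reading each entry's boolean device-tree property and OR-ing the
matching id into pl330->quirks, which the transfer builders and the max_burst
setup then consult. A minimal sketch of such a table follows; the property
string and struct layout are illustrative assumptions, not taken verbatim
from this diff.

/* Sketch only: a quirk table as consumed by the of_property_read_bool()
 * loop added to pl330_probe() above.  The DT property name below is an
 * assumed example, not taken from this diff. */
static const struct {
        const char *quirk;      /* boolean device-tree property */
        int id;                 /* bit to set in pl330->quirks */
} of_quirks[] = {
        {
                .quirk = "arm,pl330-broken-no-flushp",
                .id = PL330_QUIRK_BROKEN_NO_FLUSHP,
        },
};
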
index f32c430eb16cfae8145edcba85ee0a9d8007882c..6e0685f1a83814f4484e65d1ae0467b0ffdbed14 100644 (file)
@@ -12,7 +12,7 @@ config RENESAS_DMA
 
 config SH_DMAE_BASE
        bool "Renesas SuperH DMA Engine support"
-       depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+       depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
        depends on !SUPERH || SH_DMA
        depends on !SH_DMA_API
        default y
@@ -41,7 +41,7 @@ endif
 
 config RCAR_DMAC
        tristate "Renesas R-Car Gen2 DMA Controller"
-       depends on ARCH_SHMOBILE || COMPILE_TEST
+       depends on ARCH_RENESAS || COMPILE_TEST
        select RENESAS_DMA
        help
          This driver supports the general purpose DMA controller found in the
@@ -49,7 +49,7 @@ config RCAR_DMAC
 
 config RENESAS_USB_DMAC
        tristate "Renesas USB-DMA Controller"
-       depends on ARCH_SHMOBILE || COMPILE_TEST
+       depends on ARCH_RENESAS || COMPILE_TEST
        select RENESAS_DMA
        select DMA_VIRTUAL_CHANNELS
        help
index f25cd79c8a79f834f8d3e169684d20ee4ced08e6..11bfee8b79a9f65418bcbe53dfc708edb220e69d 100644 (file)
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) "psci: " fmt
 
 #include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/of.h>
 #include <linux/printk.h>
 #include <linux/psci.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>
 #include <linux/suspend.h>
 
 #include <uapi/linux/psci.h>
 
+#include <asm/cpuidle.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
 #include <asm/smp_plat.h>
@@ -244,6 +247,123 @@ static int __init psci_features(u32 psci_func_id)
                              psci_func_id, 0, 0);
 }
 
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+       int i, ret, count = 0;
+       u32 *psci_states;
+       struct device_node *state_node;
+
+       /*
+        * If the PSCI cpu_suspend function hook has not been initialized
+        * idle states must not be enabled, so bail out
+        */
+       if (!psci_ops.cpu_suspend)
+               return -EOPNOTSUPP;
+
+       /* Count idle states */
+       while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+                                             count))) {
+               count++;
+               of_node_put(state_node);
+       }
+
+       if (!count)
+               return -ENODEV;
+
+       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+       if (!psci_states)
+               return -ENOMEM;
+
+       for (i = 0; i < count; i++) {
+               u32 state;
+
+               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+               ret = of_property_read_u32(state_node,
+                                          "arm,psci-suspend-param",
+                                          &state);
+               if (ret) {
+                       pr_warn(" * %s missing arm,psci-suspend-param property\n",
+                               state_node->full_name);
+                       of_node_put(state_node);
+                       goto free_mem;
+               }
+
+               of_node_put(state_node);
+               pr_debug("psci-power-state %#x index %d\n", state, i);
+               if (!psci_power_state_is_valid(state)) {
+                       pr_warn("Invalid PSCI power state %#x\n", state);
+                       ret = -EINVAL;
+                       goto free_mem;
+               }
+               psci_states[i] = state;
+       }
+       /* Idle states parsed correctly, initialize per-cpu pointer */
+       per_cpu(psci_power_state, cpu) = psci_states;
+       return 0;
+
+free_mem:
+       kfree(psci_states);
+       return ret;
+}
+
+int psci_cpu_init_idle(unsigned int cpu)
+{
+       struct device_node *cpu_node;
+       int ret;
+
+       cpu_node = of_get_cpu_node(cpu, NULL);
+       if (!cpu_node)
+               return -ENODEV;
+
+       ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+       of_node_put(cpu_node);
+
+       return ret;
+}
+
+static int psci_suspend_finisher(unsigned long index)
+{
+       u32 *state = __this_cpu_read(psci_power_state);
+
+       return psci_ops.cpu_suspend(state[index - 1],
+                                   virt_to_phys(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(unsigned long index)
+{
+       int ret;
+       u32 *state = __this_cpu_read(psci_power_state);
+       /*
+        * idle state index 0 corresponds to wfi, should never be called
+        * from the cpu_suspend operations
+        */
+       if (WARN_ON_ONCE(!index))
+               return -EINVAL;
+
+       if (!psci_power_state_loses_context(state[index - 1]))
+               ret = psci_ops.cpu_suspend(state[index - 1], 0);
+       else
+               ret = cpu_suspend(index, psci_suspend_finisher);
+
+       return ret;
+}
+
+/* ARM specific CPU idle operations */
+#ifdef CONFIG_ARM
+static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+       .suspend = psci_cpu_suspend_enter,
+       .init = psci_dt_cpu_init_idle,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+#endif
+#endif
+
 static int psci_system_suspend(unsigned long unused)
 {
        return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
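
As context for the firmware/psci.c additions above: the new hooks are meant to
be driven by an ARM cpuidle backend, with psci_dt_cpu_init_idle() parsing the
cpu-idle-states phandles once per CPU and psci_cpu_suspend_enter() entering
the selected state at idle time. A rough caller sketch follows; everything
outside the psci_* calls is an assumption, not part of this diff.

/* Illustrative cpuidle enter callback; only the psci_* call is from this
 * diff, the rest (names, error mapping) is assumed for the example. */
static int example_cpuidle_enter(struct cpuidle_device *dev,
                                 struct cpuidle_driver *drv, int index)
{
        /* index 0 is plain WFI and does not go through PSCI */
        if (index == 0) {
                cpu_do_idle();
                return index;
        }

        /* deeper states use the PSCI suspend path added above */
        return psci_cpu_suspend_enter(index) ? -1 : index;
}
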
index 0883292f640f4d512c8b198d90a65ed945c02f87..c1e43259c044abee406e77e5f15a33670534857f 100644 (file)
@@ -499,3 +499,85 @@ int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
        return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
                req, req_cnt * sizeof(*req), resp, sizeof(*resp));
 }
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+       __le32 out;
+       __le32 in;
+       int ret;
+
+       in = cpu_to_le32(peripheral);
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+                           &in, sizeof(in),
+                           &out, sizeof(out));
+
+       return ret ? false : !!out;
+}
+
+int __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys)
+{
+       __le32 scm_ret;
+       int ret;
+       struct {
+               __le32 proc;
+               __le32 image_addr;
+       } request;
+
+       request.proc = cpu_to_le32(peripheral);
+       request.image_addr = cpu_to_le32(metadata_phys);
+
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+                           &request, sizeof(request),
+                           &scm_ret, sizeof(scm_ret));
+
+       return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+       __le32 scm_ret;
+       int ret;
+       struct {
+               __le32 proc;
+               __le32 addr;
+               __le32 len;
+       } request;
+
+       request.proc = cpu_to_le32(peripheral);
+       request.addr = cpu_to_le32(addr);
+       request.len = cpu_to_le32(size);
+
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+                           &request, sizeof(request),
+                           &scm_ret, sizeof(scm_ret));
+
+       return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+       __le32 out;
+       __le32 in;
+       int ret;
+
+       in = cpu_to_le32(peripheral);
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+                           &in, sizeof(in),
+                           &out, sizeof(out));
+
+       return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+       __le32 out;
+       __le32 in;
+       int ret;
+
+       in = cpu_to_le32(peripheral);
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+                           &in, sizeof(in),
+                           &out, sizeof(out));
+
+       return ret ? : le32_to_cpu(out);
+}
index bb6555f6d63b849d4bb3330fed5de6d01e31edd2..e64fd927e5ae5b2396a827f5581ae23af239dbad 100644 (file)
@@ -61,3 +61,28 @@ int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
 {
        return -ENOTSUPP;
 }
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+       return false;
+}
+
+int __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys)
+{
+       return -ENOTSUPP;
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+       return -ENOTSUPP;
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+       return -ENOTSUPP;
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+       return -ENOTSUPP;
+}
index 45c008d688914fcbd63eb47f059bf0ac679761dd..6fc9580a26bd96159b1560a043ba218c420d8a32 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
-
+#include <linux/platform_device.h>
+#include <linux/module.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/dma-mapping.h>
 #include <linux/types.h>
 #include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/clk.h>
 
 #include "qcom_scm.h"
 
+struct qcom_scm {
+       struct device *dev;
+       struct clk *core_clk;
+       struct clk *iface_clk;
+       struct clk *bus_clk;
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+       int ret;
+
+       ret = clk_prepare_enable(__scm->core_clk);
+       if (ret)
+               goto bail;
+       ret = clk_prepare_enable(__scm->iface_clk);
+       if (ret)
+               goto disable_core;
+       ret = clk_prepare_enable(__scm->bus_clk);
+       if (ret)
+               goto disable_iface;
+
+       return 0;
+
+disable_iface:
+       clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+       clk_disable_unprepare(__scm->core_clk);
+bail:
+       return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+       clk_disable_unprepare(__scm->core_clk);
+       clk_disable_unprepare(__scm->iface_clk);
+       clk_disable_unprepare(__scm->bus_clk);
+}
+
 /**
  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
  * @entry: Entry point function for the cpus
@@ -72,11 +112,17 @@ EXPORT_SYMBOL(qcom_scm_cpu_power_down);
  */
 bool qcom_scm_hdcp_available(void)
 {
-       int ret;
+       int ret = qcom_scm_clk_enable();
+
+       if (ret)
+               goto clk_err;
 
        ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP,
-               QCOM_SCM_CMD_HDCP);
+                                               QCOM_SCM_CMD_HDCP);
 
+       qcom_scm_clk_disable();
+
+clk_err:
        return (ret > 0) ? true : false;
 }
 EXPORT_SYMBOL(qcom_scm_hdcp_available);
@@ -91,6 +137,215 @@ EXPORT_SYMBOL(qcom_scm_hdcp_available);
  */
 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
 {
-       return __qcom_scm_hdcp_req(req, req_cnt, resp);
+       int ret = qcom_scm_clk_enable();
+
+       if (ret)
+               return ret;
+
+       ret = __qcom_scm_hdcp_req(req, req_cnt, resp);
+       qcom_scm_clk_disable();
+       return ret;
 }
 EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *                           available for the given peripheral
+ * @peripheral:        peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+       int ret;
+
+       ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_PIL,
+                                          QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+       if (ret <= 0)
+               return false;
+
+       return __qcom_scm_pas_supported(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ *                            state machine for a given peripheral, using the
+ *                            metadata
+ * @peripheral: peripheral id
+ * @metadata:  pointer to memory containing ELF header, program header table
+ *             and optional blob of data used for authenticating the metadata
+ *             and the rest of the firmware
+ * @size:      size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+       dma_addr_t mdata_phys;
+       void *mdata_buf;
+       int ret;
+
+       /*
+        * During the scm call memory protection will be enabled for the meta
+        * data blob, so make sure it's physically contiguous, 4K aligned and
+        * non-cacheable to avoid XPU violations.
+        */
+       mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, GFP_KERNEL);
+       if (!mdata_buf) {
+               dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+               return -ENOMEM;
+       }
+       memcpy(mdata_buf, metadata, size);
+
+       ret = qcom_scm_clk_enable();
+       if (ret)
+               goto free_metadata;
+
+       ret = __qcom_scm_pas_init_image(peripheral, mdata_phys);
+
+       qcom_scm_clk_disable();
+
+free_metadata:
+       dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ *                           for firmware loading
+ * @peripheral:        peripheral id
+ * @addr:      start address of memory area to prepare
+ * @size:      size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+       int ret;
+
+       ret = qcom_scm_clk_enable();
+       if (ret)
+               return ret;
+
+       ret = __qcom_scm_pas_mem_setup(peripheral, addr, size);
+       qcom_scm_clk_disable();
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ *                                and reset the remote processor
+ * @peripheral:        peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+       int ret;
+
+       ret = qcom_scm_clk_enable();
+       if (ret)
+               return ret;
+
+       ret = __qcom_scm_pas_auth_and_reset(peripheral);
+       qcom_scm_clk_disable();
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+       int ret;
+
+       ret = qcom_scm_clk_enable();
+       if (ret)
+               return ret;
+
+       ret = __qcom_scm_pas_shutdown(peripheral);
+       qcom_scm_clk_disable();
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+       return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+       struct qcom_scm *scm;
+       long rate;
+       int ret;
+
+       scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+       if (!scm)
+               return -ENOMEM;
+
+       scm->dev = &pdev->dev;
+
+       scm->core_clk = devm_clk_get(&pdev->dev, "core");
+       if (IS_ERR(scm->core_clk)) {
+               if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to acquire core clk\n");
+               return PTR_ERR(scm->core_clk);
+       }
+
+       scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+       if (IS_ERR(scm->iface_clk)) {
+               if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to acquire iface clk\n");
+               return PTR_ERR(scm->iface_clk);
+       }
+
+       scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+       if (IS_ERR(scm->bus_clk)) {
+               if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to acquire bus clk\n");
+               return PTR_ERR(scm->bus_clk);
+       }
+
+       /* vote for max clk rate for highest performance */
+       rate = clk_round_rate(scm->core_clk, INT_MAX);
+       ret = clk_set_rate(scm->core_clk, rate);
+       if (ret)
+               return ret;
+
+       __scm = scm;
+
+       return 0;
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+       { .compatible = "qcom,scm",},
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
+
+static struct platform_driver qcom_scm_driver = {
+       .driver = {
+               .name   = "qcom_scm",
+               .of_match_table = qcom_scm_dt_match,
+       },
+       .probe = qcom_scm_probe,
+};
+
+builtin_platform_driver(qcom_scm_driver);
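
To illustrate how the exported PAS entry points above fit together, a firmware
loader such as a remoteproc driver would typically call them in the order
sketched below; the function name, ids and addresses are placeholders, not
part of this change.

/* Sketch of the expected call order from a firmware loader (e.g. a
 * remoteproc driver); ids, addresses and the function name are
 * placeholders, and error unwinding is trimmed for brevity. */
static int example_pas_boot(u32 pas_id, const void *mdt, size_t mdt_len,
                            phys_addr_t mem_phys, phys_addr_t mem_size)
{
        int ret;

        if (!qcom_scm_pas_supported(pas_id))
                return -ENODEV;

        ret = qcom_scm_pas_init_image(pas_id, mdt, mdt_len);
        if (ret)
                return ret;

        ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
        if (ret)
                return ret;

        /* firmware segments are copied into mem_phys here, then: */
        return qcom_scm_pas_auth_and_reset(pas_id);
}
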
index 2cce75c08b9989329f8e72a58b41675e5f0a575e..220d19c93cfc8328fb386fa954a283e312a3f499 100644 (file)
@@ -36,6 +36,18 @@ extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
 extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
                u32 *resp);
 
+#define QCOM_SCM_SVC_PIL               0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD    0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD     0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD        0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD      0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD  0x7
+extern bool __qcom_scm_pas_supported(u32 peripheral);
+extern int  __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys);
+extern int  __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int  __qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int  __qcom_scm_pas_shutdown(u32 peripheral);
+
 /* common error codes */
 #define QCOM_SCM_ENOMEM                -5
 #define QCOM_SCM_EOPNOTSUPP    -4
index 66f729eaf00bb99f09ef5960daf8c7630ceccb96..20c9539abc36e887dea47d00982e308be412d93e 100644 (file)
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
 
 # add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
        ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
        amdgpu_amdkfd_gfx_v7.o
 
@@ -34,6 +34,7 @@ amdgpu-y += \
 
 # add GMC block
 amdgpu-y += \
+       gmc_v7_0.o \
        gmc_v8_0.o
 
 # add IH block
index 313b0cc8d676d90d1f4073c28579f096afea1a74..82edf95b7740d7fe9070cba62009374c08c34562 100644 (file)
@@ -2278,60 +2278,60 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
 
 #define amdgpu_dpm_get_temperature(adev) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
-             (adev)->pm.funcs->get_temperature((adev))
+             (adev)->pm.funcs->get_temperature((adev)))
 
 #define amdgpu_dpm_set_fan_control_mode(adev, m) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
-             (adev)->pm.funcs->set_fan_control_mode((adev), (m))
+             (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
 
 #define amdgpu_dpm_get_fan_control_mode(adev) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
-             (adev)->pm.funcs->get_fan_control_mode((adev))
+             (adev)->pm.funcs->get_fan_control_mode((adev)))
 
 #define amdgpu_dpm_set_fan_speed_percent(adev, s) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-             (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
+             (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
 
 #define amdgpu_dpm_get_fan_speed_percent(adev, s) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-             (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
+             (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
 
 #define amdgpu_dpm_get_sclk(adev, l) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
-               (adev)->pm.funcs->get_sclk((adev), (l))
+               (adev)->pm.funcs->get_sclk((adev), (l)))
 
 #define amdgpu_dpm_get_mclk(adev, l)  \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
-             (adev)->pm.funcs->get_mclk((adev), (l))
+             (adev)->pm.funcs->get_mclk((adev), (l)))
 
 
 #define amdgpu_dpm_force_performance_level(adev, l) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
-             (adev)->pm.funcs->force_performance_level((adev), (l))
+             (adev)->pm.funcs->force_performance_level((adev), (l)))
 
 #define amdgpu_dpm_powergate_uvd(adev, g) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
-             (adev)->pm.funcs->powergate_uvd((adev), (g))
+             (adev)->pm.funcs->powergate_uvd((adev), (g)))
 
 #define amdgpu_dpm_powergate_vce(adev, g) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
-             (adev)->pm.funcs->powergate_vce((adev), (g))
+             (adev)->pm.funcs->powergate_vce((adev), (g)))
 
 #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
-             (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
+             (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
 
 #define amdgpu_dpm_get_current_power_state(adev) \
        (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
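
The amdgpu_dpm_* hunk above only adds outer parentheses, but that matters: the
macro bodies are conditional expressions, and without the wrapping parentheses
they bind incorrectly when composed into a larger expression. A minimal
standalone illustration with placeholder macros (not the amdgpu ones):

#include <stdio.h>

#define PICK_BAD(flag, a, b)    (flag) ? (a) : (b)      /* like the old macros */
#define PICK_GOOD(flag, a, b)   ((flag) ? (a) : (b))    /* like the fixed ones */

int main(void)
{
        /* ?: binds looser than +, so the unparenthesized form swallows "+ 10" */
        int bad  = PICK_BAD(1, 1, 2) + 10;      /* 1 ? 1 : (2 + 10)  ->  1 */
        int good = PICK_GOOD(1, 1, 2) + 10;     /* (1 ? 1 : 2) + 10  -> 11 */

        printf("bad=%d good=%d\n", bad, good);
        return 0;
}
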
index 0e1376317683e4de30803a987450dba3e01b9621..362bedc9e50791ee6a9b1e60f4885a87ba158677 100644 (file)
@@ -154,7 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .get_fw_version = get_fw_version
 };
 
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
 {
        return (struct kfd2kgd_calls *)&kfd2kgd;
 }
index 79fa5c7de856eab635ea9010ab0b8bfc446c37c2..04b744d64b57a6355f38ff071f48836148880257 100644 (file)
@@ -115,7 +115,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .get_fw_version = get_fw_version
 };
 
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
 {
        return (struct kfd2kgd_calls *)&kfd2kgd;
 }
index 6f89f8e034d0eb05b96378581f8dfa7dee0fb319..b882e8175615fba482ac9c8b8f29561e7549dc18 100644 (file)
@@ -478,9 +478,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        unsigned i;
 
-       amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
-
        if (!error) {
+               amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
+
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
                 * This assures that the smallest buffers are added first
index b5dbbb57349190ce4b28409547781c515283994f..9c1af8976befea20cf371cc4a15f226dbe434d75 100644 (file)
@@ -256,11 +256,11 @@ static struct pci_device_id pciidlist[] = {
        {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
        /* topaz */
-       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
        /* tonga */
        {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
        {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
index cfb6caad2a73a4264ab9386f1c6616cf75ef0c77..919146780a15a91434f7383e666eedabdf402ca0 100644 (file)
@@ -333,6 +333,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
        if (!adev->mode_info.mode_config_initialized)
                return 0;
 
+       /* don't init fbdev if there are no connectors */
+       if (list_empty(&adev->ddev->mode_config.connector_list))
+               return 0;
+
        /* select 8 bpp console on low vram cards */
        if (adev->mc.real_vram_size <= (32*1024*1024))
                bpp_sel = 8;
index c3ce103b6a33b2ef3a0a6f7f14877e18150ca405..b8fbbd7699e4586e13e57905b0cef818f6e43e6b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_cache.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                       AMDGPU_GEM_DOMAIN_OA);
 
        bo->flags = flags;
+
+       /* For architectures that don't support WC memory,
+        * mask out the WC flag from the BO
+        */
+       if (!drm_arch_can_wc_memory())
+               bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
        amdgpu_fill_placement_to_bo(bo, placement);
        /* Kernel allocation are uninterruptible */
        r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
@@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                }
                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
-               if (lpfn && lpfn < bo->placements[i].lpfn)
+               if (!bo->placements[i].lpfn ||
+                   (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
index 5ee9a069027885989f8aad55e9340e1a9d3424ea..b9d0d55f6b47df33ab7ebf7024fe7ca6e821892a 100644 (file)
@@ -99,13 +99,24 @@ static int amdgpu_pp_early_init(void *handle)
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
        switch (adev->asic_type) {
-               case CHIP_TONGA:
-               case CHIP_FIJI:
-                       adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
-                       break;
-               default:
-                       adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
-                       break;
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+               adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+               break;
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+               adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+               break;
+       /* These chips don't have powerplay implementations */
+       case CHIP_BONAIRE:
+       case CHIP_HAWAII:
+       case CHIP_KABINI:
+       case CHIP_MULLINS:
+       case CHIP_KAVERI:
+       case CHIP_TOPAZ:
+       default:
+               adev->pp_enabled = false;
+               break;
        }
 #else
        adev->pp_enabled = false;
index 78e9b0f14661a0d0f404f67df3789b052dcd9133..d1f234dd21261e06764b9e5b623d11ff08f20fe4 100644 (file)
@@ -487,7 +487,7 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
        seq_printf(m, "rptr: 0x%08x [%5d]\n",
                   rptr, rptr);
 
-       rptr_next = ~0;
+       rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
 
        seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
                   ring->wptr, ring->wptr);
index 8a1752ff3d8e55a1d8b8ab3061f5c9c425058d0f..55cf05e1c81c398fc747f598d1ce0bc78a38a435 100644 (file)
@@ -808,7 +808,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                        flags |= AMDGPU_PTE_SNOOPED;
        }
 
-       if (adev->asic_type >= CHIP_TOPAZ)
+       if (adev->asic_type >= CHIP_TONGA)
                flags |= AMDGPU_PTE_EXECUTABLE;
 
        flags |= AMDGPU_PTE_READABLE;
index aefc668e6b5d79e65873e973275d5f0beb7b10d3..9599f7559b3dc86308d2c375739d0cd20ee60941 100644 (file)
@@ -1282,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                AMDGPU_VM_PTE_COUNT * 8);
-       unsigned pd_size, pd_entries, pts_size;
+       unsigned pd_size, pd_entries;
        int i, r;
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1300,8 +1300,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        pd_entries = amdgpu_vm_num_pdes(adev);
 
        /* allocate page table array */
-       pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
-       vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+       vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
        if (vm->page_tables == NULL) {
                DRM_ERROR("Cannot allocate memory for page table array\n");
                return -ENOMEM;
@@ -1361,7 +1360,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
                amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
-       kfree(vm->page_tables);
+       drm_free_large(vm->page_tables);
 
        amdgpu_bo_unref(&vm->page_directory);
        fence_put(vm->page_directory_fence);
index 72793f93e2fcc9989e5f936ec1d43ba0593ac69e..6c76139de1c9c5992579a7eb44d1f6f4eff9da9a 100644 (file)
@@ -4738,6 +4738,22 @@ static int gfx_v7_0_early_init(void *handle)
        return 0;
 }
 
+static int gfx_v7_0_late_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+       if (r)
+               return r;
+
+       return 0;
+}
+
 static int gfx_v7_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
@@ -4890,6 +4906,8 @@ static int gfx_v7_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+       amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
        gfx_v7_0_cp_enable(adev, false);
        gfx_v7_0_rlc_stop(adev);
        gfx_v7_0_fini_pg(adev);
@@ -5527,7 +5545,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
        .early_init = gfx_v7_0_early_init,
-       .late_init = NULL,
+       .late_init = gfx_v7_0_late_init,
        .sw_init = gfx_v7_0_sw_init,
        .sw_fini = gfx_v7_0_sw_fini,
        .hw_init = gfx_v7_0_hw_init,
index 13235d84e5a67d45ca52bf4de9536d1c22a26dc6..8f8ec37ecd883599b416773a0a6a579da6131515 100644 (file)
@@ -111,7 +111,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
 MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 
 MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -828,7 +827,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-       if (adev->asic_type != CHIP_STONEY) {
+       if ((adev->asic_type != CHIP_STONEY) &&
+           (adev->asic_type != CHIP_TOPAZ)) {
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
                err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
                if (!err) {
@@ -3851,10 +3851,16 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
                        if (r)
                                return -EINVAL;
 
-                       r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                                       AMDGPU_UCODE_ID_CP_MEC1);
-                       if (r)
-                               return -EINVAL;
+                       if (adev->asic_type == CHIP_TOPAZ) {
+                               r = gfx_v8_0_cp_compute_load_microcode(adev);
+                               if (r)
+                                       return r;
+                       } else {
+                               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                                                                AMDGPU_UCODE_ID_CP_MEC1);
+                               if (r)
+                                       return -EINVAL;
+                       }
                }
        }
 
@@ -3901,6 +3907,8 @@ static int gfx_v8_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+       amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
        gfx_v8_0_cp_enable(adev, false);
        gfx_v8_0_rlc_stop(adev);
        gfx_v8_0_cp_compute_fini(adev);
@@ -4186,7 +4194,18 @@ static int gfx_v8_0_soft_reset(void *handle)
                gfx_v8_0_cp_gfx_enable(adev, false);
 
                /* Disable MEC parsing/prefetching */
-               /* XXX todo */
+               gfx_v8_0_cp_compute_enable(adev, false);
+
+               if (grbm_soft_reset || srbm_soft_reset) {
+                       tmp = RREG32(mmGMCON_DEBUG);
+                       tmp = REG_SET_FIELD(tmp,
+                                           GMCON_DEBUG, GFX_STALL, 1);
+                       tmp = REG_SET_FIELD(tmp,
+                                           GMCON_DEBUG, GFX_CLEAR, 1);
+                       WREG32(mmGMCON_DEBUG, tmp);
+
+                       udelay(50);
+               }
 
                if (grbm_soft_reset) {
                        tmp = RREG32(mmGRBM_SOFT_RESET);
@@ -4215,6 +4234,16 @@ static int gfx_v8_0_soft_reset(void *handle)
                        WREG32(mmSRBM_SOFT_RESET, tmp);
                        tmp = RREG32(mmSRBM_SOFT_RESET);
                }
+
+               if (grbm_soft_reset || srbm_soft_reset) {
+                       tmp = RREG32(mmGMCON_DEBUG);
+                       tmp = REG_SET_FIELD(tmp,
+                                           GMCON_DEBUG, GFX_STALL, 0);
+                       tmp = REG_SET_FIELD(tmp,
+                                           GMCON_DEBUG, GFX_CLEAR, 0);
+                       WREG32(mmGMCON_DEBUG, tmp);
+               }
+
                /* Wait a little for things to settle down */
                udelay(50);
                gfx_v8_0_print_status((void *)adev);
@@ -4308,6 +4337,14 @@ static int gfx_v8_0_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;
 
+       r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+       if (r)
+               return r;
+
        /* requires IBs so do in late init after IB pool is initialized */
        r = gfx_v8_0_do_edc_gpr_workarounds(adev);
        if (r)
index 3f956065d069125df0200f78527437134bf7f6d1..8aa2991ab379af7c6d981c1e01734f6b83da3692 100644 (file)
@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+static const u32 golden_settings_iceland_a11[] =
+{
+       mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+       mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               amdgpu_program_register_sequence(adev,
+                                                iceland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_iceland_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+               break;
+       default:
+               break;
+       }
+}
 
 /**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
+ * gmc7_mc_wait_for_idle - wait for MC idle callback.
  *
  * @adev: amdgpu_device pointer
  *
@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_HAWAII:
                chip_name = "hawaii";
                break;
+       case CHIP_TOPAZ:
+               chip_name = "topaz";
+               break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
                return 0;
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       if (adev->asic_type == CHIP_TOPAZ)
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -984,6 +1021,8 @@ static int gmc_v7_0_hw_init(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       gmc_v7_0_init_golden_registers(adev);
+
        gmc_v7_0_mc_program(adev);
 
        if (!(adev->flags & AMD_IS_APU)) {
index c0c9a0101eb453c3139b9796ac8449350f0fb4bd..3efd45546241afc65e5d7db1fbc541fda946857b 100644 (file)
@@ -42,9 +42,7 @@
 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 golden_settings_iceland_a11[] =
-{
-       mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
-};
-
-static const u32 iceland_mgcg_cgcg_init[] =
-{
-       mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
-};
-
 static const u32 cz_mgcg_cgcg_init[] =
 {
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
        switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               amdgpu_program_register_sequence(adev,
-                                                iceland_mgcg_cgcg_init,
-                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
-               amdgpu_program_register_sequence(adev,
-                                                golden_settings_iceland_a11,
-                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
-               break;
        case CHIP_FIJI:
                amdgpu_program_register_sequence(adev,
                                                 fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               chip_name = "topaz";
-               break;
        case CHIP_TONGA:
                chip_name = "tonga";
                break;
        case CHIP_FIJI:
-               chip_name = "fiji";
-               break;
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                return 0;
@@ -1007,7 +979,7 @@ static int gmc_v8_0_hw_init(void *handle)
 
        gmc_v8_0_mc_program(adev);
 
-       if (!(adev->flags & AMD_IS_APU)) {
+       if (adev->asic_type == CHIP_TONGA) {
                r = gmc_v8_0_mc_load_microcode(adev);
                if (r) {
                        DRM_ERROR("Failed to load MC firmware!\n");
index 966d4b2ed9dad0527a2d0246b23731f1d802d9e2..090486c182497ba8de2aaa7840242be9cf4750aa 100644 (file)
@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
                case AMDGPU_UCODE_ID_CP_ME:
                        return UCODE_ID_CP_ME_MASK;
                case AMDGPU_UCODE_ID_CP_MEC1:
-                       return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+                       return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
                case AMDGPU_UCODE_ID_CP_MEC2:
                        return UCODE_ID_CP_MEC_MASK;
                case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-               return -EINVAL;
-       }
-
        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
                        UCODE_ID_CP_ME_MASK |
                        UCODE_ID_CP_PFP_MASK |
                        UCODE_ID_CP_MEC_MASK |
-                       UCODE_ID_CP_MEC_JT1_MASK |
-                       UCODE_ID_CP_MEC_JT2_MASK;
+                       UCODE_ID_CP_MEC_JT1_MASK;
+
 
        if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
                DRM_ERROR("Fail to request SMU load ucode\n");
index f4a1346525febf9c9a463e32525c635f70873a93..0497784b36523086a029bb5a08cd977c9a9f6372 100644 (file)
@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
 
 static int tonga_dpm_suspend(void *handle)
 {
-       return 0;
+       return tonga_dpm_hw_fini(handle);
 }
 
 static int tonga_dpm_resume(void *handle)
 {
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-
-       ret = tonga_smu_start(adev);
-       if (ret) {
-               DRM_ERROR("SMU start failed\n");
-               goto fail;
-       }
-
-fail:
-       mutex_unlock(&adev->pm.mutex);
-       return ret;
+       return tonga_dpm_hw_init(handle);
 }
 
 static int tonga_dpm_set_clockgating_state(void *handle,
index 652e76644c31cab37fee1964797d890749fbf0a7..89f5a1ff6f43aba335945d4f7a2643c0310623f8 100644 (file)
@@ -61,6 +61,7 @@
 #include "vi.h"
 #include "vi_dpm.h"
 #include "gmc_v8_0.h"
+#include "gmc_v7_0.h"
 #include "gfx_v8_0.h"
 #include "sdma_v2_4.h"
 #include "sdma_v3_0.h"
@@ -1109,10 +1110,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
-               .major = 8,
-               .minor = 0,
+               .major = 7,
+               .minor = 4,
                .rev = 0,
-               .funcs = &gmc_v8_0_ip_funcs,
+               .funcs = &gmc_v7_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
@@ -1442,8 +1443,7 @@ static int vi_common_early_init(void *handle)
                break;
        case CHIP_FIJI:
                adev->has_uvd = true;
-               adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG |
-                               AMDGPU_CG_SUPPORT_VCE_MGCG;
+               adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x3c;
                break;
index 9be007081b72a8b5c75b4baf1fe01e92bfe792bc..a902ae037398389cf6e19c1ab7379ca64e6a46ed 100644 (file)
@@ -194,7 +194,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 
        kfree(p);
 
-       kfree((void *)work);
+       kfree(work);
 }
 
 static void kfd_process_destroy_delayed(struct rcu_head *rcu)
index 8f5d5edcf193b67a67f6990824f8ef8260e1afa5..aa67244a77ae38cf69761bc964c5f1d4400874c5 100644 (file)
@@ -64,6 +64,11 @@ static int pp_sw_init(void *handle)
        if (ret == 0)
                ret = hwmgr->hwmgr_func->backend_init(hwmgr);
 
+       if (ret)
+               printk("amdgpu: powerplay initialization failed\n");
+       else
+               printk("amdgpu: powerplay initialized\n");
+
        return ret;
 }
 
index 873a8d264d5c6aa7af31322fd5818dc1f46c7317..ec222c665602df7113cb4ec75b3fc2633d86a8a4 100644 (file)
@@ -272,6 +272,9 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
                                UCODE_ID_CP_MEC_JT1_MASK |
                                UCODE_ID_CP_MEC_JT2_MASK;
 
+       if (smumgr->chip_id == CHIP_STONEY)
+               fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
        cz_request_smu_load_fw(smumgr);
        cz_check_fw_load_finish(smumgr, fw_to_check);
 
@@ -282,7 +285,7 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
        return ret;
 }
 
-static uint8_t cz_translate_firmware_enum_to_arg(
+static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
                        enum cz_scratch_entry firmware_enum)
 {
        uint8_t ret = 0;
@@ -292,7 +295,10 @@ static uint8_t cz_translate_firmware_enum_to_arg(
                ret = UCODE_ID_SDMA0;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
-               ret = UCODE_ID_SDMA1;
+               if (smumgr->chip_id == CHIP_STONEY)
+                       ret = UCODE_ID_SDMA0;
+               else
+                       ret = UCODE_ID_SDMA1;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
                ret = UCODE_ID_CP_CE;
@@ -307,7 +313,10 @@ static uint8_t cz_translate_firmware_enum_to_arg(
                ret = UCODE_ID_CP_MEC_JT1;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
-               ret = UCODE_ID_CP_MEC_JT2;
+               if (smumgr->chip_id == CHIP_STONEY)
+                       ret = UCODE_ID_CP_MEC_JT1;
+               else
+                       ret = UCODE_ID_CP_MEC_JT2;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
                ret = UCODE_ID_GMCON_RENG;
@@ -396,7 +405,7 @@ static int cz_smu_populate_single_scratch_task(
        struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
 
        task->type = type;
-       task->arg = cz_translate_firmware_enum_to_arg(fw_enum);
+       task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
        task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
 
        for (i = 0; i < cz_smu->scratch_buffer_length; i++)
@@ -433,7 +442,7 @@ static int cz_smu_populate_single_ucode_load_task(
        struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
 
        task->type = TASK_TYPE_UCODE_LOAD;
-       task->arg = cz_translate_firmware_enum_to_arg(fw_enum);
+       task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
        task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
 
        for (i = 0; i < cz_smu->driver_buffer_length; i++)
@@ -509,8 +518,14 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-       cz_smu_populate_single_ucode_load_task(smumgr,
+
+       if (smumgr->chip_id == CHIP_STONEY)
+               cz_smu_populate_single_ucode_load_task(smumgr,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+       else
+               cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
 
@@ -551,7 +566,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
 
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-       cz_smu_populate_single_ucode_load_task(smumgr,
+       if (smumgr->chip_id == CHIP_STONEY)
+               cz_smu_populate_single_ucode_load_task(smumgr,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+       else
+               cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
@@ -561,7 +580,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-       cz_smu_populate_single_ucode_load_task(smumgr,
+       if (smumgr->chip_id == CHIP_STONEY)
+               cz_smu_populate_single_ucode_load_task(smumgr,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+       else
+               cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
@@ -618,7 +641,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
 
        for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
 
-               firmware_type = cz_translate_firmware_enum_to_arg(
+               firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
                                        firmware_list[i]);
 
                ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
index 57cccd68ca522548c6c098ee72798a5b3369def0..7c523060a076f715746829004657b227a9308640 100644 (file)
@@ -946,9 +946,23 @@ static void wait_for_fences(struct drm_device *dev,
        }
 }
 
-static bool framebuffer_changed(struct drm_device *dev,
-                               struct drm_atomic_state *old_state,
-                               struct drm_crtc *crtc)
+/**
+ * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ * @crtc: DRM crtc
+ *
+ * Checks whether the framebuffer used for this CRTC changes as a result of
+ * the atomic update.  This is useful for drivers which cannot use
+ * drm_atomic_helper_wait_for_vblanks() and need to reimplement its
+ * functionality.
+ *
+ * Returns:
+ * true if the framebuffer changed.
+ */
+bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
+                                          struct drm_atomic_state *old_state,
+                                          struct drm_crtc *crtc)
 {
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state;
@@ -965,6 +979,7 @@ static bool framebuffer_changed(struct drm_device *dev,
 
        return false;
 }
+EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed);
 
 /**
  * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
@@ -999,7 +1014,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                if (old_state->legacy_cursor_update)
                        continue;
 
-               if (!framebuffer_changed(dev, old_state, crtc))
+               if (!drm_atomic_helper_framebuffer_changed(dev,
+                               old_state, crtc))
                        continue;
 
                ret = drm_crtc_vblank_get(crtc);
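
A minimal sketch of how a driver that cannot use drm_atomic_helper_wait_for_vblanks() might call the newly exported drm_atomic_helper_framebuffer_changed() from its own commit path. The example_wait_for_flip() name and its surrounding structure are assumptions for illustration only, not part of this merge.

/* Sketch only: skip the vblank wait when the CRTC kept its framebuffer. */
static void example_wait_for_flip(struct drm_device *dev,
				  struct drm_atomic_state *old_state,
				  struct drm_crtc *crtc)
{
	if (!drm_atomic_helper_framebuffer_changed(dev, old_state, crtc))
		return;

	if (drm_crtc_vblank_get(crtc) == 0) {
		drm_crtc_wait_one_vblank(crtc);
		drm_crtc_vblank_put(crtc);
	}
}
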
index 6ed90a2437e50438c713fc028f28705e7954b806..8ae13de272c43a7f79822098318a5c5317b54483 100644 (file)
@@ -803,12 +803,33 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
        return mstb;
 }
 
+static void drm_dp_free_mst_port(struct kref *kref);
+
+static void drm_dp_free_mst_branch_device(struct kref *kref)
+{
+       struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+       if (mstb->port_parent) {
+               if (list_empty(&mstb->port_parent->next))
+                       kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
+       }
+       kfree(mstb);
+}
+
 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 {
        struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
        struct drm_dp_mst_port *port, *tmp;
        bool wake_tx = false;
 
+       /*
+        * init kref again to be used by ports to remove mst branch when it is
+        * not needed anymore
+        */
+       kref_init(kref);
+
+       if (mstb->port_parent && list_empty(&mstb->port_parent->next))
+               kref_get(&mstb->port_parent->kref);
+
        /*
         * destroy all ports - don't need lock
         * as there are no more references to the mst branch
@@ -835,7 +856,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 
        if (wake_tx)
                wake_up(&mstb->mgr->tx_waitq);
-       kfree(mstb);
+
+       kref_put(kref, drm_dp_free_mst_branch_device);
 }
 
 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -883,6 +905,7 @@ static void drm_dp_destroy_port(struct kref *kref)
                         * from an EDID retrieval */
 
                        mutex_lock(&mgr->destroy_connector_lock);
+                       kref_get(&port->parent->kref);
                        list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
@@ -1018,18 +1041,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
        return send_link;
 }
 
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
-                                  struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
 {
        int ret;
-       if (port->dpcd_rev >= 0x12) {
-               port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
-               if (!port->guid_valid) {
-                       ret = drm_dp_send_dpcd_write(mstb->mgr,
-                                                    port,
-                                                    DP_GUID,
-                                                    16, port->guid);
-                       port->guid_valid = true;
+
+       memcpy(mstb->guid, guid, 16);
+
+       if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+               if (mstb->port_parent) {
+                       ret = drm_dp_send_dpcd_write(
+                                       mstb->mgr,
+                                       mstb->port_parent,
+                                       DP_GUID,
+                                       16,
+                                       mstb->guid);
+               } else {
+
+                       ret = drm_dp_dpcd_write(
+                                       mstb->mgr->aux,
+                                       DP_GUID,
+                                       mstb->guid,
+                                       16);
                }
        }
 }
@@ -1086,7 +1118,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
        port->dpcd_rev = port_msg->dpcd_revision;
        port->num_sdp_streams = port_msg->num_sdp_streams;
        port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
-       memcpy(port->guid, port_msg->peer_guid, 16);
 
        /* manage mstb port lists with mgr lock - take a reference
           for this list */
@@ -1099,11 +1130,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 
        if (old_ddps != port->ddps) {
                if (port->ddps) {
-                       drm_dp_check_port_guid(mstb, port);
                        if (!port->input)
                                drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
                } else {
-                       port->guid_valid = false;
                        port->available_pbn = 0;
                        }
        }
@@ -1130,13 +1159,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
                        drm_dp_put_port(port);
                        goto out;
                }
-               if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
-                       port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
-                       drm_mode_connector_set_tile_property(port->connector);
-               }
+
+               drm_mode_connector_set_tile_property(port->connector);
+
                (*mstb->mgr->cbs->register_connector)(port->connector);
        }
-
 out:
        /* put reference to this port */
        drm_dp_put_port(port);
@@ -1161,11 +1188,9 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
        port->ddps = conn_stat->displayport_device_plug_status;
 
        if (old_ddps != port->ddps) {
+               dowork = true;
                if (port->ddps) {
-                       drm_dp_check_port_guid(mstb, port);
-                       dowork = true;
                } else {
-                       port->guid_valid = false;
                        port->available_pbn = 0;
                }
        }
@@ -1222,13 +1247,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
        struct drm_dp_mst_branch *found_mstb;
        struct drm_dp_mst_port *port;
 
+       if (memcmp(mstb->guid, guid, 16) == 0)
+               return mstb;
+
+
        list_for_each_entry(port, &mstb->ports, next) {
                if (!port->mstb)
                        continue;
 
-               if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
-                       return port->mstb;
-
                found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
 
                if (found_mstb)
@@ -1247,10 +1273,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
        /* find the port by iterating down */
        mutex_lock(&mgr->lock);
 
-       if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
-               mstb = mgr->mst_primary;
-       else
-               mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+       mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
 
        if (mstb)
                kref_get(&mstb->kref);
@@ -1271,8 +1294,13 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
                if (port->input)
                        continue;
 
-               if (!port->ddps)
+               if (!port->ddps) {
+                       if (port->cached_edid) {
+                               kfree(port->cached_edid);
+                               port->cached_edid = NULL;
+                       }
                        continue;
+               }
 
                if (!port->available_pbn)
                        drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1283,6 +1311,12 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
                                drm_dp_check_and_send_link_address(mgr, mstb_child);
                                drm_dp_put_mst_branch_device(mstb_child);
                        }
+               } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
+                       port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
+                       if (!port->cached_edid) {
+                               port->cached_edid =
+                                       drm_get_edid(port->connector, &port->aux.ddc);
+                       }
                }
        }
 }
@@ -1302,6 +1336,8 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
                drm_dp_check_and_send_link_address(mgr, mstb);
                drm_dp_put_mst_branch_device(mstb);
        }
+
+       (*mgr->cbs->hotplug)(mgr);
 }
 
 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1555,10 +1591,12 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                                       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
                                       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
                        }
+
+                       drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
                        for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
                                drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
                        }
-                       (*mgr->cbs->hotplug)(mgr);
                }
        } else {
                mstb->link_address_sent = false;
@@ -1602,6 +1640,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
        return 0;
 }
 
+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
+{
+       if (!mstb->port_parent)
+               return NULL;
+
+       if (mstb->port_parent->mstb != mstb)
+               return mstb->port_parent;
+
+       return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+                                                                        struct drm_dp_mst_branch *mstb,
+                                                                        int *port_num)
+{
+       struct drm_dp_mst_branch *rmstb = NULL;
+       struct drm_dp_mst_port *found_port;
+       mutex_lock(&mgr->lock);
+       if (mgr->mst_primary) {
+               found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+
+               if (found_port) {
+                       rmstb = found_port->parent;
+                       kref_get(&rmstb->kref);
+                       *port_num = found_port->port_num;
+               }
+       }
+       mutex_unlock(&mgr->lock);
+       return rmstb;
+}
+
 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
                                   struct drm_dp_mst_port *port,
                                   int id,
@@ -1609,13 +1678,18 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 {
        struct drm_dp_sideband_msg_tx *txmsg;
        struct drm_dp_mst_branch *mstb;
-       int len, ret;
+       int len, ret, port_num;
        u8 sinks[DRM_DP_MAX_SDP_STREAMS];
        int i;
 
+       port_num = port->port_num;
        mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
-       if (!mstb)
-               return -EINVAL;
+       if (!mstb) {
+               mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+               if (!mstb)
+                       return -EINVAL;
+       }
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg) {
@@ -1627,7 +1701,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
                sinks[i] = i;
 
        txmsg->dst = mstb;
-       len = build_allocate_payload(txmsg, port->port_num,
+       len = build_allocate_payload(txmsg, port_num,
                                     id,
                                     pbn, port->num_sdp_streams, sinks);
 
@@ -1983,31 +2057,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
                mgr->mst_primary = mstb;
                kref_get(&mgr->mst_primary->kref);
 
-               {
-                       struct drm_dp_payload reset_pay;
-                       reset_pay.start_slot = 0;
-                       reset_pay.num_slots = 0x3f;
-                       drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
-               }
-
                ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-                                        DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+                                                        DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
                if (ret < 0) {
                        goto out_unlock;
                }
 
-
-               /* sort out guid */
-               ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
-               if (ret != 16) {
-                       DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
-                       goto out_unlock;
-               }
-
-               mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
-               if (!mgr->guid_valid) {
-                       ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
-                       mgr->guid_valid = true;
+               {
+                       struct drm_dp_payload reset_pay;
+                       reset_pay.start_slot = 0;
+                       reset_pay.num_slots = 0x3f;
+                       drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
                }
 
                queue_work(system_long_wq, &mgr->work);
@@ -2231,9 +2291,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
                        }
 
                        drm_dp_update_port(mstb, &msg.u.conn_stat);
-                       DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
-                       (*mgr->cbs->hotplug)(mgr);
 
+                       DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
                } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
                        drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
                        if (!mstb)
@@ -2320,10 +2379,6 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
 
        case DP_PEER_DEVICE_SST_SINK:
                status = connector_status_connected;
-               /* for logical ports - cache the EDID */
-               if (port->port_num >= 8 && !port->cached_edid) {
-                       port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
-               }
                break;
        case DP_PEER_DEVICE_DP_LEGACY_CONV:
                if (port->ldps)
@@ -2378,10 +2433,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 
        if (port->cached_edid)
                edid = drm_edid_duplicate(port->cached_edid);
-       else {
-               edid = drm_get_edid(connector, &port->aux.ddc);
-               drm_mode_connector_set_tile_property(connector);
-       }
+
        port->has_audio = drm_detect_monitor_audio(edid);
        drm_dp_put_port(port);
        return edid;
@@ -2446,6 +2498,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
                DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
                if (pbn == port->vcpi.pbn) {
                        *slots = port->vcpi.num_slots;
+                       drm_dp_put_port(port);
                        return true;
                }
        }
@@ -2605,32 +2658,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
  */
 int drm_dp_calc_pbn_mode(int clock, int bpp)
 {
-       fixed20_12 pix_bw;
-       fixed20_12 fbpp;
-       fixed20_12 result;
-       fixed20_12 margin, tmp;
-       u32 res;
-
-       pix_bw.full = dfixed_const(clock);
-       fbpp.full = dfixed_const(bpp);
-       tmp.full = dfixed_const(8);
-       fbpp.full = dfixed_div(fbpp, tmp);
-
-       result.full = dfixed_mul(pix_bw, fbpp);
-       margin.full = dfixed_const(54);
-       tmp.full = dfixed_const(64);
-       margin.full = dfixed_div(margin, tmp);
-       result.full = dfixed_div(result, margin);
-
-       margin.full = dfixed_const(1006);
-       tmp.full = dfixed_const(1000);
-       margin.full = dfixed_div(margin, tmp);
-       result.full = dfixed_mul(result, margin);
-
-       result.full = dfixed_div(result, tmp);
-       result.full = dfixed_ceil(result);
-       res = dfixed_trunc(result);
-       return res;
+       u64 kbps;
+       s64 peak_kbps;
+       u32 numerator;
+       u32 denominator;
+
+       kbps = clock * bpp;
+
+       /*
+        * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+        * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+        * common multiplier to render an integer PBN for all link rate/lane
+        * counts combinations
+        * calculate
+        * peak_kbps *= (1006/1000)
+        * peak_kbps *= (64/54)
+        * peak_kbps *= 8    convert to bytes
+        */
+
+       numerator = 64 * 1006;
+       denominator = 54 * 8 * 1000 * 1000;
+
+       kbps *= numerator;
+       peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+       return drm_fixp2int_ceil(peak_kbps);
 }
 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
 
@@ -2638,11 +2690,23 @@ static int test_calc_pbn_mode(void)
 {
        int ret;
        ret = drm_dp_calc_pbn_mode(154000, 30);
-       if (ret != 689)
+       if (ret != 689) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               154000, 30, 689, ret);
                return -EINVAL;
+       }
        ret = drm_dp_calc_pbn_mode(234000, 30);
-       if (ret != 1047)
+       if (ret != 1047) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               234000, 30, 1047, ret);
                return -EINVAL;
+       }
+       ret = drm_dp_calc_pbn_mode(297000, 24);
+       if (ret != 1063) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               297000, 24, 1063, ret);
+               return -EINVAL;
+       }
        return 0;
 }
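
The self-test values above follow directly from the comment in drm_dp_calc_pbn_mode(): PBN = ceil(clock_kHz * bpp * 64 * 1006 / (54 * 8 * 1000 * 1000)). Below is a standalone sketch reproducing the three test cases with plain 64-bit integer arithmetic; pbn_for_mode() is an illustrative name, not a kernel function.

#include <stdio.h>
#include <stdint.h>

/* Sketch only: same margin factors as drm_dp_calc_pbn_mode(), rounded up. */
static int pbn_for_mode(int clock_khz, int bpp)
{
	uint64_t kbps = (uint64_t)clock_khz * bpp;
	uint64_t numerator = 64 * 1006;              /* 64/54 unit, 1.006 margin */
	uint64_t denominator = 54 * 8 * 1000 * 1000;

	return (int)((kbps * numerator + denominator - 1) / denominator);
}

int main(void)
{
	printf("%d %d %d\n",
	       pbn_for_mode(154000, 30),   /* 689 */
	       pbn_for_mode(234000, 30),   /* 1047 */
	       pbn_for_mode(297000, 24));  /* 1063 */
	return 0;
}
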
 
@@ -2783,6 +2847,13 @@ static void drm_dp_tx_work(struct work_struct *work)
        mutex_unlock(&mgr->qlock);
 }
 
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+       struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+       kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
+       kfree(port);
+}
+
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2803,13 +2874,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                list_del(&port->next);
                mutex_unlock(&mgr->destroy_connector_lock);
 
+               kref_init(&port->kref);
+               INIT_LIST_HEAD(&port->next);
+
                mgr->cbs->destroy_connector(mgr, port->connector);
 
                drm_dp_port_teardown_pdt(port, port->pdt);
 
-               if (!port->input && port->vcpi.vcpi > 0)
-                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-               kfree(port);
+               if (!port->input && port->vcpi.vcpi > 0) {
+                       if (mgr->mst_state) {
+                               drm_dp_mst_reset_vcpi_slots(mgr, port);
+                               drm_dp_update_payload_part1(mgr);
+                               drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+                       }
+               }
+
+               kref_put(&port->kref, drm_dp_free_mst_port);
                send_hotplug = true;
        }
        if (send_hotplug)
@@ -2847,6 +2927,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
        mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
        mgr->max_payloads = max_payloads;
        mgr->conn_base_id = conn_base_id;
+       if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
+           max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
+               return -EINVAL;
        mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
        if (!mgr->payloads)
                return -ENOMEM;
@@ -2854,7 +2937,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
        if (!mgr->proposed_vcpis)
                return -ENOMEM;
        set_bit(0, &mgr->payload_mask);
-       test_calc_pbn_mode();
+       if (test_calc_pbn_mode() < 0)
+               DRM_ERROR("MST PBN self-test failed\n");
+
        return 0;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
index 9e585d51fb7807278aeef68221f186f8f36c4a3c..e881482b5971d16ee2539307f6bf41746fc4dbef 100644 (file)
@@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
 git clone git://0x04.net/rules-ng-ng
 
 The rules-ng-ng source files this header was generated from are:
-- state_vg.xml (   5973 bytes, from 2015-03-25 11:26:01)
-- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
+- state_hi.xml (  24309 bytes, from 2015-12-12 09:02:53)
+- common.xml   (  18379 bytes, from 2015-12-12 09:02:53)
 
 Copyright (C) 2015
 */
@@ -30,15 +30,19 @@ Copyright (C) 2015
 #define ENDIAN_MODE_NO_SWAP                                    0x00000000
 #define ENDIAN_MODE_SWAP_16                                    0x00000001
 #define ENDIAN_MODE_SWAP_32                                    0x00000002
+#define chipModel_GC200                                                0x00000200
 #define chipModel_GC300                                                0x00000300
 #define chipModel_GC320                                                0x00000320
+#define chipModel_GC328                                                0x00000328
 #define chipModel_GC350                                                0x00000350
 #define chipModel_GC355                                                0x00000355
 #define chipModel_GC400                                                0x00000400
 #define chipModel_GC410                                                0x00000410
 #define chipModel_GC420                                                0x00000420
+#define chipModel_GC428                                                0x00000428
 #define chipModel_GC450                                                0x00000450
 #define chipModel_GC500                                                0x00000500
+#define chipModel_GC520                                                0x00000520
 #define chipModel_GC530                                                0x00000530
 #define chipModel_GC600                                                0x00000600
 #define chipModel_GC700                                                0x00000700
@@ -46,9 +50,16 @@ Copyright (C) 2015
 #define chipModel_GC860                                                0x00000860
 #define chipModel_GC880                                                0x00000880
 #define chipModel_GC1000                                       0x00001000
+#define chipModel_GC1500                                       0x00001500
 #define chipModel_GC2000                                       0x00002000
 #define chipModel_GC2100                                       0x00002100
+#define chipModel_GC2200                                       0x00002200
+#define chipModel_GC2500                                       0x00002500
+#define chipModel_GC3000                                       0x00003000
 #define chipModel_GC4000                                       0x00004000
+#define chipModel_GC5000                                       0x00005000
+#define chipModel_GC5200                                       0x00005200
+#define chipModel_GC6400                                       0x00006400
 #define RGBA_BITS_R                                            0x00000001
 #define RGBA_BITS_G                                            0x00000002
 #define RGBA_BITS_B                                            0x00000004
@@ -160,7 +171,7 @@ Copyright (C) 2015
 #define chipMinorFeatures2_UNK8                                        0x00000100
 #define chipMinorFeatures2_UNK9                                        0x00000200
 #define chipMinorFeatures2_UNK10                               0x00000400
-#define chipMinorFeatures2_SAMPLERBASE_16                      0x00000800
+#define chipMinorFeatures2_HALTI1                              0x00000800
 #define chipMinorFeatures2_UNK12                               0x00001000
 #define chipMinorFeatures2_UNK13                               0x00002000
 #define chipMinorFeatures2_UNK14                               0x00004000
@@ -189,7 +200,7 @@ Copyright (C) 2015
 #define chipMinorFeatures3_UNK5                                        0x00000020
 #define chipMinorFeatures3_UNK6                                        0x00000040
 #define chipMinorFeatures3_UNK7                                        0x00000080
-#define chipMinorFeatures3_UNK8                                        0x00000100
+#define chipMinorFeatures3_FAST_MSAA                           0x00000100
 #define chipMinorFeatures3_UNK9                                        0x00000200
 #define chipMinorFeatures3_BUG_FIXES10                         0x00000400
 #define chipMinorFeatures3_UNK11                               0x00000800
@@ -199,7 +210,7 @@ Copyright (C) 2015
 #define chipMinorFeatures3_UNK15                               0x00008000
 #define chipMinorFeatures3_UNK16                               0x00010000
 #define chipMinorFeatures3_UNK17                               0x00020000
-#define chipMinorFeatures3_UNK18                               0x00040000
+#define chipMinorFeatures3_ACE                                 0x00040000
 #define chipMinorFeatures3_UNK19                               0x00080000
 #define chipMinorFeatures3_UNK20                               0x00100000
 #define chipMinorFeatures3_UNK21                               0x00200000
@@ -207,7 +218,7 @@ Copyright (C) 2015
 #define chipMinorFeatures3_UNK23                               0x00800000
 #define chipMinorFeatures3_UNK24                               0x01000000
 #define chipMinorFeatures3_UNK25                               0x02000000
-#define chipMinorFeatures3_UNK26                               0x04000000
+#define chipMinorFeatures3_NEW_HZ                              0x04000000
 #define chipMinorFeatures3_UNK27                               0x08000000
 #define chipMinorFeatures3_UNK28                               0x10000000
 #define chipMinorFeatures3_UNK29                               0x20000000
@@ -229,9 +240,9 @@ Copyright (C) 2015
 #define chipMinorFeatures4_UNK13                               0x00002000
 #define chipMinorFeatures4_UNK14                               0x00004000
 #define chipMinorFeatures4_UNK15                               0x00008000
-#define chipMinorFeatures4_UNK16                               0x00010000
+#define chipMinorFeatures4_HALTI2                              0x00010000
 #define chipMinorFeatures4_UNK17                               0x00020000
-#define chipMinorFeatures4_UNK18                               0x00040000
+#define chipMinorFeatures4_SMALL_MSAA                          0x00040000
 #define chipMinorFeatures4_UNK19                               0x00080000
 #define chipMinorFeatures4_UNK20                               0x00100000
 #define chipMinorFeatures4_UNK21                               0x00200000
@@ -245,5 +256,37 @@ Copyright (C) 2015
 #define chipMinorFeatures4_UNK29                               0x20000000
 #define chipMinorFeatures4_UNK30                               0x40000000
 #define chipMinorFeatures4_UNK31                               0x80000000
+#define chipMinorFeatures5_UNK0                                        0x00000001
+#define chipMinorFeatures5_UNK1                                        0x00000002
+#define chipMinorFeatures5_UNK2                                        0x00000004
+#define chipMinorFeatures5_UNK3                                        0x00000008
+#define chipMinorFeatures5_UNK4                                        0x00000010
+#define chipMinorFeatures5_UNK5                                        0x00000020
+#define chipMinorFeatures5_UNK6                                        0x00000040
+#define chipMinorFeatures5_UNK7                                        0x00000080
+#define chipMinorFeatures5_UNK8                                        0x00000100
+#define chipMinorFeatures5_HALTI3                              0x00000200
+#define chipMinorFeatures5_UNK10                               0x00000400
+#define chipMinorFeatures5_UNK11                               0x00000800
+#define chipMinorFeatures5_UNK12                               0x00001000
+#define chipMinorFeatures5_UNK13                               0x00002000
+#define chipMinorFeatures5_UNK14                               0x00004000
+#define chipMinorFeatures5_UNK15                               0x00008000
+#define chipMinorFeatures5_UNK16                               0x00010000
+#define chipMinorFeatures5_UNK17                               0x00020000
+#define chipMinorFeatures5_UNK18                               0x00040000
+#define chipMinorFeatures5_UNK19                               0x00080000
+#define chipMinorFeatures5_UNK20                               0x00100000
+#define chipMinorFeatures5_UNK21                               0x00200000
+#define chipMinorFeatures5_UNK22                               0x00400000
+#define chipMinorFeatures5_UNK23                               0x00800000
+#define chipMinorFeatures5_UNK24                               0x01000000
+#define chipMinorFeatures5_UNK25                               0x02000000
+#define chipMinorFeatures5_UNK26                               0x04000000
+#define chipMinorFeatures5_UNK27                               0x08000000
+#define chipMinorFeatures5_UNK28                               0x10000000
+#define chipMinorFeatures5_UNK29                               0x20000000
+#define chipMinorFeatures5_UNK30                               0x40000000
+#define chipMinorFeatures5_UNK31                               0x80000000
 
 #endif /* COMMON_XML */
index 5c89ebb52fd29b1357316fa000d4a61188f4d320..e8858985f01ea0fb5a17e39a3c7b246d6ae562d9 100644 (file)
@@ -668,7 +668,6 @@ static struct platform_driver etnaviv_platform_driver = {
        .probe      = etnaviv_pdev_probe,
        .remove     = etnaviv_pdev_remove,
        .driver     = {
-               .owner  = THIS_MODULE,
                .name   = "etnaviv",
                .of_match_table = dt_match,
        },
index d6bd438bd5bec73eb074aa15626fc8eb3d92d70b..1cd6046e76b11d824a06b24bd77a4cea3d1a1a93 100644 (file)
@@ -85,7 +85,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
        struct dma_buf_attachment *attach, struct sg_table *sg);
 int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
 void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
-void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
+void *etnaviv_gem_vmap(struct drm_gem_object *obj);
 int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct timespec *timeout);
 int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
index bf8fa859e8bece4f037e1ef7eae1c6caf0a0c85d..4a29eeadbf1e738da23a6c29a2e00e8f68d81576 100644 (file)
@@ -201,7 +201,9 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 
                obj = vram->object;
 
+               mutex_lock(&obj->lock);
                pages = etnaviv_gem_get_pages(obj);
+               mutex_unlock(&obj->lock);
                if (pages) {
                        int j;
 
@@ -213,8 +215,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 
                iter.hdr->iova = cpu_to_le64(vram->iova);
 
-               vaddr = etnaviv_gem_vaddr(&obj->base);
-               if (vaddr && !IS_ERR(vaddr))
+               vaddr = etnaviv_gem_vmap(&obj->base);
+               if (vaddr)
                        memcpy(iter.data, vaddr, obj->base.size);
 
                etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
index 9f77c3b94cc696c4683d18da3e7848fb040874c6..4b519e4309b28edb1a1a9f56c40fb00153c87036 100644 (file)
@@ -353,25 +353,39 @@ void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
        drm_gem_object_unreference_unlocked(obj);
 }
 
-void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
+void *etnaviv_gem_vmap(struct drm_gem_object *obj)
 {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
-       mutex_lock(&etnaviv_obj->lock);
-       if (!etnaviv_obj->vaddr) {
-               struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
-
-               if (IS_ERR(pages))
-                       return ERR_CAST(pages);
+       if (etnaviv_obj->vaddr)
+               return etnaviv_obj->vaddr;
 
-               etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
-                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-       }
+       mutex_lock(&etnaviv_obj->lock);
+       /*
+        * Need to check again, as we might have raced with another thread
+        * while waiting for the mutex.
+        */
+       if (!etnaviv_obj->vaddr)
+               etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);
 
        return etnaviv_obj->vaddr;
 }
 
+static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+{
+       struct page **pages;
+
+       lockdep_assert_held(&obj->lock);
+
+       pages = etnaviv_gem_get_pages(obj);
+       if (IS_ERR(pages))
+               return NULL;
+
+       return vmap(pages, obj->base.size >> PAGE_SHIFT,
+                       VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+}
+
 static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
 {
        if (op & ETNA_PREP_READ)
@@ -522,6 +536,7 @@ static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
 static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
+       .vmap = etnaviv_gem_vmap_impl,
 };
 
 void etnaviv_gem_free_object(struct drm_gem_object *obj)
@@ -866,6 +881,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
 static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
+       .vmap = etnaviv_gem_vmap_impl,
 };
 
 int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
index a300b4b3d5458654fd1c0f200227976f64529e97..ab5df8147a5f6f1aaccad5c6ce14d99612e0ea27 100644 (file)
@@ -78,6 +78,7 @@ struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
 struct etnaviv_gem_ops {
        int (*get_pages)(struct etnaviv_gem_object *);
        void (*release)(struct etnaviv_gem_object *);
+       void *(*vmap)(struct etnaviv_gem_object *);
 };
 
 static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
index e94db4f95770e586837653b0a1b73d251bbe75d8..4e67395f5fa1c1572aa7051a86538aa16389f807 100644 (file)
@@ -31,7 +31,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
 
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
 {
-       return etnaviv_gem_vaddr(obj);
+       return etnaviv_gem_vmap(obj);
 }
 
 void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -77,9 +77,17 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
        drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
 }
 
+static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
+{
+       lockdep_assert_held(&etnaviv_obj->lock);
+
+       return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
+}
+
 static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
        /* .get_pages should never be called */
        .release = etnaviv_gem_prime_release,
+       .vmap = etnaviv_gem_prime_vmap_impl,
 };
 
 struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
index 056a72e6ed26273146269ee0258784e8a714d36a..a33162cf4f4cc6c4f0aee66433680a15f64cfffc 100644 (file)
@@ -72,6 +72,14 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
                *value = gpu->identity.minor_features3;
                break;
 
+       case ETNAVIV_PARAM_GPU_FEATURES_5:
+               *value = gpu->identity.minor_features4;
+               break;
+
+       case ETNAVIV_PARAM_GPU_FEATURES_6:
+               *value = gpu->identity.minor_features5;
+               break;
+
        case ETNAVIV_PARAM_GPU_STREAM_COUNT:
                *value = gpu->identity.stream_count;
                break;
@@ -112,6 +120,10 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
                *value = gpu->identity.num_constants;
                break;
 
+       case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
+               *value = gpu->identity.varyings_count;
+               break;
+
        default:
                DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
                return -EINVAL;
@@ -120,46 +132,56 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
        return 0;
 }
 
+
+#define etnaviv_is_model_rev(gpu, mod, rev) \
+       ((gpu)->identity.model == chipModel_##mod && \
+        (gpu)->identity.revision == rev)
+#define etnaviv_field(val, field) \
+       (((val) & field##__MASK) >> field##__SHIFT)
+
 static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 {
        if (gpu->identity.minor_features0 &
            chipMinorFeatures0_MORE_MINOR_FEATURES) {
-               u32 specs[2];
+               u32 specs[4];
+               unsigned int streams;
 
                specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
                specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
-
-               gpu->identity.stream_count =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
-                               >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
-               gpu->identity.register_max =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
-                               >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
-               gpu->identity.thread_count =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
-                               >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
-               gpu->identity.vertex_cache_size =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
-                               >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
-               gpu->identity.shader_core_count =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
-                               >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
-               gpu->identity.pixel_pipes =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
-                               >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
+               specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
+               specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
+
+               gpu->identity.stream_count = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_STREAM_COUNT);
+               gpu->identity.register_max = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_REGISTER_MAX);
+               gpu->identity.thread_count = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_THREAD_COUNT);
+               gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
+               gpu->identity.shader_core_count = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
+               gpu->identity.pixel_pipes = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
                gpu->identity.vertex_output_buffer_size =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
-                               >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;
-
-               gpu->identity.buffer_size =
-                       (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
-                               >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
-               gpu->identity.instruction_count =
-                       (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
-                               >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
-               gpu->identity.num_constants =
-                       (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
-                               >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
+                       etnaviv_field(specs[0],
+                               VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
+
+               gpu->identity.buffer_size = etnaviv_field(specs[1],
+                                       VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
+               gpu->identity.instruction_count = etnaviv_field(specs[1],
+                                       VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
+               gpu->identity.num_constants = etnaviv_field(specs[1],
+                                       VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
+
+               gpu->identity.varyings_count = etnaviv_field(specs[2],
+                                       VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
+
+               /* This overrides the value from older register if non-zero */
+               streams = etnaviv_field(specs[3],
+                                       VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
+               if (streams)
+                       gpu->identity.stream_count = streams;
        }
 
        /* Fill in the stream count if not specified */
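
For reference, the etnaviv_field() helper introduced above simply masks and shifts a packed register value. A self-contained sketch using the VARYINGS_COUNT field of VIVS_HI_CHIP_SPECS_3 (register layout as added later in this merge; the sample register value is made up for illustration):

#include <stdio.h>

#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK	0x000001f0
#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT	4

#define etnaviv_field(val, field) \
	(((val) & field##__MASK) >> field##__SHIFT)

int main(void)
{
	unsigned int specs3 = 0x000000d3;	/* sample register read */

	/* (0xd3 & 0x1f0) >> 4 == 13 varyings */
	printf("%u\n", etnaviv_field(specs3, VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT));
	return 0;
}
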
@@ -173,7 +195,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
        /* Convert the register max value */
        if (gpu->identity.register_max)
                gpu->identity.register_max = 1 << gpu->identity.register_max;
-       else if (gpu->identity.model == 0x0400)
+       else if (gpu->identity.model == chipModel_GC400)
                gpu->identity.register_max = 32;
        else
                gpu->identity.register_max = 64;
@@ -181,10 +203,10 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
        /* Convert thread count */
        if (gpu->identity.thread_count)
                gpu->identity.thread_count = 1 << gpu->identity.thread_count;
-       else if (gpu->identity.model == 0x0400)
+       else if (gpu->identity.model == chipModel_GC400)
                gpu->identity.thread_count = 64;
-       else if (gpu->identity.model == 0x0500 ||
-                gpu->identity.model == 0x0530)
+       else if (gpu->identity.model == chipModel_GC500 ||
+                gpu->identity.model == chipModel_GC530)
                gpu->identity.thread_count = 128;
        else
                gpu->identity.thread_count = 256;
@@ -206,7 +228,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
        if (gpu->identity.vertex_output_buffer_size) {
                gpu->identity.vertex_output_buffer_size =
                        1 << gpu->identity.vertex_output_buffer_size;
-       } else if (gpu->identity.model == 0x0400) {
+       } else if (gpu->identity.model == chipModel_GC400) {
                if (gpu->identity.revision < 0x4000)
                        gpu->identity.vertex_output_buffer_size = 512;
                else if (gpu->identity.revision < 0x4200)
@@ -219,9 +241,8 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 
        switch (gpu->identity.instruction_count) {
        case 0:
-               if ((gpu->identity.model == 0x2000 &&
-                    gpu->identity.revision == 0x5108) ||
-                   gpu->identity.model == 0x880)
+               if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+                   gpu->identity.model == chipModel_GC880)
                        gpu->identity.instruction_count = 512;
                else
                        gpu->identity.instruction_count = 256;
@@ -242,6 +263,30 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 
        if (gpu->identity.num_constants == 0)
                gpu->identity.num_constants = 168;
+
+       if (gpu->identity.varyings_count == 0) {
+               if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
+                       gpu->identity.varyings_count = 12;
+               else
+                       gpu->identity.varyings_count = 8;
+       }
+
+       /*
+        * For some cores, two varyings are consumed for position, so the
+        * maximum varying count needs to be reduced by one.
+        */
+       if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
+           etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
+           etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
+           etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
+           etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
+           etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
+           etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
+           etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+           etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
+           etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
+           etnaviv_is_model_rev(gpu, GC880, 0x5106))
+               gpu->identity.varyings_count -= 1;
 }
 
 static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
@@ -251,12 +296,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
        chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
 
        /* Special case for older graphic cores. */
-       if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
-            >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) ==  0x01) {
-               gpu->identity.model    = 0x500; /* gc500 */
-               gpu->identity.revision =
-                       (chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
-                       >> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT;
+       if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
+               gpu->identity.model    = chipModel_GC500;
+               gpu->identity.revision = etnaviv_field(chipIdentity,
+                                        VIVS_HI_CHIP_IDENTITY_REVISION);
        } else {
 
                gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
@@ -269,13 +312,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
                 * same.  Only for GC400 family.
                 */
                if ((gpu->identity.model & 0xff00) == 0x0400 &&
-                   gpu->identity.model != 0x0420) {
+                   gpu->identity.model != chipModel_GC420) {
                        gpu->identity.model = gpu->identity.model & 0x0400;
                }
 
                /* Another special case */
-               if (gpu->identity.model == 0x300 &&
-                   gpu->identity.revision == 0x2201) {
+               if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
                        u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
                        u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 
@@ -295,11 +337,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
        gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
 
        /* Disable fast clear on GC700. */
-       if (gpu->identity.model == 0x700)
+       if (gpu->identity.model == chipModel_GC700)
                gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 
-       if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
-           (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
+       if ((gpu->identity.model == chipModel_GC500 &&
+            gpu->identity.revision < 2) ||
+           (gpu->identity.model == chipModel_GC300 &&
+            gpu->identity.revision < 0x2000)) {
 
                /*
                 * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these
@@ -309,6 +353,8 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
                gpu->identity.minor_features1 = 0;
                gpu->identity.minor_features2 = 0;
                gpu->identity.minor_features3 = 0;
+               gpu->identity.minor_features4 = 0;
+               gpu->identity.minor_features5 = 0;
        } else
                gpu->identity.minor_features0 =
                                gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
@@ -321,6 +367,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
                                gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
                gpu->identity.minor_features3 =
                                gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
+               gpu->identity.minor_features4 =
+                               gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
+               gpu->identity.minor_features5 =
+                               gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
        }
 
        /* GC600 idle register reports zero bits where modules aren't present */
@@ -441,10 +491,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
        u16 prefetch;
 
-       if (gpu->identity.model == chipModel_GC320 &&
-           gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
-           (gpu->identity.revision == 0x5007 ||
-            gpu->identity.revision == 0x5220)) {
+       if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
+            etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
+           gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
                u32 mc_memory_debug;
 
                mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
@@ -466,7 +515,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
                  VIVS_HI_AXI_CONFIG_ARCACHE(2));
 
        /* GC2000 rev 5108 needs a special bus config */
-       if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
+       if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
                u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
                bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
                                VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
@@ -511,8 +560,16 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 
        if (gpu->identity.model == 0) {
                dev_err(gpu->dev, "Unknown GPU model\n");
-               pm_runtime_put_autosuspend(gpu->dev);
-               return -ENXIO;
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       /* Exclude VG cores with FE2.0 */
+       if (gpu->identity.features & chipFeatures_PIPE_VG &&
+           gpu->identity.features & chipFeatures_FE20) {
+               dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
+               ret = -ENXIO;
+               goto fail;
        }
 
        ret = etnaviv_hw_reset(gpu);
@@ -539,10 +596,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
                goto fail;
        }
 
-       /* TODO: we will leak here memory - fix it! */
-
        gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
        if (!gpu->mmu) {
+               iommu_domain_free(iommu);
                ret = -ENOMEM;
                goto fail;
        }
@@ -552,7 +608,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        if (!gpu->buffer) {
                ret = -ENOMEM;
                dev_err(gpu->dev, "could not create command buffer\n");
-               goto fail;
+               goto destroy_iommu;
        }
        if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
                ret = -EINVAL;
@@ -582,6 +638,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 free_buffer:
        etnaviv_gpu_cmdbuf_free(gpu->buffer);
        gpu->buffer = NULL;
+destroy_iommu:
+       etnaviv_iommu_destroy(gpu->mmu);
+       gpu->mmu = NULL;
 fail:
        pm_runtime_mark_last_busy(gpu->dev);
        pm_runtime_put_autosuspend(gpu->dev);
@@ -642,6 +701,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
                   gpu->identity.minor_features2);
        seq_printf(m, "\t minor_features3: 0x%08x\n",
                   gpu->identity.minor_features3);
+       seq_printf(m, "\t minor_features4: 0x%08x\n",
+                  gpu->identity.minor_features4);
+       seq_printf(m, "\t minor_features5: 0x%08x\n",
+                  gpu->identity.minor_features5);
 
        seq_puts(m, "\tspecs\n");
        seq_printf(m, "\t stream_count:  %d\n",
@@ -664,6 +727,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
                        gpu->identity.instruction_count);
        seq_printf(m, "\t num_constants: %d\n",
                        gpu->identity.num_constants);
+       seq_printf(m, "\t varyings_count: %d\n",
+                       gpu->identity.varyings_count);
 
        seq_printf(m, "\taxi: 0x%08x\n", axi);
        seq_printf(m, "\tidle: 0x%08x\n", idle);
index c75d50359ab07baa40d7813fe07cd9928009df91..f233ac4c7c1cea456ee4c4c4b7e0d0878aefc093 100644 (file)
@@ -46,6 +46,12 @@ struct etnaviv_chip_identity {
        /* Supported minor feature 3 fields. */
        u32 minor_features3;
 
+       /* Supported minor feature 4 fields. */
+       u32 minor_features4;
+
+       /* Supported minor feature 5 fields. */
+       u32 minor_features5;
+
        /* Number of streams supported. */
        u32 stream_count;
 
@@ -75,6 +81,9 @@ struct etnaviv_chip_identity {
 
        /* Buffer size */
        u32 buffer_size;
+
+       /* Number of varyings */
+       u8 varyings_count;
 };
 
 struct etnaviv_event {
index 0064f2640396111bce54f3ae0caa9f45e94717d0..6a7de5f1454a51532c0809e2e741c53db4fba4e9 100644 (file)
@@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
 git clone git://0x04.net/rules-ng-ng
 
 The rules-ng-ng source files this header was generated from are:
-- state_hi.xml (  23420 bytes, from 2015-03-25 11:47:21)
-- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
+- state_hi.xml (  24309 bytes, from 2015-12-12 09:02:53)
+- common.xml   (  18437 bytes, from 2015-12-12 09:02:53)
 
 Copyright (C) 2015
 */
@@ -182,8 +182,25 @@ Copyright (C) 2015
 
 #define VIVS_HI_CHIP_MINOR_FEATURE_3                           0x00000088
 
+#define VIVS_HI_CHIP_SPECS_3                                   0x0000008c
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK              0x000001f0
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT             4
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT(x)                 (((x) << VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK              0x00000007
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT             0
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x)                 (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK)
+
 #define VIVS_HI_CHIP_MINOR_FEATURE_4                           0x00000094
 
+#define VIVS_HI_CHIP_SPECS_4                                   0x0000009c
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK                        0x0001f000
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT               12
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT(x)                   (((x) << VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK)
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_5                           0x000000a0
+
+#define VIVS_HI_CHIP_PRODUCT_ID                                        0x000000a8
+
 #define VIVS_PM                                                        0x00000000
 
 #define VIVS_PM_POWER_CONTROLS                                 0x00000100
@@ -206,6 +223,11 @@ Copyright (C) 2015
 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE            0x00000001
 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE            0x00000002
 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE            0x00000004
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SH            0x00000008
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PA            0x00000010
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SE            0x00000020
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_RA            0x00000040
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX            0x00000080
 
 #define VIVS_PM_PULSE_EATER                                    0x0000010c
 
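The VIVS_HI_CHIP_SPECS_3 fields added above are presumably where the new identity.varyings_count reported in debugfs earlier in this diff comes from. A minimal standalone sketch of that unpacking, with the mask/shift values copied from the header and a register readout that is purely invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define SPECS_3_VARYINGS_COUNT__MASK   0x000001f0
#define SPECS_3_VARYINGS_COUNT__SHIFT  4
#define SPECS_3_GPU_CORE_COUNT__MASK   0x00000007
#define SPECS_3_GPU_CORE_COUNT__SHIFT  0

int main(void)
{
        uint32_t specs3 = 0x00000081;   /* hypothetical VIVS_HI_CHIP_SPECS_3 readout */
        uint32_t varyings = (specs3 & SPECS_3_VARYINGS_COUNT__MASK)
                            >> SPECS_3_VARYINGS_COUNT__SHIFT;
        uint32_t cores = (specs3 & SPECS_3_GPU_CORE_COUNT__MASK)
                         >> SPECS_3_GPU_CORE_COUNT__SHIFT;

        /* For this sample value: varyings_count=8, gpu_core_count=1 */
        printf("varyings_count=%u gpu_core_count=%u\n", varyings, cores);
        return 0;
}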
index b79c316c2ad2ce7b7eb48e152a49c41ab6e93431..673164b331c8806e14aaa10d82e2f787c10bfb09 100644 (file)
@@ -1392,7 +1392,7 @@ static const struct component_ops exynos_dp_ops = {
 static int exynos_dp_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL;
+       struct device_node *np = NULL, *endpoint = NULL;
        struct exynos_dp_device *dp;
        int ret;
 
@@ -1404,41 +1404,36 @@ static int exynos_dp_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dp);
 
        /* This is for the backward compatibility. */
-       panel_node = of_parse_phandle(dev->of_node, "panel", 0);
-       if (panel_node) {
-               dp->panel = of_drm_find_panel(panel_node);
-               of_node_put(panel_node);
+       np = of_parse_phandle(dev->of_node, "panel", 0);
+       if (np) {
+               dp->panel = of_drm_find_panel(np);
+               of_node_put(np);
                if (!dp->panel)
                        return -EPROBE_DEFER;
-       } else {
-               endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
-               if (endpoint) {
-                       panel_node = of_graph_get_remote_port_parent(endpoint);
-                       if (panel_node) {
-                               dp->panel = of_drm_find_panel(panel_node);
-                               of_node_put(panel_node);
-                               if (!dp->panel)
-                                       return -EPROBE_DEFER;
-                       } else {
-                               DRM_ERROR("no port node for panel device.\n");
-                               return -EINVAL;
-                       }
-               }
-       }
-
-       if (endpoint)
                goto out;
+       }
 
        endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
        if (endpoint) {
-               bridge_node = of_graph_get_remote_port_parent(endpoint);
-               if (bridge_node) {
-                       dp->ptn_bridge = of_drm_find_bridge(bridge_node);
-                       of_node_put(bridge_node);
-                       if (!dp->ptn_bridge)
-                               return -EPROBE_DEFER;
-               } else
-                       return -EPROBE_DEFER;
+               np = of_graph_get_remote_port_parent(endpoint);
+               if (np) {
+                       /* The remote port can be either a panel or a bridge */
+                       dp->panel = of_drm_find_panel(np);
+                       if (!dp->panel) {
+                               dp->ptn_bridge = of_drm_find_bridge(np);
+                               if (!dp->ptn_bridge) {
+                                       of_node_put(np);
+                                       return -EPROBE_DEFER;
+                               }
+                       }
+                       of_node_put(np);
+               } else {
+                       DRM_ERROR("no remote endpoint device node found.\n");
+                       return -EINVAL;
+               }
+       } else {
+               DRM_ERROR("no port endpoint subnode found.\n");
+               return -EINVAL;
        }
 
 out:
index d84a498ef099712c56f319c8333c8c8564ef457a..e977a81af2e67d91101e644b393d8ce1393b3fc8 100644 (file)
@@ -1906,8 +1906,7 @@ static int exynos_dsi_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int exynos_dsi_suspend(struct device *dev)
+static int __maybe_unused exynos_dsi_suspend(struct device *dev)
 {
        struct drm_encoder *encoder = dev_get_drvdata(dev);
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1938,7 +1937,7 @@ static int exynos_dsi_suspend(struct device *dev)
        return 0;
 }
 
-static int exynos_dsi_resume(struct device *dev)
+static int __maybe_unused exynos_dsi_resume(struct device *dev)
 {
        struct drm_encoder *encoder = dev_get_drvdata(dev);
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1972,7 +1971,6 @@ err_clk:
 
        return ret;
 }
-#endif
 
 static const struct dev_pm_ops exynos_dsi_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
index b5fbc1cbf02454d0e8c76de88f91f2c63e343e2c..0a5a60005f7e5711e14147dfa21990be1b88a2b8 100644 (file)
@@ -1289,8 +1289,7 @@ static int mixer_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_mixer_suspend(struct device *dev)
+static int __maybe_unused exynos_mixer_suspend(struct device *dev)
 {
        struct mixer_context *ctx = dev_get_drvdata(dev);
        struct mixer_resources *res = &ctx->mixer_res;
@@ -1306,7 +1305,7 @@ static int exynos_mixer_suspend(struct device *dev)
        return 0;
 }
 
-static int exynos_mixer_resume(struct device *dev)
+static int __maybe_unused exynos_mixer_resume(struct device *dev)
 {
        struct mixer_context *ctx = dev_get_drvdata(dev);
        struct mixer_resources *res = &ctx->mixer_res;
@@ -1342,7 +1341,6 @@ static int exynos_mixer_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops exynos_mixer_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
index 533d1e3d4a999f971b2479f471c965e3708a73f2..a02112ba1c3df692b3a089a3ab24a8f30d35b7ec 100644 (file)
@@ -136,6 +136,7 @@ static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
        case ADV7511_REG_BKSV(3):
        case ADV7511_REG_BKSV(4):
        case ADV7511_REG_DDC_STATUS:
+       case ADV7511_REG_EDID_READ_CTRL:
        case ADV7511_REG_BSTATUS(0):
        case ADV7511_REG_BSTATUS(1):
        case ADV7511_REG_CHIP_ID_HIGH:
@@ -362,24 +363,31 @@ static void adv7511_power_on(struct adv7511 *adv7511)
 {
        adv7511->current_edid_segment = -1;
 
-       regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                    ADV7511_INT0_EDID_READY);
-       regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-                    ADV7511_INT1_DDC_ERROR);
        regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
                           ADV7511_POWER_POWER_DOWN, 0);
+       if (adv7511->i2c_main->irq) {
+               /*
+                * Documentation says the INT_ENABLE registers are reset in
+                * POWER_DOWN mode. My 7511w preserved the bits, however.
+                * Still, let's be safe and stick to the documentation.
+                */
+               regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+                            ADV7511_INT0_EDID_READY);
+               regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+                            ADV7511_INT1_DDC_ERROR);
+       }
 
        /*
-        * Per spec it is allowed to pulse the HDP signal to indicate that the
+        * Per spec it is allowed to pulse the HPD signal to indicate that the
         * EDID information has changed. Some monitors do this when they wakeup
-        * from standby or are enabled. When the HDP goes low the adv7511 is
+        * from standby or are enabled. When the HPD goes low the adv7511 is
         * reset and the outputs are disabled which might cause the monitor to
-        * go to standby again. To avoid this we ignore the HDP pin for the
+        * go to standby again. To avoid this we ignore the HPD pin for the
         * first few seconds after enabling the output.
         */
        regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-                          ADV7511_REG_POWER2_HDP_SRC_MASK,
-                          ADV7511_REG_POWER2_HDP_SRC_NONE);
+                          ADV7511_REG_POWER2_HPD_SRC_MASK,
+                          ADV7511_REG_POWER2_HPD_SRC_NONE);
 
        /*
         * Most of the registers are reset during power down or when HPD is low.
@@ -413,9 +421,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
        if (ret < 0)
                return false;
 
-       if (irq0 & ADV7511_INT0_HDP) {
+       if (irq0 & ADV7511_INT0_HPD) {
                regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                            ADV7511_INT0_HDP);
+                            ADV7511_INT0_HPD);
                return true;
        }
 
@@ -438,7 +446,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
        regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
        regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
 
-       if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+       if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
                drm_helper_hpd_irq_event(adv7511->encoder->dev);
 
        if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -567,12 +575,14 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
 
        /* Reading the EDID only works if the device is powered */
        if (!adv7511->powered) {
-               regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                            ADV7511_INT0_EDID_READY);
-               regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-                            ADV7511_INT1_DDC_ERROR);
                regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
                                   ADV7511_POWER_POWER_DOWN, 0);
+               if (adv7511->i2c_main->irq) {
+                       regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+                                    ADV7511_INT0_EDID_READY);
+                       regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+                                    ADV7511_INT1_DDC_ERROR);
+               }
                adv7511->current_edid_segment = -1;
        }
 
@@ -638,10 +648,10 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
                if (adv7511->status == connector_status_connected)
                        status = connector_status_disconnected;
        } else {
-               /* Renable HDP sensing */
+               /* Renable HPD sensing */
                regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-                                  ADV7511_REG_POWER2_HDP_SRC_MASK,
-                                  ADV7511_REG_POWER2_HDP_SRC_BOTH);
+                                  ADV7511_REG_POWER2_HPD_SRC_MASK,
+                                  ADV7511_REG_POWER2_HPD_SRC_BOTH);
        }
 
        adv7511->status = status;
index 6599ed538426d60f41a9dbebe3e9bcd8e8b11acc..38515b30cedfc84812a556638fe7333ff6ebeb8b 100644 (file)
@@ -90,7 +90,7 @@
 #define ADV7511_CSC_ENABLE                     BIT(7)
 #define ADV7511_CSC_UPDATE_MODE                        BIT(5)
 
-#define ADV7511_INT0_HDP                       BIT(7)
+#define ADV7511_INT0_HPD                       BIT(7)
 #define ADV7511_INT0_VSYNC                     BIT(5)
 #define ADV7511_INT0_AUDIO_FIFO_FULL           BIT(4)
 #define ADV7511_INT0_EDID_READY                        BIT(2)
 #define ADV7511_PACKET_ENABLE_SPARE2           BIT(1)
 #define ADV7511_PACKET_ENABLE_SPARE1           BIT(0)
 
-#define ADV7511_REG_POWER2_HDP_SRC_MASK                0xc0
-#define ADV7511_REG_POWER2_HDP_SRC_BOTH                0x00
-#define ADV7511_REG_POWER2_HDP_SRC_HDP         0x40
-#define ADV7511_REG_POWER2_HDP_SRC_CEC         0x80
-#define ADV7511_REG_POWER2_HDP_SRC_NONE                0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_MASK                0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH                0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD         0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC         0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE                0xc0
 #define ADV7511_REG_POWER2_TDMS_ENABLE         BIT(4)
 #define ADV7511_REG_POWER2_GATE_INPUT_CLK      BIT(0)
 
index fcd77b27514dfdb738334024c2b1201732af6dc3..051eab33e4c7b13260994ebb884cc44a5394c4bc 100644 (file)
@@ -10,7 +10,6 @@ config DRM_I915
        # the shmem_readpage() which depends upon tmpfs
        select SHMEM
        select TMPFS
-       select STOP_MACHINE
        select DRM_KMS_HELPER
        select DRM_PANEL
        select DRM_MIPI_DSI
index 3ac616d7363bafcc4197aa126c509932d15fb299..f357058c74d938a41f922c4b40e3da1d67aa029a 100644 (file)
@@ -501,7 +501,9 @@ void intel_detect_pch(struct drm_device *dev)
                                WARN_ON(!IS_SKYLAKE(dev) &&
                                        !IS_KABYLAKE(dev));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
-                                  (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
+                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+                                   pch->subsystem_vendor == 0x1af4 &&
+                                   pch->subsystem_device == 0x1100)) {
                                dev_priv->pch_type = intel_virt_detect_pch(dev);
                        } else
                                continue;
index f0f75d7c0d94263f86ccfdd7d8b84af831462be0..e7cd311e9fbb250b755857462b4eefff59a22d0a 100644 (file)
@@ -1988,6 +1988,9 @@ enum hdmi_force_audio {
 #define I915_GTT_OFFSET_NONE ((u32)-1)
 
 struct drm_i915_gem_object_ops {
+       unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages before to binding them into the GTT, and put_pages() is
@@ -2003,6 +2006,7 @@ struct drm_i915_gem_object_ops {
         */
        int (*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *);
+
        int (*dmabuf_export)(struct drm_i915_gem_object *);
        void (*release)(struct drm_i915_gem_object *);
 };
index ddc21d4b388d2419a63fbd4f4a5745deea80957b..bb44bad15403556fb443998852864d8e0fb83472 100644 (file)
@@ -4425,6 +4425,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = i915_gem_object_get_pages_gtt,
        .put_pages = i915_gem_object_put_pages_gtt,
 };
@@ -5261,7 +5262,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
        struct page *page;
 
        /* Only default objects have per-page dirty tracking */
-       if (WARN_ON(obj->ops != &i915_gem_object_ops))
+       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
                return NULL;
 
        page = i915_gem_object_get_page(obj, n);
index 19fb0bddc1cddfce0804e459319dc11ba96c5ab7..59e45b3a69379a0e892fbd85d7a17ba3f85913eb 100644 (file)
@@ -789,9 +789,10 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
-       .dmabuf_export = i915_gem_userptr_dmabuf_export,
+       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
+       .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
 };
 
index 007ae83a4086d65ff7e4d3862f2ad2a035c74540..b9a564b765282886a3427b2d601dd45e7ea8ed82 100644 (file)
@@ -7514,7 +7514,7 @@ enum skl_disp_power_wells {
 #define  DPLL_CFGCR2_PDIV_7 (4<<2)
 #define  DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
 
-#define DPLL_CFGCR1(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2)
+#define DPLL_CFGCR1(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
 #define DPLL_CFGCR2(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
 
 /* BXT display engine PLL */
index a2aa09ce3202f3ca72e64f550244e2a2c8e8a29f..a8af594fbd0097ba066071710a4832c3b731474e 100644 (file)
@@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev)
                dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
                dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
                dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
-       } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+       } else if (INTEL_INFO(dev)->gen <= 4) {
                dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
                dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
                dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
@@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev)
                I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
                I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
                I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-       } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+       } else if (INTEL_INFO(dev)->gen <= 4) {
                I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
                I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
                I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
index e6408e5583d7a88af4511c6ec2334db2b7922555..54a165b9c92dd94a99faa132bdb19f6e6f96daef 100644 (file)
@@ -1589,7 +1589,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
                         DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
                         DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
                         wrpll_params.central_freq;
-       } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+       } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+                  intel_encoder->type == INTEL_OUTPUT_DP_MST) {
                switch (crtc_state->port_clock / 2) {
                case 81000:
                        ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
index 2f00828ccc6e923a65447424c716d9cc5ee0e99b..5feb65725c04e350c09d33b8969a53770d3d6049 100644 (file)
@@ -2946,7 +2946,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
        struct i915_vma *vma;
        u64 offset;
 
-       intel_fill_fb_ggtt_view(&view, intel_plane->base.fb,
+       intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
                                intel_plane->base.state);
 
        vma = i915_gem_obj_to_ggtt_view(obj, &view);
@@ -12075,11 +12075,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
                pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
        }
 
-       /* Clamp bpp to 8 on screens without EDID 1.4 */
-       if (connector->base.display_info.bpc == 0 && bpp > 24) {
-               DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
-                             bpp);
-               pipe_config->pipe_bpp = 24;
+       /* Clamp bpp to default limit on screens without EDID 1.4 */
+       if (connector->base.display_info.bpc == 0) {
+               int type = connector->base.connector_type;
+               int clamp_bpp = 24;
+
+               /* Fall back to 18 bpp when DP sink capability is unknown. */
+               if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+                   type == DRM_MODE_CONNECTOR_eDP)
+                       clamp_bpp = 18;
+
+               if (bpp > clamp_bpp) {
+                       DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+                                     bpp, clamp_bpp);
+                       pipe_config->pipe_bpp = clamp_bpp;
+               }
        }
 }
 
@@ -13883,11 +13893,12 @@ intel_check_primary_plane(struct drm_plane *plane,
        int max_scale = DRM_PLANE_HELPER_NO_SCALING;
        bool can_position = false;
 
-       /* use scaler when colorkey is not required */
-       if (INTEL_INFO(plane->dev)->gen >= 9 &&
-           state->ckey.flags == I915_SET_COLORKEY_NONE) {
-               min_scale = 1;
-               max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+       if (INTEL_INFO(plane->dev)->gen >= 9) {
+               /* use scaler when colorkey is not required */
+               if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+                       min_scale = 1;
+                       max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+               }
                can_position = true;
        }
 
index 88887938e0bfd3e0cf3e7bdebd66a145f7e036a7..0b8eefc2acc5d93088b960e4714bce55944df82e 100644 (file)
@@ -215,27 +215,46 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
        }
 }
 
-static void
-intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+/*
+ * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * or 1.2 devices that support it, Training Pattern 2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 {
-       bool channel_eq = false;
-       int tries, cr_tries;
-       uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+       u32 training_pattern = DP_TRAINING_PATTERN_2;
+       bool source_tps3, sink_tps3;
 
        /*
-        * Training Pattern 3 for HBR2 or 1.2 devices that support it.
-        *
         * Intel platforms that support HBR2 also support TPS3. TPS3 support is
-        * also mandatory for downstream devices that support HBR2.
+        * also mandatory for downstream devices that support HBR2. However, not
+        * all sinks follow the spec.
         *
         * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
-        * supported but still not enabled.
+        * supported in source but still not enabled.
         */
-       if (intel_dp_source_supports_hbr2(intel_dp) &&
-           drm_dp_tps3_supported(intel_dp->dpcd))
+       source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
+       sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
+
+       if (source_tps3 && sink_tps3) {
                training_pattern = DP_TRAINING_PATTERN_3;
-       else if (intel_dp->link_rate == 540000)
-               DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
+       } else if (intel_dp->link_rate == 540000) {
+               if (!source_tps3)
+                       DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+               if (!sink_tps3)
+                       DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+       }
+
+       return training_pattern;
+}
+
+static void
+intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+{
+       bool channel_eq = false;
+       int tries, cr_tries;
+       u32 training_pattern;
+
+       training_pattern = intel_dp_training_pattern(intel_dp);
 
        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp,
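The helper introduced above reduces the pattern choice to two booleans: TPS3 only when both source and sink advertise support, TPS2 otherwise, with debug messages instead of an error at 5.4 Gbps. A standalone sketch of the same decision; the TPS2/TPS3 values here are local stand-ins, not the real DP_TRAINING_PATTERN_* constants from drm_dp_helper.h:

#include <stdbool.h>
#include <stdio.h>

#define TPS2 2
#define TPS3 3

static int pick_training_pattern(bool source_tps3, bool sink_tps3)
{
        /* TPS3 requires support on both ends; fall back to TPS2 otherwise. */
        return (source_tps3 && sink_tps3) ? TPS3 : TPS2;
}

int main(void)
{
        for (int src = 0; src <= 1; src++)
                for (int sink = 0; sink <= 1; sink++)
                        printf("source_tps3=%d sink_tps3=%d -> TPS%d\n",
                               src, sink, pick_training_pattern(src, sink));
        return 0;
}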
index a5e99ac305daab3ef69471d37b0ec9a97a27425c..e8113ad6547782ff5836839f1354a860aea5e464 100644 (file)
@@ -204,10 +204,28 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
        struct drm_device *dev = intel_dsi->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (dev_priv->vbt.dsi.seq_version >= 3)
+               data++;
+
        gpio = *data++;
 
        /* pull up/down */
-       action = *data++;
+       action = *data++ & 1;
+
+       if (gpio >= ARRAY_SIZE(gtable)) {
+               DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+               goto out;
+       }
+
+       if (!IS_VALLEYVIEW(dev_priv)) {
+               DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
+               goto out;
+       }
+
+       if (dev_priv->vbt.dsi.seq_version >= 3) {
+               DRM_DEBUG_KMS("GPIO element v3 not supported\n");
+               goto out;
+       }
 
        function = gtable[gpio].function_reg;
        pad = gtable[gpio].pad_reg;
@@ -226,6 +244,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
        vlv_gpio_nc_write(dev_priv, pad, val);
        mutex_unlock(&dev_priv->sb_lock);
 
+out:
        return data;
 }
 
index 25254b5c1ac5c95173d0d5b4fa101f35a1131cb7..deb8282c26d83f952473ae145c4fef0b3112b9f1 100644 (file)
@@ -683,7 +683,7 @@ int intel_setup_gmbus(struct drm_device *dev)
        return 0;
 
 err:
-       while (--pin) {
+       while (pin--) {
                if (!intel_gmbus_is_valid_pin(dev_priv, pin))
                        continue;
 
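The one-character change above matters because the error path has to unwind every pin that was successfully set up before the failure, including pin 0. A small standalone comparison of the two loop forms; the failing pin index is chosen arbitrarily:

#include <stdio.h>

int main(void)
{
        int pin;

        printf("while (--pin):");
        for (pin = 3; --pin; )
                printf(" %d", pin);     /* visits 2 1  - pin 0 is never torn down */

        printf("\nwhile (pin--):");
        for (pin = 3; pin--; )
                printf(" %d", pin);     /* visits 2 1 0 - pin 0 included */
        printf("\n");
        return 0;
}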
index 3aa614731d7e4b7160a85336d96df41469587991..f1fa756c5d5d59bf8877daded5c23329f63ed331 100644 (file)
@@ -1707,6 +1707,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
 
index eb5fa05cf476e465ab2c0df6ee52f6870ec568f8..a234687792f0b3be15b98ad08b84743135eac5ce 100644 (file)
@@ -1783,16 +1783,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
                                   const struct intel_plane_state *pstate,
                                   uint32_t mem_value)
 {
-       int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+       /*
+        * We treat the cursor plane as always-on for the purposes of watermark
+        * calculation.  Until we have two-stage watermark programming merged,
+        * this is necessary to avoid flickering.
+        */
+       int cpp = 4;
+       int width = pstate->visible ? pstate->base.crtc_w : 64;
 
-       if (!cstate->base.active || !pstate->visible)
+       if (!cstate->base.active)
                return 0;
 
        return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                              cstate->base.adjusted_mode.crtc_htotal,
-                             drm_rect_width(&pstate->dst),
-                             bpp,
-                             mem_value);
+                             width, cpp, mem_value);
 }
 
 /* Only for WM_LP. */
index 339701d7a9a5069ccce00f2f171f923073d2c3b5..40c6aff57256140a3dccd780356cfcd859f377ba 100644 (file)
@@ -331,6 +331,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (invalidate_domains) {
@@ -403,6 +404,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (invalidate_domains) {
index 6bfc46369db1b4f8daa02fba453a382e863a3585..367a916f364e94617a1b15fcd7d78cd3f90c8dc6 100644 (file)
@@ -304,18 +304,10 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
                unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
                        DENTIST_DPREFCLK_WDIVIDER_MASK) >>
                        DENTIST_DPREFCLK_WDIVIDER_SHIFT;
-
-               if (div < 128 && div >= 96)
-                       div -= 64;
-               else if (div >= 64)
-                       div = div / 2 - 16;
-               else if (div >= 8)
-                       div /= 4;
-               else
-                       div = 0;
+               div = radeon_audio_decode_dfs_div(div);
 
                if (div)
-                       clock = rdev->clock.gpupll_outputfreq * 10 / div;
+                       clock = clock * 100 / div;
 
                WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
                WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
index 9953356fe2637cfacdc2ba41e8ecd082d65213ff..3cf04a2f44bbb05169264a504349dec3d949e5bb 100644 (file)
@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
+       if (ASIC_IS_DCE41(rdev)) {
+               unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
+                       DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+                       DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+               div = radeon_audio_decode_dfs_div(div);
+
+               if (div)
+                       clock = 100 * clock / div;
+       }
+
        WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
        WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
 }
index 4aa5f755572b1593a8b6f7876cf5f7aed183715d..13b6029d65cc524528aec6a1f1727b9d695aa4af 100644 (file)
 #define DCCG_AUDIO_DTO1_CNTL              0x05cc
 #       define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
 
+#define DCE41_DENTIST_DISPCLK_CNTL                     0x049c
+#       define DENTIST_DPREFCLK_WDIVIDER(x)            (((x) & 0x7f) << 24)
+#       define DENTIST_DPREFCLK_WDIVIDER_MASK          (0x7f << 24)
+#       define DENTIST_DPREFCLK_WDIVIDER_SHIFT         24
+
 /* DCE 4.0 AFMT */
 #define HDMI_CONTROL                         0x7030
 #       define HDMI_KEEPOUT_MODE             (1 << 0)
index 5ae6db98aa4d1eae63990bbd5d8cff1b82dfc6e3..78a51b3eda10c4d2931c3c783221ad0625d9a9c9 100644 (file)
@@ -268,7 +268,7 @@ struct radeon_clock {
        uint32_t current_dispclk;
        uint32_t dp_extclk;
        uint32_t max_pixel_clock;
-       uint32_t gpupll_outputfreq;
+       uint32_t vco_freq;
 };
 
 /*
index 08fc1b5effa8a5ec21d4f6234bc168c5b2508b36..de9a2ffcf5f762539d6d4c92464ed5eff19e1c42 100644 (file)
@@ -1106,6 +1106,31 @@ union firmware_info {
        ATOM_FIRMWARE_INFO_V2_2 info_22;
 };
 
+union igp_info {
+       struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
+{
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+       union igp_info *igp_info;
+       u8 frev, crev;
+       u16 data_offset;
+
+       if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+                       &frev, &crev, &data_offset)) {
+               igp_info = (union igp_info *)(mode_info->atom_context->bios +
+                       data_offset);
+               rdev->clock.vco_freq =
+                       le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
+       }
+}
+
 bool radeon_atom_get_clock_info(struct drm_device *dev)
 {
        struct radeon_device *rdev = dev->dev_private;
@@ -1257,12 +1282,18 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                rdev->mode_info.firmware_flags =
                        le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
 
-               if (ASIC_IS_DCE8(rdev)) {
-                       rdev->clock.gpupll_outputfreq =
+               if (ASIC_IS_DCE8(rdev))
+                       rdev->clock.vco_freq =
                                le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
-                       if (rdev->clock.gpupll_outputfreq == 0)
-                               rdev->clock.gpupll_outputfreq = 360000; /* 3.6 GHz */
-               }
+               else if (ASIC_IS_DCE5(rdev))
+                       rdev->clock.vco_freq = rdev->clock.current_dispclk;
+               else if (ASIC_IS_DCE41(rdev))
+                       radeon_atombios_get_dentist_vco_freq(rdev);
+               else
+                       rdev->clock.vco_freq = rdev->clock.current_dispclk;
+
+               if (rdev->clock.vco_freq == 0)
+                       rdev->clock.vco_freq = 360000;  /* 3.6 GHz */
 
                return true;
        }
@@ -1270,14 +1301,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
        return false;
 }
 
-union igp_info {
-       struct _ATOM_INTEGRATED_SYSTEM_INFO info;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
 bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 {
        struct radeon_mode_info *mode_info = &rdev->mode_info;
index 2c02e99b5f95a65a26357f26d67e91a45f8224a4..b214663b370da00905faa882ea27513153aa91bd 100644 (file)
@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_connector->con_priv;
 
        if (!dig || !dig->afmt)
                return;
@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
                radeon_audio_write_speaker_allocation(encoder);
                radeon_audio_write_sad_regs(encoder);
                radeon_audio_write_latency_fields(encoder, mode);
-               if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
-                       radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
-               else
-                       radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+               radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
                radeon_audio_set_audio_packet(encoder);
                radeon_audio_select_pin(encoder);
 
@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
        if (radeon_encoder->audio && radeon_encoder->audio->dpms)
                radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
 }
+
+unsigned int radeon_audio_decode_dfs_div(unsigned int div)
+{
+       if (div >= 8 && div < 64)
+               return (div - 8) * 25 + 200;
+       else if (div >= 64 && div < 96)
+               return (div - 64) * 50 + 1600;
+       else if (div >= 96 && div < 128)
+               return (div - 96) * 100 + 3200;
+       else
+               return 0;
+}
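Judging from the callers earlier in this diff (clock = clock * 100 / div), the decoded value looks like the DFS divider scaled by 100; treating that reading as an assumption, here is a standalone sketch of the piecewise decode with a few sample register fields and an invented VCO frequency:

#include <stdio.h>

static unsigned int decode_dfs_div(unsigned int div)
{
        if (div >= 8 && div < 64)
                return (div - 8) * 25 + 200;    /* 200..1575, i.e. 2.00-15.75 if scaled by 100 */
        else if (div >= 64 && div < 96)
                return (div - 64) * 50 + 1600;  /* 1600..3150, i.e. 16.00-31.50 */
        else if (div >= 96 && div < 128)
                return (div - 96) * 100 + 3200; /* 3200..6300, i.e. 32.00-63.00 */
        else
                return 0;
}

int main(void)
{
        unsigned int clock = 360000;            /* hypothetical VCO frequency, 10 kHz units */
        unsigned int fields[] = { 8, 40, 100 };

        for (int i = 0; i < 3; i++) {
                unsigned int div = decode_dfs_div(fields[i]);
                printf("field %3u -> decode %4u -> clock %u\n",
                       fields[i], div, div ? clock * 100 / div : clock);
        }
        return 0;
}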
index 059cc3012062a7b50708620c557771dcfea821c0..5c70cceaa4a6c35e673ff9c50306e36e7f93ff3d 100644 (file)
@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
 void radeon_audio_mode_set(struct drm_encoder *encoder,
        struct drm_display_mode *mode);
 void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+unsigned int radeon_audio_decode_dfs_div(unsigned int div);
 
 #endif
index a9b01bcf7d0a2242cf181ef31a77cf9150e6e8f8..432480ff9d228857d57170b3353c143bf0501c3f 100644 (file)
@@ -34,7 +34,6 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #endif /* CONFIG_PPC_PMAC */
 
 /* from radeon_legacy_encoder.c */
index b3bb92368ae0e98372e55caab123fb720ecdc48d..298ea1c453c3638fecdf6cef5afce1a48fdefa82 100644 (file)
@@ -1670,8 +1670,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
        /* setup afmt */
        radeon_afmt_init(rdev);
 
-       radeon_fbdev_init(rdev);
-       drm_kms_helper_poll_init(rdev->ddev);
+       if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
+               radeon_fbdev_init(rdev);
+               drm_kms_helper_poll_init(rdev->ddev);
+       }
 
        /* do pm late init */
        ret = radeon_pm_late_init(rdev);
index 3dcc5733ff6915b2e2497ca3d4ff800455f49c20..e26c963f2e9304e58f25517114e773b3c85242bd 100644 (file)
@@ -663,6 +663,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
+               radeon_bo_unreserve(rbo);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }
index 84d45633d28c9b1c58f58dc6ec6383eba2207230..fb6ad143873f5b40d2c027a20dd25914dbd5e29e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
 #include "radeon.h"
 #include "radeon_trace.h"
 
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+       /* For architectures that don't support WC memory,
+        * mask out the WC flag from the BO
+        */
+       if (!drm_arch_can_wc_memory())
+               bo->flags &= ~RADEON_GEM_GTT_WC;
 #endif
 
        radeon_ttm_placement_from_domain(bo, domain);
index 07a0d378e122b1b206b8aceb1a97234254d00e74..a01efe39a8206ec1b1d1fd26a3192c5d72abb885 100644 (file)
@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
                return -EINVAL;
        }
 
-       for (i = 0; i < sign->num; ++i) {
-               if (sign->val[i].chip_id == chip_id)
+       for (i = 0; i < le32_to_cpu(sign->num); ++i) {
+               if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
                        break;
        }
 
-       if (i == sign->num)
+       if (i == le32_to_cpu(sign->num))
                return -EINVAL;
 
        data += (256 - 64) / 4;
@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
        data[1] = sign->val[i].nonce[1];
        data[2] = sign->val[i].nonce[2];
        data[3] = sign->val[i].nonce[3];
-       data[4] = sign->len + 64;
+       data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
 
        memset(&data[5], 0, 44);
        memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
 
-       data += data[4] / 4;
+       data += le32_to_cpu(data[4]) / 4;
        data[0] = sign->val[i].sigval[0];
        data[1] = sign->val[i].sigval[1];
        data[2] = sign->val[i].sigval[2];
        data[3] = sign->val[i].sigval[3];
 
-       rdev->vce.keyselect = sign->val[i].keyselect;
+       rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
 
        return 0;
 }
index d1dc0f7b01dbdf48ad97e2829278b7f61bb6efea..f6a809afceec2e35237c855df8d2803a09674147 100644 (file)
@@ -2,11 +2,11 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
-               rockchip_drm_gem.o
+rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
+               rockchip_drm_gem.o rockchip_drm_vop.o
+rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
 obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
 
-obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o \
-                               rockchip_vop_reg.o
+obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o
index 7bfe243c61737bd112cf591366611ce545e86598..f8f8f29fb7c336d8fffe1a74bddbad66e01854f6 100644 (file)
@@ -461,10 +461,11 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
 
 static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi)
 {
-       unsigned int bpp, i, pre;
+       unsigned int i, pre;
        unsigned long mpclk, pllref, tmp;
        unsigned int m = 1, n = 1, target_mbps = 1000;
        unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
+       int bpp;
 
        bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
        if (bpp < 0) {
index 8397d1b62ef9460910bb28c96e0675a53f5d1b50..a0d51ccb6ea449802a0ff4fac1bcd8b4cf510d3a 100644 (file)
@@ -55,14 +55,12 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
 
        return arm_iommu_attach_device(dev, mapping);
 }
-EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
 
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
                                    struct device *dev)
 {
        arm_iommu_detach_device(dev);
 }
-EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
 
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
                                 const struct rockchip_crtc_funcs *crtc_funcs)
@@ -77,7 +75,6 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
 
 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
 {
@@ -89,7 +86,6 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
 
        priv->crtc_funcs[pipe] = NULL;
 }
-EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
 
 static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
                                                int pipe)
index f7844883cb76cb8cb00c434aaeb9d85d63b0160a..3b8f652698f828574c19f624264e816ab1299ee9 100644 (file)
@@ -39,7 +39,6 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
 
        return rk_fb->obj[plane];
 }
-EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
 
 static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
 {
@@ -177,8 +176,23 @@ static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
                crtc_funcs->wait_for_update(crtc);
 }
 
+/*
+ * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066
+ * have hardware counters for neither vblanks nor scanlines, which results in
+ * a race where:
+ *                             | <-- HW vsync irq and reg take effect
+ *            plane_commit --> |
+ *     get_vblank and wait --> |
+ *                             | <-- handle_vblank, vblank->count + 1
+ *              cleanup_fb --> |
+ *             iommu crash --> |
+ *                             | <-- HW vsync irq and reg take effect
+ *
+ * This function is equivalent but uses rockchip_crtc_wait_for_update() instead
+ * of waiting for vblank_count to change.
+ */
 static void
-rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
+rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
        struct drm_crtc_state *old_crtc_state;
        struct drm_crtc *crtc;
@@ -194,6 +208,10 @@ rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
                if (!crtc->state->active)
                        continue;
 
+               if (!drm_atomic_helper_framebuffer_changed(dev,
+                               old_state, crtc))
+                       continue;
+
                ret = drm_crtc_vblank_get(crtc);
                if (ret != 0)
                        continue;
@@ -241,7 +259,7 @@ rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
 
        drm_atomic_helper_commit_planes(dev, state, true);
 
-       rockchip_atomic_wait_for_complete(state);
+       rockchip_atomic_wait_for_complete(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
 
index 50432e9b5b37f38fcb3c3a10cf28573bf2d59163..73718c5f5bbf720ca6151fc44ca670fc8646c6a9 100644 (file)
 #ifndef _ROCKCHIP_DRM_FBDEV_H
 #define _ROCKCHIP_DRM_FBDEV_H
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 int rockchip_drm_fbdev_init(struct drm_device *dev);
 void rockchip_drm_fbdev_fini(struct drm_device *dev);
+#else
+static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
+{
+       return 0;
+}
+
+static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+#endif
 
 #endif /* _ROCKCHIP_DRM_FBDEV_H */
index d908321b94ced4ffe10936f4c7ef37fc518d127a..18e07338c6e5f31f45b9a38c17884164fc476cff 100644 (file)
@@ -234,13 +234,8 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
        /*
         * align to 64 bytes since Mali requires it.
         */
-       min_pitch = ALIGN(min_pitch, 64);
-
-       if (args->pitch < min_pitch)
-               args->pitch = min_pitch;
-
-       if (args->size < args->pitch * args->height)
-               args->size = args->pitch * args->height;
+       args->pitch = ALIGN(min_pitch, 64);
+       args->size = args->pitch * args->height;
 
        rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
                                                 &args->handle);
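The simplification above always rounds the pitch up to the 64-byte Mali alignment and derives the size from that pitch. A standalone sketch of the arithmetic with a made-up mode; the width/height/bpp values and the width-times-bytes-per-pixel starting pitch are assumptions, and ALIGN is redefined locally to match the kernel macro:

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int width = 1366, height = 768, bpp = 32;      /* hypothetical mode */
        unsigned int min_pitch = width * ((bpp + 7) / 8);       /* 5464 bytes */
        unsigned int pitch = ALIGN(min_pitch, 64);              /* 5504 bytes */
        unsigned long size = (unsigned long)pitch * height;     /* 4227072 bytes */

        printf("pitch=%u size=%lu\n", pitch, size);
        return 0;
}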
index 46c2a8dfd8aa78b22caf02d9b1e217b8d011dd25..fd370548d7d75dc1990226110cf24ea0353a002c 100644 (file)
@@ -43,8 +43,8 @@
 
 #define REG_SET(x, base, reg, v, mode) \
                __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
-#define REG_SET_MASK(x, base, reg, v, mode) \
-               __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+#define REG_SET_MASK(x, base, reg, mask, v, mode) \
+               __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
 
 #define VOP_WIN_SET(x, win, name, v) \
                REG_SET(x, win->base, win->phy->name, v, RELAXED)
 #define VOP_INTR_GET(vop, name) \
                vop_read_reg(vop, 0, &vop->data->ctrl->name)
 
-#define VOP_INTR_SET(vop, name, v) \
-               REG_SET(vop, 0, vop->data->intr->name, v, NORMAL)
+#define VOP_INTR_SET(vop, name, mask, v) \
+               REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
 #define VOP_INTR_SET_TYPE(vop, name, type, v) \
        do { \
-               int i, reg = 0; \
+               int i, reg = 0, mask = 0; \
                for (i = 0; i < vop->data->intr->nintrs; i++) { \
-                       if (vop->data->intr->intrs[i] & type) \
+                       if (vop->data->intr->intrs[i] & type) { \
                                reg |= (v) << i; \
+                               mask |= 1 << i; \
+                       } \
                } \
-               VOP_INTR_SET(vop, name, reg); \
+               VOP_INTR_SET(vop, name, mask, reg); \
        } while (0)
 #define VOP_INTR_GET_TYPE(vop, name, type) \
                vop_get_intr_type(vop, &vop->data->intr->name, type)
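The reworked VOP_INTR_SET_TYPE accumulates the value register and a write mask in lockstep, so only the bits belonging to the selected interrupt types are touched. A standalone sketch of that accumulation; the interrupt table and the type being matched are placeholders, not the real rockchip definitions:

#include <stdio.h>

int main(void)
{
        unsigned int intrs[] = { 0x1, 0x2, 0x4, 0x8 };  /* hypothetical intr bits */
        unsigned int nintrs = 4;
        unsigned int type = 0x2 | 0x8;                  /* matches entries 1 and 3 */
        unsigned int v = 1, reg = 0, mask = 0;

        for (unsigned int i = 0; i < nintrs; i++) {
                if (intrs[i] & type) {
                        reg |= v << i;
                        mask |= 1u << i;
                }
        }

        /* Only the masked bits get written: reg=0x0a mask=0x0a here. */
        printf("reg=0x%02x mask=0x%02x\n", reg, mask);
        return 0;
}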
index 424d515ffcdafae650c9d472a02600db7c0a8485..314ff71db978dedfd8272ea0afe0087f1903ca4a 100644 (file)
@@ -144,19 +144,16 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-/*
- * Asks the firmware to turn on power to the V3D engine.
- *
- * This may be doable with just the clocks interface, though this
- * packet does some other register setup from the firmware, too.
- */
 int
 vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
 {
-       if (on)
-               return pm_generic_poweroff(&vc4->v3d->pdev->dev);
-       else
-               return pm_generic_resume(&vc4->v3d->pdev->dev);
+       /* XXX: This interface is needed for GPU reset, and the way to
+        * do it is to turn our power domain off and back on.  We
+        * can't just reset from within the driver, because the reset
+        * bits are in the power domain's register area, and get set
+        * during the poweron process.
+        */
+       return 0;
 }
 
 static void vc4_v3d_init_hw(struct drm_device *dev)
index c49812b80dd0dae82c77096f75fbd4cced19ffb1..24fb348a44e141f71574f20d1e57bdaad2759ce6 100644 (file)
@@ -25,6 +25,7 @@
  *
  **************************************************************************/
 #include <linux/module.h>
+#include <linux/console.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
@@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 static int __init vmwgfx_init(void)
 {
        int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force())
+               return -EINVAL;
+#endif
+
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
index 7e89288b15375a2e8016ea6b31f5904e95c32719..e637e4ff1c88aed8e4d9cd36d70c0143805675df 100644 (file)
@@ -1075,7 +1075,7 @@ static u32 s32ton(__s32 value, unsigned n)
  * Extract/implement a data field from/to a little endian report (bit array).
  *
  * Code sort-of follows HID spec:
- *     http://www.usb.org/developers/devclass_docs/HID1_11.pdf
+ *     http://www.usb.org/developers/hidpage/HID1_11.pdf
  *
  * While the USB HID spec allows unlimited length bit fields in "report
  * descriptors", most devices never use more than 16 bits.
@@ -1083,20 +1083,37 @@ static u32 s32ton(__s32 value, unsigned n)
  * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
  */
 
-__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
-                    unsigned offset, unsigned n)
-{
-       u64 x;
+static u32 __extract(u8 *report, unsigned offset, int n)
+{
+       unsigned int idx = offset / 8;
+       unsigned int bit_nr = 0;
+       unsigned int bit_shift = offset % 8;
+       int bits_to_copy = 8 - bit_shift;
+       u32 value = 0;
+       u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
+
+       while (n > 0) {
+               value |= ((u32)report[idx] >> bit_shift) << bit_nr;
+               n -= bits_to_copy;
+               bit_nr += bits_to_copy;
+               bits_to_copy = 8;
+               bit_shift = 0;
+               idx++;
+       }
+
+       return value & mask;
+}
 
-       if (n > 32)
+u32 hid_field_extract(const struct hid_device *hid, u8 *report,
+                       unsigned offset, unsigned n)
+{
+       if (n > 32) {
                hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
                         n, current->comm);
+               n = 32;
+       }
 
-       report += offset >> 3;  /* adjust byte index */
-       offset &= 7;            /* now only need bit offset into one byte */
-       x = get_unaligned_le64(report);
-       x = (x >> offset) & ((1ULL << n) - 1);  /* extract bit field */
-       return (u32) x;
+       return __extract(report, offset, n);
 }
 EXPORT_SYMBOL_GPL(hid_field_extract);
 
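The new __extract() walks the report byte by byte, so it no longer has to load a full unaligned 64-bit word that may extend past a short report. A standalone sketch of the same walk; the report bytes and the field position are invented sample data:

#include <stdint.h>
#include <stdio.h>

static uint32_t extract(const uint8_t *report, unsigned offset, int n)
{
        unsigned idx = offset / 8;
        unsigned bit_nr = 0;
        unsigned bit_shift = offset % 8;
        int bits_to_copy = 8 - bit_shift;
        uint32_t value = 0;
        uint32_t mask = n < 32 ? (1U << n) - 1 : ~0U;

        while (n > 0) {
                value |= ((uint32_t)report[idx] >> bit_shift) << bit_nr;
                n -= bits_to_copy;
                bit_nr += bits_to_copy;
                bits_to_copy = 8;
                bit_shift = 0;
                idx++;
        }
        return value & mask;
}

int main(void)
{
        uint8_t report[] = { 0x12, 0x34, 0x56, 0x78 };

        /* 12 bits starting at bit 4 of the little-endian bit stream:
         * the high nibble of byte 0 (0x1) plus byte 1 (0x34) -> 0x341. */
        printf("0x%03x\n", extract(report, 4, 12));
        return 0;
}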
@@ -1106,31 +1123,56 @@ EXPORT_SYMBOL_GPL(hid_field_extract);
  * The data mangled in the bit stream remains in little endian
  * order the whole time. It make more sense to talk about
  * endianness of register values by considering a register
- * a "cached" copy of the little endiad bit stream.
+ * a "cached" copy of the little endian bit stream.
  */
-static void implement(const struct hid_device *hid, __u8 *report,
-                     unsigned offset, unsigned n, __u32 value)
+
+static void __implement(u8 *report, unsigned offset, int n, u32 value)
+{
+       unsigned int idx = offset / 8;
+       unsigned int size = offset + n;
+       unsigned int bit_shift = offset % 8;
+       int bits_to_set = 8 - bit_shift;
+       u8 bit_mask = 0xff << bit_shift;
+
+       while (n - bits_to_set >= 0) {
+               report[idx] &= ~bit_mask;
+               report[idx] |= value << bit_shift;
+               value >>= bits_to_set;
+               n -= bits_to_set;
+               bits_to_set = 8;
+               bit_mask = 0xff;
+               bit_shift = 0;
+               idx++;
+       }
+
+       /* last nibble */
+       if (n) {
+               if (size % 8)
+                       bit_mask &= (1U << (size % 8)) - 1;
+               report[idx] &= ~bit_mask;
+               report[idx] |= (value << bit_shift) & bit_mask;
+       }
+}
+
+static void implement(const struct hid_device *hid, u8 *report,
+                     unsigned offset, unsigned n, u32 value)
 {
-       u64 x;
-       u64 m = (1ULL << n) - 1;
+       u64 m;
 
-       if (n > 32)
+       if (n > 32) {
                hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
                         __func__, n, current->comm);
+               n = 32;
+       }
 
+       m = (1ULL << n) - 1;
        if (value > m)
                hid_warn(hid, "%s() called with too large value %d! (%s)\n",
                         __func__, value, current->comm);
        WARN_ON(value > m);
        value &= m;
 
-       report += offset >> 3;
-       offset &= 7;
-
-       x = get_unaligned_le64(report);
-       x &= ~(m << offset);
-       x |= ((u64)value) << offset;
-       put_unaligned_le64(x, report);
+       __implement(report, offset, n, value);
 }
 
 /*
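__implement() mirrors __extract(): it clears only the masked bits of each byte, so neighbouring fields in the report are left untouched. A standalone sketch writing a 12-bit value into an all-ones report at an arbitrary bit offset:

#include <stdint.h>
#include <stdio.h>

static void implement_bits(uint8_t *report, unsigned offset, int n, uint32_t value)
{
        unsigned idx = offset / 8;
        unsigned size = offset + n;
        unsigned bit_shift = offset % 8;
        int bits_to_set = 8 - bit_shift;
        uint8_t bit_mask = 0xff << bit_shift;

        while (n - bits_to_set >= 0) {
                report[idx] &= ~bit_mask;
                report[idx] |= value << bit_shift;
                value >>= bits_to_set;
                n -= bits_to_set;
                bits_to_set = 8;
                bit_mask = 0xff;
                bit_shift = 0;
                idx++;
        }

        if (n) {                        /* trailing partial byte */
                if (size % 8)
                        bit_mask &= (1U << (size % 8)) - 1;
                report[idx] &= ~bit_mask;
                report[idx] |= (value << bit_shift) & bit_mask;
        }
}

int main(void)
{
        uint8_t report[4] = { 0xff, 0xff, 0xff, 0xff };

        implement_bits(report, 4, 12, 0x341);

        /* Expect "1f 34 ff ff": the low nibble of byte 0 and bytes 2-3 keep
         * their original bits; only the 12-bit field changes. */
        for (int i = 0; i < 4; i++)
                printf("%02x ", report[i]);
        printf("\n");
        return 0;
}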
@@ -1251,6 +1293,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
                /* Ignore report if ErrorRollOver */
                if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
                    value[n] >= min && value[n] <= max &&
+                   value[n] - min < field->maxusage &&
                    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
                        goto exit;
        }
@@ -1263,11 +1306,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
                }
 
                if (field->value[n] >= min && field->value[n] <= max
+                       && field->value[n] - min < field->maxusage
                        && field->usage[field->value[n] - min].hid
                        && search(value, field->value[n], count))
                                hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
 
                if (value[n] >= min && value[n] <= max
+                       && value[n] - min < field->maxusage
                        && field->usage[value[n] - min].hid
                        && search(field->value, value[n], count))
                                hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
@@ -2003,6 +2048,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
index 1d78ba3b799e6f020eee8f9e16c9cab5b8f61627..8fd4bf77f264940ec04631252062e06aafe148c8 100644 (file)
@@ -151,7 +151,7 @@ static inline int drff_init(struct hid_device *hid)
  * descriptor. In any case, it's a wonder it works on Windows.
  *
  *  Usage Page (Desktop),             ; Generic desktop controls (01h)
- *  Usage (Joystik),                  ; Joystik (04h, application collection)
+ *  Usage (Joystick),                 ; Joystick (04h, application collection)
  *  Collection (Application),
  *    Collection (Logical),
  *      Report Size (8),
@@ -207,7 +207,7 @@ static inline int drff_init(struct hid_device *hid)
 /* Fixed report descriptor for PID 0x011 joystick */
 static __u8 pid0011_rdesc_fixed[] = {
        0x05, 0x01,         /*  Usage Page (Desktop),           */
-       0x09, 0x04,         /*  Usage (Joystik),                */
+       0x09, 0x04,         /*  Usage (Joystick),               */
        0xA1, 0x01,         /*  Collection (Application),       */
        0xA1, 0x02,         /*      Collection (Logical),       */
        0x14,               /*          Logical Minimum (0),    */
index b6ff6e78ac54e281a8617212024147d63c891346..96fefdbbc75b28ae07a80d7c36665fd9033e84b0 100644 (file)
@@ -61,6 +61,9 @@
 #define USB_VENDOR_ID_AIREN            0x1a2c
 #define USB_DEVICE_ID_AIREN_SLIMPLUS   0x0002
 
+#define USB_VENDOR_ID_AKAI             0x2011
+#define USB_DEVICE_ID_AKAI_MPKMINI2    0x0715
+
 #define USB_VENDOR_ID_ALCOR            0x058f
 #define USB_DEVICE_ID_ALCOR_USBRS232   0x9720
 
 #define USB_VENDOR_ID_QUANTA           0x0408
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH             0x3000
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001                0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003                0x3003
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008                0x3008
 
 #define USB_VENDOR_ID_RAZER            0x1532
 #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER             0x0002
 #define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER    0x1000
 
+#define USB_VENDOR_ID_SINO_LITE                        0x1345
+#define USB_DEVICE_ID_SINO_LITE_CONTROLLER     0x3008
+
 #define USB_VENDOR_ID_SOUNDGRAPH       0x15c2
 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST    0x0034
 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST     0x0046
 #define USB_DEVICE_ID_RI_KA_WEBMAIL    0x1320  /* Webmail Notifier */
 
 #define USB_VENDOR_ID_MULTIPLE_1781    0x1781
-#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD    0x0a8d
+#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD    0x0a9d
 
 #define USB_VENDOR_ID_DRACAL_RAPHNET   0x289b
 #define USB_DEVICE_ID_RAPHNET_2NES2SNES        0x0002
index c690fae02cf823d16b6d985d944b6bda143b6282..feb2be71f77c199e9ab21fabd0e6ffcd267d2d9e 100644 (file)
@@ -61,7 +61,7 @@
  */
 static __u8 df_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),                   */
-0x09, 0x04,         /*  Usage (Joystik),                        */
+0x09, 0x04,         /*  Usage (Joystick),                       */
 0xA1, 0x01,         /*  Collection (Application),               */
 0xA1, 0x02,         /*      Collection (Logical),               */
 0x95, 0x01,         /*          Report Count (1),               */
@@ -127,7 +127,7 @@ static __u8 df_rdesc_fixed[] = {
 
 static __u8 dfp_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),                   */
-0x09, 0x04,         /*  Usage (Joystik),                        */
+0x09, 0x04,         /*  Usage (Joystick),                       */
 0xA1, 0x01,         /*  Collection (Application),               */
 0xA1, 0x02,         /*      Collection (Logical),               */
 0x95, 0x01,         /*          Report Count (1),               */
@@ -175,7 +175,7 @@ static __u8 dfp_rdesc_fixed[] = {
 
 static __u8 fv_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),                   */
-0x09, 0x04,         /*  Usage (Joystik),                        */
+0x09, 0x04,         /*  Usage (Joystick),                       */
 0xA1, 0x01,         /*  Collection (Application),               */
 0xA1, 0x02,         /*      Collection (Logical),               */
 0x95, 0x01,         /*          Report Count (1),               */
@@ -242,7 +242,7 @@ static __u8 fv_rdesc_fixed[] = {
 
 static __u8 momo_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),               */
-0x09, 0x04,         /*  Usage (Joystik),                    */
+0x09, 0x04,         /*  Usage (Joystick),                   */
 0xA1, 0x01,         /*  Collection (Application),           */
 0xA1, 0x02,         /*      Collection (Logical),           */
 0x95, 0x01,         /*          Report Count (1),           */
@@ -288,7 +288,7 @@ static __u8 momo_rdesc_fixed[] = {
 
 static __u8 momo2_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),               */
-0x09, 0x04,         /*  Usage (Joystik),                    */
+0x09, 0x04,         /*  Usage (Joystick),                   */
 0xA1, 0x01,         /*  Collection (Application),           */
 0xA1, 0x02,         /*      Collection (Logical),           */
 0x95, 0x01,         /*          Report Count (1),           */
index bd2ab476c65ef9f73a04a1f732dc7f75a3b49835..2e2515a4c070eac305a834c9229d0e1fa27f722f 100644 (file)
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/device.h>
+#include <linux/input.h>
+#include <linux/usb.h>
 #include <linux/hid.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/kfifo.h>
 #include <linux/input/mt.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/fixp-arith.h>
 #include <asm/unaligned.h>
+#include "usbhid/usbhid.h"
 #include "hid-ids.h"
 
 MODULE_LICENSE("GPL");
@@ -773,6 +779,589 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
        }
 }
 
+/* -------------------------------------------------------------------------- */
+/* 0x8123: Force feedback support                                             */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_FF_GET_INFO              0x01
+#define HIDPP_FF_RESET_ALL             0x11
+#define HIDPP_FF_DOWNLOAD_EFFECT       0x21
+#define HIDPP_FF_SET_EFFECT_STATE      0x31
+#define HIDPP_FF_DESTROY_EFFECT                0x41
+#define HIDPP_FF_GET_APERTURE          0x51
+#define HIDPP_FF_SET_APERTURE          0x61
+#define HIDPP_FF_GET_GLOBAL_GAINS      0x71
+#define HIDPP_FF_SET_GLOBAL_GAINS      0x81
+
+#define HIDPP_FF_EFFECT_STATE_GET      0x00
+#define HIDPP_FF_EFFECT_STATE_STOP     0x01
+#define HIDPP_FF_EFFECT_STATE_PLAY     0x02
+#define HIDPP_FF_EFFECT_STATE_PAUSE    0x03
+
+#define HIDPP_FF_EFFECT_CONSTANT       0x00
+#define HIDPP_FF_EFFECT_PERIODIC_SINE          0x01
+#define HIDPP_FF_EFFECT_PERIODIC_SQUARE                0x02
+#define HIDPP_FF_EFFECT_PERIODIC_TRIANGLE      0x03
+#define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP    0x04
+#define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN  0x05
+#define HIDPP_FF_EFFECT_SPRING         0x06
+#define HIDPP_FF_EFFECT_DAMPER         0x07
+#define HIDPP_FF_EFFECT_FRICTION       0x08
+#define HIDPP_FF_EFFECT_INERTIA                0x09
+#define HIDPP_FF_EFFECT_RAMP           0x0A
+
+#define HIDPP_FF_EFFECT_AUTOSTART      0x80
+
+#define HIDPP_FF_EFFECTID_NONE         -1
+#define HIDPP_FF_EFFECTID_AUTOCENTER   -2
+
+#define HIDPP_FF_MAX_PARAMS    20
+#define HIDPP_FF_RESERVED_SLOTS        1
+
+struct hidpp_ff_private_data {
+       struct hidpp_device *hidpp;
+       u8 feature_index;
+       u8 version;
+       u16 gain;
+       s16 range;
+       u8 slot_autocenter;
+       u8 num_effects;
+       int *effect_ids;
+       struct workqueue_struct *wq;
+       atomic_t workqueue_size;
+};
+
+struct hidpp_ff_work_data {
+       struct work_struct work;
+       struct hidpp_ff_private_data *data;
+       int effect_id;
+       u8 command;
+       u8 params[HIDPP_FF_MAX_PARAMS];
+       u8 size;
+};
+
+static const signed short hiddpp_ff_effects[] = {
+       FF_CONSTANT,
+       FF_PERIODIC,
+       FF_SINE,
+       FF_SQUARE,
+       FF_SAW_UP,
+       FF_SAW_DOWN,
+       FF_TRIANGLE,
+       FF_SPRING,
+       FF_DAMPER,
+       FF_AUTOCENTER,
+       FF_GAIN,
+       -1
+};
+
+static const signed short hiddpp_ff_effects_v2[] = {
+       FF_RAMP,
+       FF_FRICTION,
+       FF_INERTIA,
+       -1
+};
+
+static const u8 HIDPP_FF_CONDITION_CMDS[] = {
+       HIDPP_FF_EFFECT_SPRING,
+       HIDPP_FF_EFFECT_FRICTION,
+       HIDPP_FF_EFFECT_DAMPER,
+       HIDPP_FF_EFFECT_INERTIA
+};
+
+static const char *HIDPP_FF_CONDITION_NAMES[] = {
+       "spring",
+       "friction",
+       "damper",
+       "inertia"
+};
+
+
+static u8 hidpp_ff_find_effect(struct hidpp_ff_private_data *data, int effect_id)
+{
+       int i;
+
+       for (i = 0; i < data->num_effects; i++)
+               if (data->effect_ids[i] == effect_id)
+                       return i+1;
+
+       return 0;
+}
+
+static void hidpp_ff_work_handler(struct work_struct *w)
+{
+       struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work);
+       struct hidpp_ff_private_data *data = wd->data;
+       struct hidpp_report response;
+       u8 slot;
+       int ret;
+
+       /* add slot number if needed */
+       switch (wd->effect_id) {
+       case HIDPP_FF_EFFECTID_AUTOCENTER:
+               wd->params[0] = data->slot_autocenter;
+               break;
+       case HIDPP_FF_EFFECTID_NONE:
+               /* leave slot as zero */
+               break;
+       default:
+               /* find current slot for effect */
+               wd->params[0] = hidpp_ff_find_effect(data, wd->effect_id);
+               break;
+       }
+
+       /* send command and wait for reply */
+       ret = hidpp_send_fap_command_sync(data->hidpp, data->feature_index,
+               wd->command, wd->params, wd->size, &response);
+
+       if (ret) {
+               hid_err(data->hidpp->hid_dev, "Failed to send command to device!\n");
+               goto out;
+       }
+
+       /* parse return data */
+       switch (wd->command) {
+       case HIDPP_FF_DOWNLOAD_EFFECT:
+               slot = response.fap.params[0];
+               if (slot > 0 && slot <= data->num_effects) {
+                       if (wd->effect_id >= 0)
+                               /* regular effect uploaded */
+                               data->effect_ids[slot-1] = wd->effect_id;
+                       else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
+                               /* autocenter spring uploaded */
+                               data->slot_autocenter = slot;
+               }
+               break;
+       case HIDPP_FF_DESTROY_EFFECT:
+               if (wd->effect_id >= 0)
+                       /* regular effect destroyed */
+                       data->effect_ids[wd->params[0]-1] = -1;
+               else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
+                       /* autocenter spring destroyed */
+                       data->slot_autocenter = 0;
+               break;
+       case HIDPP_FF_SET_GLOBAL_GAINS:
+               data->gain = (wd->params[0] << 8) + wd->params[1];
+               break;
+       case HIDPP_FF_SET_APERTURE:
+               data->range = (wd->params[0] << 8) + wd->params[1];
+               break;
+       default:
+               /* no action needed */
+               break;
+       }
+
+out:
+       atomic_dec(&data->workqueue_size);
+       kfree(wd);
+}
+
+static int hidpp_ff_queue_work(struct hidpp_ff_private_data *data, int effect_id, u8 command, u8 *params, u8 size)
+{
+       struct hidpp_ff_work_data *wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+       int s;
+
+       if (!wd)
+               return -ENOMEM;
+
+       INIT_WORK(&wd->work, hidpp_ff_work_handler);
+
+       wd->data = data;
+       wd->effect_id = effect_id;
+       wd->command = command;
+       wd->size = size;
+       memcpy(wd->params, params, size);
+
+       atomic_inc(&data->workqueue_size);
+       queue_work(data->wq, &wd->work);
+
+       /* warn about excessive queue size */
+       s = atomic_read(&data->workqueue_size);
+       if (s >= 20 && s % 20 == 0)
+               hid_warn(data->hidpp->hid_dev, "Force feedback command queue contains %d commands, causing substantial delays!", s);
+
+       return 0;
+}
+
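
hidpp_ff_work_handler() and hidpp_ff_queue_work() above follow a simple pattern: several force-feedback entry points can be called from atomic context, so each HID++ command is packed into a heap-allocated work item and replayed from a dedicated single-threaded workqueue, which keeps the commands ordered and lets the handler free the item once the synchronous transfer completes. A stripped-down sketch of that pattern (the module scaffolding and integer payload are illustrative only):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct cmd_work {
        struct work_struct work;
        int payload;
};

static struct workqueue_struct *wq;

static void cmd_handler(struct work_struct *w)
{
        struct cmd_work *cw = container_of(w, struct cmd_work, work);

        pr_info("handling command %d\n", cw->payload);
        kfree(cw);                        /* the handler owns and frees the item */
}

static int queue_cmd(int payload)
{
        struct cmd_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);

        if (!cw)
                return -ENOMEM;
        INIT_WORK(&cw->work, cmd_handler);
        cw->payload = payload;
        queue_work(wq, &cw->work);
        return 0;
}

static int __init demo_init(void)
{
        int ret;

        wq = create_singlethread_workqueue("demo-sendqueue");
        if (!wq)
                return -ENOMEM;

        ret = queue_cmd(42);
        if (ret)
                destroy_workqueue(wq);
        return ret;
}

static void __exit demo_exit(void)
{
        destroy_workqueue(wq);            /* flushes pending work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
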
+static int hidpp_ff_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 params[20];
+       u8 size;
+       int force;
+
+       /* set common parameters */
+       params[2] = effect->replay.length >> 8;
+       params[3] = effect->replay.length & 255;
+       params[4] = effect->replay.delay >> 8;
+       params[5] = effect->replay.delay & 255;
+
+       switch (effect->type) {
+       case FF_CONSTANT:
+               force = (effect->u.constant.level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+               params[1] = HIDPP_FF_EFFECT_CONSTANT;
+               params[6] = force >> 8;
+               params[7] = force & 255;
+               params[8] = effect->u.constant.envelope.attack_level >> 7;
+               params[9] = effect->u.constant.envelope.attack_length >> 8;
+               params[10] = effect->u.constant.envelope.attack_length & 255;
+               params[11] = effect->u.constant.envelope.fade_level >> 7;
+               params[12] = effect->u.constant.envelope.fade_length >> 8;
+               params[13] = effect->u.constant.envelope.fade_length & 255;
+               size = 14;
+               dbg_hid("Uploading constant force level=%d in dir %d = %d\n",
+                               effect->u.constant.level,
+                               effect->direction, force);
+               dbg_hid("          envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+                               effect->u.constant.envelope.attack_level,
+                               effect->u.constant.envelope.attack_length,
+                               effect->u.constant.envelope.fade_level,
+                               effect->u.constant.envelope.fade_length);
+               break;
+       case FF_PERIODIC:
+       {
+               switch (effect->u.periodic.waveform) {
+               case FF_SINE:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_SINE;
+                       break;
+               case FF_SQUARE:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_SQUARE;
+                       break;
+               case FF_SAW_UP:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP;
+                       break;
+               case FF_SAW_DOWN:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN;
+                       break;
+               case FF_TRIANGLE:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_TRIANGLE;
+                       break;
+               default:
+                       hid_err(data->hidpp->hid_dev, "Unexpected periodic waveform type %i!\n", effect->u.periodic.waveform);
+                       return -EINVAL;
+               }
+               force = (effect->u.periodic.magnitude * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+               params[6] = effect->u.periodic.magnitude >> 8;
+               params[7] = effect->u.periodic.magnitude & 255;
+               params[8] = effect->u.periodic.offset >> 8;
+               params[9] = effect->u.periodic.offset & 255;
+               params[10] = effect->u.periodic.period >> 8;
+               params[11] = effect->u.periodic.period & 255;
+               params[12] = effect->u.periodic.phase >> 8;
+               params[13] = effect->u.periodic.phase & 255;
+               params[14] = effect->u.periodic.envelope.attack_level >> 7;
+               params[15] = effect->u.periodic.envelope.attack_length >> 8;
+               params[16] = effect->u.periodic.envelope.attack_length & 255;
+               params[17] = effect->u.periodic.envelope.fade_level >> 7;
+               params[18] = effect->u.periodic.envelope.fade_length >> 8;
+               params[19] = effect->u.periodic.envelope.fade_length & 255;
+               size = 20;
+               dbg_hid("Uploading periodic force mag=%d/dir=%d, offset=%d, period=%d ms, phase=%d\n",
+                               effect->u.periodic.magnitude, effect->direction,
+                               effect->u.periodic.offset,
+                               effect->u.periodic.period,
+                               effect->u.periodic.phase);
+               dbg_hid("          envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+                               effect->u.periodic.envelope.attack_level,
+                               effect->u.periodic.envelope.attack_length,
+                               effect->u.periodic.envelope.fade_level,
+                               effect->u.periodic.envelope.fade_length);
+               break;
+       }
+       case FF_RAMP:
+               params[1] = HIDPP_FF_EFFECT_RAMP;
+               force = (effect->u.ramp.start_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+               params[6] = force >> 8;
+               params[7] = force & 255;
+               force = (effect->u.ramp.end_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+               params[8] = force >> 8;
+               params[9] = force & 255;
+               params[10] = effect->u.ramp.envelope.attack_level >> 7;
+               params[11] = effect->u.ramp.envelope.attack_length >> 8;
+               params[12] = effect->u.ramp.envelope.attack_length & 255;
+               params[13] = effect->u.ramp.envelope.fade_level >> 7;
+               params[14] = effect->u.ramp.envelope.fade_length >> 8;
+               params[15] = effect->u.ramp.envelope.fade_length & 255;
+               size = 16;
+               dbg_hid("Uploading ramp force level=%d -> %d in dir %d = %d\n",
+                               effect->u.ramp.start_level,
+                               effect->u.ramp.end_level,
+                               effect->direction, force);
+               dbg_hid("          envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+                               effect->u.ramp.envelope.attack_level,
+                               effect->u.ramp.envelope.attack_length,
+                               effect->u.ramp.envelope.fade_level,
+                               effect->u.ramp.envelope.fade_length);
+               break;
+       case FF_FRICTION:
+       case FF_INERTIA:
+       case FF_SPRING:
+       case FF_DAMPER:
+               params[1] = HIDPP_FF_CONDITION_CMDS[effect->type - FF_SPRING];
+               params[6] = effect->u.condition[0].left_saturation >> 9;
+               params[7] = (effect->u.condition[0].left_saturation >> 1) & 255;
+               params[8] = effect->u.condition[0].left_coeff >> 8;
+               params[9] = effect->u.condition[0].left_coeff & 255;
+               params[10] = effect->u.condition[0].deadband >> 9;
+               params[11] = (effect->u.condition[0].deadband >> 1) & 255;
+               params[12] = effect->u.condition[0].center >> 8;
+               params[13] = effect->u.condition[0].center & 255;
+               params[14] = effect->u.condition[0].right_coeff >> 8;
+               params[15] = effect->u.condition[0].right_coeff & 255;
+               params[16] = effect->u.condition[0].right_saturation >> 9;
+               params[17] = (effect->u.condition[0].right_saturation >> 1) & 255;
+               size = 18;
+               dbg_hid("Uploading %s force left coeff=%d, left sat=%d, right coeff=%d, right sat=%d\n",
+                               HIDPP_FF_CONDITION_NAMES[effect->type - FF_SPRING],
+                               effect->u.condition[0].left_coeff,
+                               effect->u.condition[0].left_saturation,
+                               effect->u.condition[0].right_coeff,
+                               effect->u.condition[0].right_saturation);
+               dbg_hid("          deadband=%d, center=%d\n",
+                               effect->u.condition[0].deadband,
+                               effect->u.condition[0].center);
+               break;
+       default:
+               hid_err(data->hidpp->hid_dev, "Unexpected force type %i!\n", effect->type);
+               return -EINVAL;
+       }
+
+       return hidpp_ff_queue_work(data, effect->id, HIDPP_FF_DOWNLOAD_EFFECT, params, size);
+}
+
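
hidpp_ff_upload_effect() above projects each signed effect level onto the wheel's single axis: the 16-bit input-layer direction (0x0000..0xffff covering 0..360 degrees) is converted to degrees with (direction * 360) >> 16, scaled by the Q15 fixed-point sine from linux/fixp-arith.h, and shifted back down by 15. The floating-point sketch below restates that mapping so the numbers are easy to check by hand (project_force() is illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>
#include <math.h>

#define PI 3.14159265358979323846

/* 'direction' is the 16-bit input-layer angle, 'level' the signed effect
 * strength. The kernel uses fixp_sin16(); plain sin() is used here only
 * to make the mapping obvious. */
static int project_force(int16_t level, uint16_t direction)
{
        unsigned degrees = ((unsigned)direction * 360) >> 16;

        return (int)(level * sin(degrees * PI / 180.0));
}

int main(void)
{
        printf("%d\n", project_force(32767, 0x4000));   /*  90 deg -> ~+32767 */
        printf("%d\n", project_force(32767, 0xC000));   /* 270 deg -> ~-32767 */
        return 0;
}
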
+static int hidpp_ff_playback(struct input_dev *dev, int effect_id, int value)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 params[2];
+
+       params[1] = value ? HIDPP_FF_EFFECT_STATE_PLAY : HIDPP_FF_EFFECT_STATE_STOP;
+
+       dbg_hid("St%sing playback of effect %d.\n", value?"art":"opp", effect_id);
+
+       return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_SET_EFFECT_STATE, params, ARRAY_SIZE(params));
+}
+
+static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 slot = 0;
+
+       dbg_hid("Erasing effect %d.\n", effect_id);
+
+       return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_DESTROY_EFFECT, &slot, 1);
+}
+
+static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 params[18];
+
+       dbg_hid("Setting autocenter to %d.\n", magnitude);
+
+       /* start a standard spring effect */
+       params[1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART;
+       /* zero delay and duration */
+       params[2] = params[3] = params[4] = params[5] = 0;
+       /* set coeff to 25% of saturation */
+       params[8] = params[14] = magnitude >> 11;
+       params[9] = params[15] = (magnitude >> 3) & 255;
+       params[6] = params[16] = magnitude >> 9;
+       params[7] = params[17] = (magnitude >> 1) & 255;
+       /* zero deadband and center */
+       params[10] = params[11] = params[12] = params[13] = 0;
+
+       hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_AUTOCENTER, HIDPP_FF_DOWNLOAD_EFFECT, params, ARRAY_SIZE(params));
+}
+
+static void hidpp_ff_set_gain(struct input_dev *dev, u16 gain)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 params[4];
+
+       dbg_hid("Setting gain to %d.\n", gain);
+
+       params[0] = gain >> 8;
+       params[1] = gain & 255;
+       params[2] = 0; /* no boost */
+       params[3] = 0;
+
+       hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_NONE, HIDPP_FF_SET_GLOBAL_GAINS, params, ARRAY_SIZE(params));
+}
+
+static ssize_t hidpp_ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct hid_device *hid = to_hid_device(dev);
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       struct input_dev *idev = hidinput->input;
+       struct hidpp_ff_private_data *data = idev->ff->private;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", data->range);
+}
+
+static ssize_t hidpp_ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct hid_device *hid = to_hid_device(dev);
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       struct input_dev *idev = hidinput->input;
+       struct hidpp_ff_private_data *data = idev->ff->private;
+       u8 params[2];
+       int range = simple_strtoul(buf, NULL, 10);
+
+       range = clamp(range, 180, 900);
+
+       params[0] = range >> 8;
+       params[1] = range & 0x00FF;
+
+       hidpp_ff_queue_work(data, -1, HIDPP_FF_SET_APERTURE, params, ARRAY_SIZE(params));
+
+       return count;
+}
+
+static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp_ff_range_show, hidpp_ff_range_store);
+
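
The DEVICE_ATTR above exposes the wheel rotation range as a plain-text "range" attribute on the HID device: reads return the value cached in hidpp_ff_private_data, and writes are clamped to 180..900 degrees before being queued as a HIDPP_FF_SET_APERTURE command. A small userspace sketch of driving it (the sysfs path depends on bus, vendor and product IDs; the one below is only an example):

#include <stdio.h>

int main(void)
{
        /* Example path only: substitute the bound HID device, e.g.
         * /sys/bus/hid/devices/<bus>:<vid>:<pid>.<n>/range */
        const char *path = "/sys/bus/hid/devices/0003:046D:C262.0001/range";
        unsigned range;
        FILE *f;

        f = fopen(path, "r");
        if (!f)
                return 1;
        if (fscanf(f, "%u", &range) == 1)
                printf("current wheel range: %u degrees\n", range);
        fclose(f);

        f = fopen(path, "w");             /* usually needs root */
        if (!f)
                return 1;
        fprintf(f, "%u\n", 540u);         /* driver clamps to 180..900 */
        fclose(f);
        return 0;
}
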
+static void hidpp_ff_destroy(struct ff_device *ff)
+{
+       struct hidpp_ff_private_data *data = ff->private;
+
+       kfree(data->effect_ids);
+}
+
+static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+{
+       struct hid_device *hid = hidpp->hid_dev;
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       struct input_dev *dev = hidinput->input;
+       const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
+       const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
+       struct ff_device *ff;
+       struct hidpp_report response;
+       struct hidpp_ff_private_data *data;
+       int error, j, num_slots;
+       u8 version;
+
+       if (!dev) {
+               hid_err(hid, "Struct input_dev not set!\n");
+               return -EINVAL;
+       }
+
+       /* Get firmware release */
+       version = bcdDevice & 255;
+
+       /* Set supported force feedback capabilities */
+       for (j = 0; hiddpp_ff_effects[j] >= 0; j++)
+               set_bit(hiddpp_ff_effects[j], dev->ffbit);
+       if (version > 1)
+               for (j = 0; hiddpp_ff_effects_v2[j] >= 0; j++)
+                       set_bit(hiddpp_ff_effects_v2[j], dev->ffbit);
+
+       /* Read number of slots available in device */
+       error = hidpp_send_fap_command_sync(hidpp, feature_index,
+               HIDPP_FF_GET_INFO, NULL, 0, &response);
+       if (error) {
+               if (error < 0)
+                       return error;
+               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+                       __func__, error);
+               return -EPROTO;
+       }
+
+       num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+       error = input_ff_create(dev, num_slots);
+
+       if (error) {
+               hid_err(dev, "Failed to create FF device!\n");
+               return error;
+       }
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
+       if (!data->effect_ids) {
+               kfree(data);
+               return -ENOMEM;
+       }
+       data->hidpp = hidpp;
+       data->feature_index = feature_index;
+       data->version = version;
+       data->slot_autocenter = 0;
+       data->num_effects = num_slots;
+       for (j = 0; j < num_slots; j++)
+               data->effect_ids[j] = -1;
+
+       ff = dev->ff;
+       ff->private = data;
+
+       ff->upload = hidpp_ff_upload_effect;
+       ff->erase = hidpp_ff_erase_effect;
+       ff->playback = hidpp_ff_playback;
+       ff->set_gain = hidpp_ff_set_gain;
+       ff->set_autocenter = hidpp_ff_set_autocenter;
+       ff->destroy = hidpp_ff_destroy;
+
+
+       /* reset all forces */
+       error = hidpp_send_fap_command_sync(hidpp, feature_index,
+               HIDPP_FF_RESET_ALL, NULL, 0, &response);
+
+       /* Read current Range */
+       error = hidpp_send_fap_command_sync(hidpp, feature_index,
+               HIDPP_FF_GET_APERTURE, NULL, 0, &response);
+       if (error)
+               hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
+       data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
+
+       /* Create sysfs interface */
+       error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+       if (error)
+               hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
+
+       /* Read the current gain values */
+       error = hidpp_send_fap_command_sync(hidpp, feature_index,
+               HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
+       if (error)
+               hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
+       data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
+       /* ignore boost value at response.fap.params[2] */
+
+       /* init the hardware command queue */
+       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+       atomic_set(&data->workqueue_size, 0);
+
+       /* initialize with zero autocenter to get wheel in usable state */
+       hidpp_ff_set_autocenter(dev, 0);
+
+       hid_info(hid, "Force feeback support loaded (firmware release %d).\n", version);
+
+       return 0;
+}
+
+static int hidpp_ff_deinit(struct hid_device *hid)
+{
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       struct input_dev *dev = hidinput->input;
+       struct hidpp_ff_private_data *data;
+
+       if (!dev) {
+               hid_err(hid, "Struct input_dev not found!\n");
+               return -EINVAL;
+       }
+
+       hid_info(hid, "Unloading HID++ force feedback.\n");
+       data = dev->ff->private;
+       if (!data) {
+               hid_err(hid, "Private data not found!\n");
+               return -EINVAL;
+       }
+
+       destroy_workqueue(data->wq);
+       device_remove_file(&hid->dev, &dev_attr_range);
+
+       return 0;
+}
+
+
 /* ************************************************************************** */
 /*                                                                            */
 /* Device Support                                                             */
@@ -1301,121 +1890,22 @@ static int k400_connect(struct hid_device *hdev, bool connected)
 
 #define HIDPP_PAGE_G920_FORCE_FEEDBACK                 0x8123
 
-/* Using session ID = 1 */
-#define CMD_G920_FORCE_GET_APERTURE                    0x51
-#define CMD_G920_FORCE_SET_APERTURE                    0x61
-
-struct g920_private_data {
-       u8 force_feature;
-       u16 range;
-};
-
-static ssize_t g920_range_show(struct device *dev, struct device_attribute *attr,
-                               char *buf)
-{
-       struct hid_device *hid = to_hid_device(dev);
-       struct hidpp_device *hidpp = hid_get_drvdata(hid);
-       struct g920_private_data *pdata;
-
-       pdata = hidpp->private_data;
-       if (!pdata) {
-               hid_err(hid, "Private driver data not found!\n");
-               return -EINVAL;
-       }
-
-       return scnprintf(buf, PAGE_SIZE, "%u\n", pdata->range);
-}
-
-static ssize_t g920_range_store(struct device *dev, struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct hid_device *hid = to_hid_device(dev);
-       struct hidpp_device *hidpp = hid_get_drvdata(hid);
-       struct g920_private_data *pdata;
-       struct hidpp_report response;
-       u8 params[2];
-       int ret;
-       u16 range = simple_strtoul(buf, NULL, 10);
-
-       pdata = hidpp->private_data;
-       if (!pdata) {
-               hid_err(hid, "Private driver data not found!\n");
-               return -EINVAL;
-       }
-
-       if (range < 180)
-               range = 180;
-       else if (range > 900)
-               range = 900;
-
-       params[0] = range >> 8;
-       params[1] = range & 0x00FF;
-
-       ret = hidpp_send_fap_command_sync(hidpp, pdata->force_feature,
-               CMD_G920_FORCE_SET_APERTURE, params, 2, &response);
-       if (ret)
-               return ret;
-
-       pdata->range = range;
-       return count;
-}
-
-static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, g920_range_show, g920_range_store);
-
-static int g920_allocate(struct hid_device *hdev)
-{
-       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
-       struct g920_private_data *pdata;
-
-       pdata = devm_kzalloc(&hdev->dev, sizeof(struct g920_private_data),
-                       GFP_KERNEL);
-       if (!pdata)
-               return -ENOMEM;
-
-       hidpp->private_data = pdata;
-
-       return 0;
-}
-
 static int g920_get_config(struct hidpp_device *hidpp)
 {
-       struct g920_private_data *pdata = hidpp->private_data;
-       struct hidpp_report response;
        u8 feature_type;
        u8 feature_index;
        int ret;
 
-       pdata = hidpp->private_data;
-       if (!pdata) {
-               hid_err(hidpp->hid_dev, "Private driver data not found!\n");
-               return -EINVAL;
-       }
-
        /* Find feature and store for later use */
        ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
                &feature_index, &feature_type);
        if (ret)
                return ret;
 
-       pdata->force_feature = feature_index;
-
-       /* Read current Range */
-       ret = hidpp_send_fap_command_sync(hidpp, feature_index,
-               CMD_G920_FORCE_GET_APERTURE, NULL, 0, &response);
-       if (ret > 0) {
-               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
-                       __func__, ret);
-               return -EPROTO;
-       }
-       if (ret)
-               return ret;
-
-       pdata->range = get_unaligned_be16(&response.fap.params[0]);
-
-       /* Create sysfs interface */
-       ret = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+       ret = hidpp_ff_init(hidpp, feature_index);
        if (ret)
-               hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d\n", ret);
+               hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
+                               ret);
 
        return 0;
 }
@@ -1739,10 +2229,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                ret = k400_allocate(hdev);
                if (ret)
                        goto allocate_fail;
-       } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
-               ret = g920_allocate(hdev);
-               if (ret)
-                       goto allocate_fail;
        }
 
        INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -1825,7 +2311,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
 hid_hw_open_failed:
        hid_device_io_stop(hdev);
        if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
-               device_remove_file(&hdev->dev, &dev_attr_range);
                hid_hw_close(hdev);
                hid_hw_stop(hdev);
        }
@@ -1843,7 +2328,7 @@ static void hidpp_remove(struct hid_device *hdev)
        struct hidpp_device *hidpp = hid_get_drvdata(hdev);
 
        if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
-               device_remove_file(&hdev->dev, &dev_attr_range);
+               hidpp_ff_deinit(hdev);
                hid_hw_close(hdev);
        }
        hid_hw_stop(hdev);
index 296d4991560e45a9fed565c8a2d3cd463efb9ff1..6adb788bbfcaa13984a6a24f8ea99eea4b91bf6d 100644 (file)
@@ -1133,6 +1133,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
 
        ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+       if (ret)
+               dev_warn(&hdev->dev, "Cannot allocate sysfs group for %s\n",
+                               hdev->name);
 
        mt_set_maxcontacts(hdev);
        mt_set_input_mode(hdev);
index 67cd059a8f46cdf7dab8d73cb0edb7bd5aff1333..9cd2ca34a6be5583dbcd82a64feb018f66b20bf8 100644 (file)
@@ -594,6 +594,9 @@ static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
        int ret;
        u8 buf[RMI_F11_CTRL_REG_COUNT];
 
+       if (!(data->device_flags & RMI_DEVICE))
+               return 0;
+
        ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
                                RMI_F11_CTRL_REG_COUNT);
        if (ret)
@@ -613,6 +616,9 @@ static int rmi_post_reset(struct hid_device *hdev)
        struct rmi_data *data = hid_get_drvdata(hdev);
        int ret;
 
+       if (!(data->device_flags & RMI_DEVICE))
+               return 0;
+
        ret = rmi_reset_attn_mode(hdev);
        if (ret) {
                hid_err(hdev, "can not set rmi mode\n");
@@ -640,6 +646,11 @@ static int rmi_post_reset(struct hid_device *hdev)
 
 static int rmi_post_resume(struct hid_device *hdev)
 {
+       struct rmi_data *data = hid_get_drvdata(hdev);
+
+       if (!(data->device_flags & RMI_DEVICE))
+               return 0;
+
        return rmi_reset_attn_mode(hdev);
 }
 #endif /* CONFIG_PM */
index 9b8db0e0ef1cdd6c3b2b050d87a50cb32e18ce1d..310436a54a3f308157a272d56d6624c3dd3c98c1 100644 (file)
@@ -50,6 +50,7 @@
 #define MOTION_CONTROLLER_BT      BIT(8)
 #define NAVIGATION_CONTROLLER_USB BIT(9)
 #define NAVIGATION_CONTROLLER_BT  BIT(10)
+#define SINO_LITE_CONTROLLER      BIT(11)
 
 #define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
 #define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -74,7 +75,7 @@
  * axis values.  Additionally, the controller only has 20 actual, physical axes
  * so there are several unused axes in between the used ones.
  */
-static __u8 sixaxis_rdesc[] = {
+static u8 sixaxis_rdesc[] = {
        0x05, 0x01,         /*  Usage Page (Desktop),               */
        0x09, 0x04,         /*  Usage (Joystick),                   */
        0xA1, 0x01,         /*  Collection (Application),           */
@@ -152,7 +153,7 @@ static __u8 sixaxis_rdesc[] = {
 };
 
 /* PS/3 Motion controller */
-static __u8 motion_rdesc[] = {
+static u8 motion_rdesc[] = {
        0x05, 0x01,         /*  Usage Page (Desktop),               */
        0x09, 0x04,         /*  Usage (Joystick),                   */
        0xA1, 0x01,         /*  Collection (Application),           */
@@ -249,9 +250,9 @@ static __u8 motion_rdesc[] = {
 };
 
 /* PS/3 Navigation controller */
-static __u8 navigation_rdesc[] = {
+static u8 navigation_rdesc[] = {
        0x05, 0x01,         /*  Usage Page (Desktop),               */
-       0x09, 0x04,         /*  Usage (Joystik),                    */
+       0x09, 0x04,         /*  Usage (Joystick),                   */
        0xA1, 0x01,         /*  Collection (Application),           */
        0xA1, 0x02,         /*      Collection (Logical),           */
        0x85, 0x01,         /*          Report ID (1),              */
@@ -809,7 +810,7 @@ static u8 dualshock4_bt_rdesc[] = {
        0xC0                /*  End Collection                      */
 };
 
-static __u8 ps3remote_rdesc[] = {
+static u8 ps3remote_rdesc[] = {
        0x05, 0x01,          /* GUsagePage Generic Desktop */
        0x09, 0x05,          /* LUsage 0x05 [Game Pad] */
        0xA1, 0x01,          /* MCollection Application (mouse, keyboard) */
@@ -817,14 +818,18 @@ static __u8 ps3remote_rdesc[] = {
         /* Use collection 1 for joypad buttons */
         0xA1, 0x02,         /* MCollection Logical (interrelated data) */
 
-         /* Ignore the 1st byte, maybe it is used for a controller
-          * number but it's not needed for correct operation */
+         /*
+          * Ignore the 1st byte, maybe it is used for a controller
+          * number but it's not needed for correct operation
+          */
          0x75, 0x08,        /* GReportSize 0x08 [8] */
          0x95, 0x01,        /* GReportCount 0x01 [1] */
          0x81, 0x01,        /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
 
-         /* Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
-          * buttons multiple keypresses are allowed */
+         /*
+          * Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
+          * buttons multiple keypresses are allowed
+          */
          0x05, 0x09,        /* GUsagePage Button */
          0x19, 0x01,        /* LUsageMinimum 0x01 [Button 1 (primary/trigger)] */
          0x29, 0x18,        /* LUsageMaximum 0x18 [Button 24] */
@@ -849,8 +854,10 @@ static __u8 ps3remote_rdesc[] = {
          0x95, 0x01,        /* GReportCount 0x01 [1] */
          0x80,              /* MInput  */
 
-         /* Ignore bytes from 6th to 11th, 6th to 10th are always constant at
-          * 0xff and 11th is for press indication */
+         /*
+          * Ignore bytes from 6th to 11th, 6th to 10th are always constant at
+          * 0xff and 11th is for press indication
+          */
          0x75, 0x08,        /* GReportSize 0x08 [8] */
          0x95, 0x06,        /* GReportCount 0x06 [6] */
          0x81, 0x01,        /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
@@ -929,7 +936,7 @@ static const unsigned int buzz_keymap[] = {
        /*
         * The controller has 4 remote buzzers, each with one LED and 5
         * buttons.
-        * 
+        *
         * We use the mapping chosen by the controller, which is:
         *
         * Key          Offset
@@ -943,15 +950,15 @@ static const unsigned int buzz_keymap[] = {
         * So, for example, the orange button on the third buzzer is mapped to
         * BTN_TRIGGER_HAPPY14
         */
-       [ 1] = BTN_TRIGGER_HAPPY1,
-       [ 2] = BTN_TRIGGER_HAPPY2,
-       [ 3] = BTN_TRIGGER_HAPPY3,
-       [ 4] = BTN_TRIGGER_HAPPY4,
-       [ 5] = BTN_TRIGGER_HAPPY5,
-       [ 6] = BTN_TRIGGER_HAPPY6,
-       [ 7] = BTN_TRIGGER_HAPPY7,
-       [ 8] = BTN_TRIGGER_HAPPY8,
-       [ 9] = BTN_TRIGGER_HAPPY9,
+        [1] = BTN_TRIGGER_HAPPY1,
+        [2] = BTN_TRIGGER_HAPPY2,
+        [3] = BTN_TRIGGER_HAPPY3,
+        [4] = BTN_TRIGGER_HAPPY4,
+        [5] = BTN_TRIGGER_HAPPY5,
+        [6] = BTN_TRIGGER_HAPPY6,
+        [7] = BTN_TRIGGER_HAPPY7,
+        [8] = BTN_TRIGGER_HAPPY8,
+        [9] = BTN_TRIGGER_HAPPY9,
        [10] = BTN_TRIGGER_HAPPY10,
        [11] = BTN_TRIGGER_HAPPY11,
        [12] = BTN_TRIGGER_HAPPY12,
@@ -973,33 +980,33 @@ static enum power_supply_property sony_battery_props[] = {
 };
 
 struct sixaxis_led {
-       __u8 time_enabled; /* the total time the led is active (0xff means forever) */
-       __u8 duty_length;  /* how long a cycle is in deciseconds (0 means "really fast") */
-       __u8 enabled;
-       __u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
-       __u8 duty_on;  /* % of duty_length the led is on (0xff mean 100%) */
+       u8 time_enabled; /* the total time the led is active (0xff means forever) */
+       u8 duty_length;  /* how long a cycle is in deciseconds (0 means "really fast") */
+       u8 enabled;
+       u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
+       u8 duty_on;  /* % of duty_length the led is on (0xff means 100%) */
 } __packed;
 
 struct sixaxis_rumble {
-       __u8 padding;
-       __u8 right_duration; /* Right motor duration (0xff means forever) */
-       __u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
-       __u8 left_duration;    /* Left motor duration (0xff means forever) */
-       __u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
+       u8 padding;
+       u8 right_duration; /* Right motor duration (0xff means forever) */
+       u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
+       u8 left_duration;    /* Left motor duration (0xff means forever) */
+       u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
 } __packed;
 
 struct sixaxis_output_report {
-       __u8 report_id;
+       u8 report_id;
        struct sixaxis_rumble rumble;
-       __u8 padding[4];
-       __u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
+       u8 padding[4];
+       u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
        struct sixaxis_led led[4];    /* LEDx at (4 - x) */
        struct sixaxis_led _reserved; /* LED5, not actually soldered */
 } __packed;
 
 union sixaxis_output_report_01 {
        struct sixaxis_output_report data;
-       __u8 buf[36];
+       u8 buf[36];
 };
 
 struct motion_output_report_02 {
@@ -1028,30 +1035,30 @@ struct sony_sc {
        struct led_classdev *leds[MAX_LEDS];
        unsigned long quirks;
        struct work_struct state_worker;
-       void(*send_output_report)(struct sony_sc*);
+       void (*send_output_report)(struct sony_sc *);
        struct power_supply *battery;
        struct power_supply_desc battery_desc;
        int device_id;
-       __u8 *output_report_dmabuf;
+       u8 *output_report_dmabuf;
 
 #ifdef CONFIG_SONY_FF
-       __u8 left;
-       __u8 right;
+       u8 left;
+       u8 right;
 #endif
 
-       __u8 mac_address[6];
-       __u8 worker_initialized;
-       __u8 cable_state;
-       __u8 battery_charging;
-       __u8 battery_capacity;
-       __u8 led_state[MAX_LEDS];
-       __u8 resume_led_state[MAX_LEDS];
-       __u8 led_delay_on[MAX_LEDS];
-       __u8 led_delay_off[MAX_LEDS];
-       __u8 led_count;
+       u8 mac_address[6];
+       u8 worker_initialized;
+       u8 cable_state;
+       u8 battery_charging;
+       u8 battery_capacity;
+       u8 led_state[MAX_LEDS];
+       u8 resume_led_state[MAX_LEDS];
+       u8 led_delay_on[MAX_LEDS];
+       u8 led_delay_off[MAX_LEDS];
+       u8 led_count;
 };
 
-static __u8 *sixaxis_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *sixaxis_fixup(struct hid_device *hdev, u8 *rdesc,
                             unsigned int *rsize)
 {
        *rsize = sizeof(sixaxis_rdesc);
@@ -1072,7 +1079,7 @@ static u8 *navigation_fixup(struct hid_device *hdev, u8 *rdesc,
        return navigation_rdesc;
 }
 
-static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc,
                             unsigned int *rsize)
 {
        *rsize = sizeof(ps3remote_rdesc);
@@ -1113,11 +1120,14 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
        return 1;
 }
 
-static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
                unsigned int *rsize)
 {
        struct sony_sc *sc = hid_get_drvdata(hdev);
 
+       if (sc->quirks & SINO_LITE_CONTROLLER)
+               return rdesc;
+
        /*
         * Some Sony RF receivers wrongly declare the mouse pointer as a
         * a constant non-data variable.
@@ -1164,12 +1174,12 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        return rdesc;
 }
 
-static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
 {
-       static const __u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
+       static const u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
        unsigned long flags;
        int offset;
-       __u8 cable_state, battery_capacity, battery_charging;
+       u8 cable_state, battery_capacity, battery_charging;
 
        /*
         * The sixaxis is charging if the battery value is 0xee
@@ -1184,7 +1194,7 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
                battery_charging = !(rd[offset] & 0x01);
                cable_state = 1;
        } else {
-               __u8 index = rd[offset] <= 5 ? rd[offset] : 5;
+               u8 index = rd[offset] <= 5 ? rd[offset] : 5;
                battery_capacity = sixaxis_battery_capacity[index];
                battery_charging = 0;
                cable_state = 0;
@@ -1197,14 +1207,14 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
        spin_unlock_irqrestore(&sc->lock, flags);
 }
 
-static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
 {
        struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
                                                struct hid_input, list);
        struct input_dev *input_dev = hidinput->input;
        unsigned long flags;
        int n, offset;
-       __u8 cable_state, battery_capacity, battery_charging;
+       u8 cable_state, battery_capacity, battery_charging;
 
        /*
         * Battery and touchpad data starts at byte 30 in the USB report and
@@ -1254,7 +1264,7 @@ static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
         * follows the data for the first.
         */
        for (n = 0; n < 2; n++) {
-               __u16 x, y;
+               u16 x, y;
 
                x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
                y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
@@ -1270,7 +1280,7 @@ static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
 }
 
 static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
-               __u8 *rd, int size)
+               u8 *rd, int size)
 {
        struct sony_sc *sc = hid_get_drvdata(hdev);
 
@@ -1394,7 +1404,7 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
 {
        const int buf_size =
                max(SIXAXIS_REPORT_0xF2_SIZE, SIXAXIS_REPORT_0xF5_SIZE);
-       __u8 *buf;
+       u8 *buf;
        int ret;
 
        buf = kmalloc(buf_size, GFP_KERNEL);
@@ -1420,8 +1430,10 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
        }
 
        ret = hid_hw_output_report(hdev, buf, 1);
-       if (ret < 0)
-               hid_err(hdev, "can't set operational mode: step 3\n");
+       if (ret < 0) {
+               hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+               ret = 0;
+       }
 
 out:
        kfree(buf);
@@ -1431,8 +1443,8 @@ out:
 
 static int sixaxis_set_operational_bt(struct hid_device *hdev)
 {
-       static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
-       __u8 *buf;
+       static const u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
+       u8 *buf;
        int ret;
 
        buf = kmemdup(report, sizeof(report), GFP_KERNEL);
@@ -1453,7 +1465,7 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev)
  */
 static int dualshock4_set_operational_bt(struct hid_device *hdev)
 {
-       __u8 *buf;
+       u8 *buf;
        int ret;
 
        buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
@@ -1470,7 +1482,7 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
 
 static void sixaxis_set_leds_from_id(struct sony_sc *sc)
 {
-       static const __u8 sixaxis_leds[10][4] = {
+       static const u8 sixaxis_leds[10][4] = {
                                { 0x01, 0x00, 0x00, 0x00 },
                                { 0x00, 0x01, 0x00, 0x00 },
                                { 0x00, 0x00, 0x01, 0x00 },
@@ -1497,7 +1509,7 @@ static void sixaxis_set_leds_from_id(struct sony_sc *sc)
 static void dualshock4_set_leds_from_id(struct sony_sc *sc)
 {
        /* The first 4 color/index entries match what the PS4 assigns */
-       static const __u8 color_code[7][3] = {
+       static const u8 color_code[7][3] = {
                        /* Blue   */    { 0x00, 0x00, 0x01 },
                        /* Red    */    { 0x01, 0x00, 0x00 },
                        /* Green  */    { 0x00, 0x01, 0x00 },
@@ -1525,7 +1537,7 @@ static void buzz_set_leds(struct sony_sc *sc)
                &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
        struct hid_report *report = list_entry(report_list->next,
                struct hid_report, list);
-       __s32 *value = report->field[0]->value;
+       s32 *value = report->field[0]->value;
 
        BUILD_BUG_ON(MAX_LEDS < 4);
 
@@ -1619,7 +1631,7 @@ static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
        struct hid_device *hdev = to_hid_device(dev);
        struct sony_sc *drv_data = hid_get_drvdata(hdev);
        int n;
-       __u8 new_on, new_off;
+       u8 new_on, new_off;
 
        if (!drv_data) {
                hid_err(hdev, "No device data\n");
@@ -1690,8 +1702,8 @@ static int sony_leds_init(struct sony_sc *sc)
        const char *name_fmt;
        static const char * const ds4_name_str[] = { "red", "green", "blue",
                                                  "global" };
-       __u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
-       __u8 use_hw_blink[MAX_LEDS] = { 0 };
+       u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
+       u8 use_hw_blink[MAX_LEDS] = { 0 };
 
        BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
 
@@ -1719,7 +1731,7 @@ static int sony_leds_init(struct sony_sc *sc)
                name_len = 0;
                name_fmt = "%s:%s";
        } else if (sc->quirks & NAVIGATION_CONTROLLER) {
-               static const __u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
+               static const u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
 
                memcpy(sc->led_state, navigation_leds, sizeof(navigation_leds));
                sc->led_count = 1;
@@ -1796,7 +1808,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
        static const union sixaxis_output_report_01 default_report = {
                .buf = {
                        0x01,
-                       0x00, 0xff, 0x00, 0xff, 0x00,
+                       0x01, 0xff, 0x00, 0xff, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00,
                        0xff, 0x27, 0x10, 0x00, 0x32,
                        0xff, 0x27, 0x10, 0x00, 0x32,
@@ -1842,7 +1854,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
                }
        }
 
-       hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report,
+       hid_hw_raw_request(sc->hdev, report->report_id, (u8 *)report,
                        sizeof(struct sixaxis_output_report),
                        HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
 }
@@ -1850,7 +1862,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
 static void dualshock4_send_output_report(struct sony_sc *sc)
 {
        struct hid_device *hdev = sc->hdev;
-       __u8 *buf = sc->output_report_dmabuf;
+       u8 *buf = sc->output_report_dmabuf;
        int offset;
 
        if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
@@ -1910,7 +1922,7 @@ static void motion_send_output_report(struct sony_sc *sc)
        report->rumble = max(sc->right, sc->left);
 #endif
 
-       hid_hw_output_report(hdev, (__u8 *)report, MOTION_REPORT_0x02_SIZE);
+       hid_hw_output_report(hdev, (u8 *)report, MOTION_REPORT_0x02_SIZE);
 }
 
 static inline void sony_send_output_report(struct sony_sc *sc)
@@ -1922,6 +1934,7 @@ static inline void sony_send_output_report(struct sony_sc *sc)
 static void sony_state_worker(struct work_struct *work)
 {
        struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+
        sc->send_output_report(sc);
 }
 
@@ -2142,7 +2155,7 @@ static int sony_get_bt_devaddr(struct sony_sc *sc)
 
 static int sony_check_add(struct sony_sc *sc)
 {
-       __u8 *buf = NULL;
+       u8 *buf = NULL;
        int n, ret;
 
        if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) ||
@@ -2253,7 +2266,7 @@ static void sony_release_device_id(struct sony_sc *sc)
 }
 
 static inline void sony_init_output_report(struct sony_sc *sc,
-                               void(*send_output_report)(struct sony_sc*))
+                               void (*send_output_report)(struct sony_sc *))
 {
        sc->send_output_report = send_output_report;
 
@@ -2441,7 +2454,7 @@ static int sony_suspend(struct hid_device *hdev, pm_message_t message)
        /*
         * On suspend save the current LED state,
         * stop running force-feedback and blank the LEDS.
-         */
+        */
        if (SONY_LED_SUPPORT || SONY_FF_SUPPORT) {
                struct sony_sc *sc = hid_get_drvdata(hdev);
 
@@ -2501,8 +2514,10 @@ static const struct hid_device_id sony_devices[] = {
                .driver_data = VAIO_RDESC_CONSTANT },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
                .driver_data = VAIO_RDESC_CONSTANT },
-       /* Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
-        * Logitech joystick from the device descriptor. */
+       /*
+        * Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
+        * Logitech joystick from the device descriptor.
+        */
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER),
                .driver_data = BUZZ_CONTROLLER },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER),
@@ -2521,6 +2536,9 @@ static const struct hid_device_id sony_devices[] = {
                .driver_data = DUALSHOCK4_CONTROLLER_USB },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
                .driver_data = DUALSHOCK4_CONTROLLER_BT },
+       /* Nyko Core Controller for PS3 */
+       { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
+               .driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
        { }
 };
 MODULE_DEVICE_TABLE(hid, sony_devices);
index 7dd0953cd70f222f037c72b7c217b76eff8d2942..cb8cbdd07c4e00571562c4294a40348419263ead 100644 (file)
@@ -55,6 +55,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
 
        { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
@@ -106,6 +107,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
index 99ef77fcfb8040d9bcb0021ebb13a901a21c741e..e92e1e855a728725c66dbba1c06edc60cb2339f5 100644 (file)
@@ -580,11 +580,12 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
        struct wacom_features *features = &wacom->features;
        unsigned char *data = wacom->data;
        struct input_dev *input = wacom->pen_input;
-       int idx = 0;
+       int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
 
-       /* tool number */
-       if (features->type == INTUOS)
-               idx = data[1] & 0x01;
+       if (!(((data[1] & 0xfc) == 0xc0) ||  /* in prox */
+           ((data[1] & 0xfe) == 0x20) ||    /* in range */
+           ((data[1] & 0xfe) == 0x80)))     /* out prox */
+               return 0;
 
        /* Enter report */
        if ((data[1] & 0xfc) == 0xc0) {
@@ -612,6 +613,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x885: /* Intuos3 Marker Pen */
                case 0x802: /* Intuos4/5 13HD/24HD General Pen */
                case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
+               case 0x8e2: /* IntuosHT2 pen */
                case 0x022:
                case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
                case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
@@ -673,39 +675,23 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                        wacom->tool[idx] = BTN_TOOL_PEN;
                        break;
                }
+               wacom->shared->stylus_in_proximity = true;
                return 1;
        }
 
-       /*
-        * don't report events for invalid data
-        */
-       /* older I4 styli don't work with new Cintiqs */
-       if ((!((wacom->id[idx] >> 20) & 0x01) &&
-                       (features->type == WACOM_21UX2)) ||
-           /* Only large Intuos support Lense Cursor */
-           (wacom->tool[idx] == BTN_TOOL_LENS &&
-               (features->type == INTUOS3 ||
-                features->type == INTUOS3S ||
-                features->type == INTUOS4 ||
-                features->type == INTUOS4S ||
-                features->type == INTUOS5 ||
-                features->type == INTUOS5S ||
-                features->type == INTUOSPM ||
-                features->type == INTUOSPS)) ||
-          /* Cintiq doesn't send data when RDY bit isn't set */
-          (features->type == CINTIQ && !(data[1] & 0x40)))
-               return 1;
+       /* in Range */
+       if ((data[1] & 0xfe) == 0x20) {
+               if (features->type != INTUOSHT2)
+                       wacom->shared->stylus_in_proximity = true;
 
-       wacom->shared->stylus_in_proximity = true;
-       if (wacom->shared->touch_down)
+               /* in Range while exiting */
+               if (wacom->reporting_data) {
+                       input_report_key(input, BTN_TOUCH, 0);
+                       input_report_abs(input, ABS_PRESSURE, 0);
+                       input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
+                       return 2;
+               }
                return 1;
-
-       /* in Range while exiting */
-       if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
-               input_report_key(input, BTN_TOUCH, 0);
-               input_report_abs(input, ABS_PRESSURE, 0);
-               input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
-               return 2;
        }
 
        /* Exit report */
@@ -750,13 +736,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                return 2;
        }
 
-       /* don't report other events if we don't know the ID */
-       if (!wacom->id[idx]) {
-               /* but reschedule a read of the current tool */
-               wacom_intuos_schedule_prox_event(wacom);
-               return 1;
-       }
-
        return 0;
 }
 
@@ -897,6 +876,36 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
                data[0] != WACOM_REPORT_INTUOS_PEN)
                return 0;
 
+       if (wacom->shared->touch_down)
+               return 1;
+
+       /* don't report events if we don't know the tool ID */
+       if (!wacom->id[idx]) {
+               /* but reschedule a read of the current tool */
+               wacom_intuos_schedule_prox_event(wacom);
+               return 1;
+       }
+
+       /*
+        * don't report events for invalid data
+        */
+       /* older I4 styli don't work with new Cintiqs */
+       if ((!((wacom->id[idx] >> 20) & 0x01) &&
+                       (features->type == WACOM_21UX2)) ||
+           /* Only large Intuos support Lense Cursor */
+           (wacom->tool[idx] == BTN_TOOL_LENS &&
+               (features->type == INTUOS3 ||
+                features->type == INTUOS3S ||
+                features->type == INTUOS4 ||
+                features->type == INTUOS4S ||
+                features->type == INTUOS5 ||
+                features->type == INTUOS5S ||
+                features->type == INTUOSPM ||
+                features->type == INTUOSPS)) ||
+          /* Cintiq doesn't send data when RDY bit isn't set */
+          (features->type == CINTIQ && !(data[1] & 0x40)))
+               return 1;
+
        x = (be16_to_cpup((__be16 *)&data[2]) << 1) | ((data[9] >> 1) & 1);
        y = (be16_to_cpup((__be16 *)&data[4]) << 1) | (data[9] & 1);
        distance = data[9] >> 2;
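
The reworked wacom_intuos_inout() above keys everything off the mode bits in data[1]: 0xc0 is an in-prox (enter) report, 0x20 an in-range report, 0x80 an out-of-prox (exit) report, and anything else is now rejected up front instead of being filtered piecemeal further down. A minimal stand-alone sketch of that classification (the enum and function names are illustrative, not part of the driver):

#include <stdio.h>

enum pen_event { PEN_ENTER, PEN_IN_RANGE, PEN_EXIT, PEN_IGNORE };

/* Classify the Intuos status byte the same way the reworked handler does. */
static enum pen_event classify_status(unsigned char status)
{
	if ((status & 0xfc) == 0xc0)	/* in prox (enter) report */
		return PEN_ENTER;
	if ((status & 0xfe) == 0x20)	/* in range report */
		return PEN_IN_RANGE;
	if ((status & 0xfe) == 0x80)	/* out of prox (exit) report */
		return PEN_EXIT;
	return PEN_IGNORE;		/* dropped by the early "return 0" */
}

int main(void)
{
	printf("%d %d %d %d\n", classify_status(0xc2), classify_status(0x20),
	       classify_status(0x80), classify_status(0x40));
	return 0;
}

Moving the tool-ID and stale-data checks into wacom_intuos_general() (second half of the hunk) keeps this function concerned with proximity state only.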
index 60fb80bd353d601563c57710d8e897d0af50bd30..852c8a85e1e88a3b7d8d5db2f076fd3284bbf9ef 100644 (file)
@@ -685,6 +685,20 @@ config SENSORS_LTC2945
          This driver can also be built as a module. If so, the module will
          be called ltc2945.
 
+config SENSORS_LTC2990
+       tristate "Linear Technology LTC2990 (current monitoring mode only)"
+       depends on I2C
+       help
+         If you say yes here you get support for the Linear Technology
+         LTC2990 I2C System Monitor. The LTC2990 supports a combination of
+         voltage, current and temperature monitoring. Besides the Vcc supply
+         voltage and the chip temperature, this driver currently only
+         supports reading two currents, measured as two differential
+         voltages across series resistors.
+
+         This driver can also be built as a module. If so, the module will
+         be called ltc2990.
+
 config SENSORS_LTC4151
        tristate "Linear Technology LTC4151"
        depends on I2C
index 30c94df314658fc8f9dcb04c7ae05eb141c979e1..a7ecaf2f29aabeaef33b40d69555622d255520e7 100644 (file)
@@ -100,6 +100,7 @@ obj-$(CONFIG_SENSORS_LM95234)       += lm95234.o
 obj-$(CONFIG_SENSORS_LM95241)  += lm95241.o
 obj-$(CONFIG_SENSORS_LM95245)  += lm95245.o
 obj-$(CONFIG_SENSORS_LTC2945)  += ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2990)  += ltc2990.o
 obj-$(CONFIG_SENSORS_LTC4151)  += ltc4151.o
 obj-$(CONFIG_SENSORS_LTC4215)  += ltc4215.o
 obj-$(CONFIG_SENSORS_LTC4222)  += ltc4222.o
@@ -149,7 +150,7 @@ obj-$(CONFIG_SENSORS_TMP103)        += tmp103.o
 obj-$(CONFIG_SENSORS_TMP401)   += tmp401.o
 obj-$(CONFIG_SENSORS_TMP421)   += tmp421.o
 obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
-obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress.o
+obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
 obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
 obj-$(CONFIG_SENSORS_VIA686A)  += via686a.o
 obj-$(CONFIG_SENSORS_VT1211)   += vt1211.o
index c8487894b31236cefd761b24cac48fb4e17e6d52..c43318d3416e20a4d027a32402b1cf751718b8c6 100644 (file)
@@ -930,6 +930,17 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
 static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+       {
+               /*
+                * CPU fan speed going up and down on Dell Studio XPS 8000
+                * for unknown reasons.
+                */
+               .ident = "Dell Studio XPS 8000",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
+               },
+       },
        {
                /*
                 * CPU fan speed going up and down on Dell Studio XPS 8100
index 952fe692d7649a67d1dfe1d8d8c2c2ed8272a719..24e395c5907d4c549912072c3219bd24c389488e 100644 (file)
@@ -58,7 +58,7 @@ static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
  */
 static int apd = -1;
 module_param(apd, bint, 0);
-MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode");
+MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode");
 
 struct temperature {
        s8      degrees;
index f77eb971ce959a6d3501dfdf740e8f8f566383e5..4f695d8fcafaceab3c5d8caaa677b58e94ae9c2c 100644 (file)
@@ -90,7 +90,15 @@ static ssize_t show_power(struct device *dev,
        pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
                                  REG_TDP_LIMIT3, &val);
 
-       tdp_limit = val >> 16;
+       /*
+        * On Carrizo and later platforms, ApmTdpLimit bit field
+        * is extended to 16:31 from 16:28.
+        */
+       if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60)
+               tdp_limit = val >> 16;
+       else
+               tdp_limit = (val >> 16) & 0x1fff;
+
        curr_pwr_watts = ((u64)(tdp_limit +
                                data->base_tdp)) << running_avg_range;
        curr_pwr_watts -= running_avg_capture;
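
The hunk above is a bit-field fix: on family 15h models 0x60 and later the ApmTdpLimit field spans bits 31:16 of REG_TDP_LIMIT3, while older models only define bits 28:16, so applying the narrow mask unconditionally would truncate valid data on the newer parts. A stand-alone sketch of the two extractions (the register value is made up for illustration):

#include <stdio.h>

/* Extract ApmTdpLimit the way the patched show_power() does (family 15h). */
static unsigned int apm_tdp_limit(unsigned int reg, unsigned int model)
{
	if (model >= 0x60)		/* Carrizo and later: bits 31:16 */
		return reg >> 16;
	return (reg >> 16) & 0x1fff;	/* earlier models: bits 28:16 */
}

int main(void)
{
	unsigned int reg = 0xabcd0000;	/* hypothetical register contents */

	printf("pre-0x60: 0x%x, 0x60+: 0x%x\n",
	       apm_tdp_limit(reg, 0x30), apm_tdp_limit(reg, 0x60));
	return 0;
}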
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
new file mode 100644 (file)
index 0000000..8f8fe05
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Driver for Linear Technology LTC2990 power monitor
+ *
+ * Copyright (C) 2014 Topic Embedded Products
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * License: GPLv2
+ *
+ * This driver assumes the chip is wired as a dual current monitor, and
+ * reports the voltage drop across two series resistors. It also reports
+ * the chip's internal temperature and Vcc power supply voltage.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define LTC2990_STATUS 0x00
+#define LTC2990_CONTROL        0x01
+#define LTC2990_TRIGGER        0x02
+#define LTC2990_TINT_MSB       0x04
+#define LTC2990_V1_MSB 0x06
+#define LTC2990_V2_MSB 0x08
+#define LTC2990_V3_MSB 0x0A
+#define LTC2990_V4_MSB 0x0C
+#define LTC2990_VCC_MSB        0x0E
+
+#define LTC2990_CONTROL_KELVIN         BIT(7)
+#define LTC2990_CONTROL_SINGLE         BIT(6)
+#define LTC2990_CONTROL_MEASURE_ALL    (0x3 << 3)
+#define LTC2990_CONTROL_MODE_CURRENT   0x06
+#define LTC2990_CONTROL_MODE_VOLTAGE   0x07
+
+/* convert raw register value to sign-extended integer in 16-bit range */
+static int ltc2990_voltage_to_int(int raw)
+{
+       if (raw & BIT(14))
+               return -(0x4000 - (raw & 0x3FFF)) << 2;
+       else
+               return (raw & 0x3FFF) << 2;
+}
+
+/* Return the converted value from the given register in uV or mC */
+static int ltc2990_get_value(struct i2c_client *i2c, u8 reg, int *result)
+{
+       int val;
+
+       val = i2c_smbus_read_word_swapped(i2c, reg);
+       if (unlikely(val < 0))
+               return val;
+
+       switch (reg) {
+       case LTC2990_TINT_MSB:
+               /* internal temp, 0.0625 degrees/LSB, 13-bit  */
+               val = (val & 0x1FFF) << 3;
+               *result = (val * 1000) >> 7;
+               break;
+       case LTC2990_V1_MSB:
+       case LTC2990_V3_MSB:
+               /* Vx-Vy, 19.42uV/LSB. Depends on mode. */
+               *result = ltc2990_voltage_to_int(val) * 1942 / (4 * 100);
+               break;
+       case LTC2990_VCC_MSB:
+               /* Vcc, 305.18μV/LSB, 2.5V offset */
+               *result = (ltc2990_voltage_to_int(val) * 30518 /
+                          (4 * 100 * 1000)) + 2500;
+               break;
+       default:
+               return -EINVAL; /* won't happen, keep compiler happy */
+       }
+
+       return 0;
+}
+
+static ssize_t ltc2990_show_value(struct device *dev,
+                                 struct device_attribute *da, char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       int value;
+       int ret;
+
+       ret = ltc2990_get_value(dev_get_drvdata(dev), attr->index, &value);
+       if (unlikely(ret < 0))
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ltc2990_show_value, NULL,
+                         LTC2990_TINT_MSB);
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc2990_show_value, NULL,
+                         LTC2990_V1_MSB);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc2990_show_value, NULL,
+                         LTC2990_V3_MSB);
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ltc2990_show_value, NULL,
+                         LTC2990_VCC_MSB);
+
+static struct attribute *ltc2990_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       &sensor_dev_attr_curr1_input.dev_attr.attr,
+       &sensor_dev_attr_curr2_input.dev_attr.attr,
+       &sensor_dev_attr_in0_input.dev_attr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(ltc2990);
+
+static int ltc2990_i2c_probe(struct i2c_client *i2c,
+                            const struct i2c_device_id *id)
+{
+       int ret;
+       struct device *hwmon_dev;
+
+       if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+                                    I2C_FUNC_SMBUS_WORD_DATA))
+               return -ENODEV;
+
+       /* Setup continuous mode, current monitor */
+       ret = i2c_smbus_write_byte_data(i2c, LTC2990_CONTROL,
+                                       LTC2990_CONTROL_MEASURE_ALL |
+                                       LTC2990_CONTROL_MODE_CURRENT);
+       if (ret < 0) {
+               dev_err(&i2c->dev, "Error: Failed to set control mode.\n");
+               return ret;
+       }
+       /* Trigger once to start continuous conversion */
+       ret = i2c_smbus_write_byte_data(i2c, LTC2990_TRIGGER, 1);
+       if (ret < 0) {
+               dev_err(&i2c->dev, "Error: Failed to start acquisition.\n");
+               return ret;
+       }
+
+       hwmon_dev = devm_hwmon_device_register_with_groups(&i2c->dev,
+                                                          i2c->name,
+                                                          i2c,
+                                                          ltc2990_groups);
+
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc2990_i2c_id[] = {
+       { "ltc2990", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2990_i2c_id);
+
+static struct i2c_driver ltc2990_i2c_driver = {
+       .driver = {
+               .name = "ltc2990",
+       },
+       .probe    = ltc2990_i2c_probe,
+       .id_table = ltc2990_i2c_id,
+};
+
+module_i2c_driver(ltc2990_i2c_driver);
+
+MODULE_DESCRIPTION("LTC2990 Sensor Driver");
+MODULE_AUTHOR("Topic Embedded Products");
+MODULE_LICENSE("GPL v2");
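
The new driver's conversion helpers encode the chip's scale factors directly: internal temperature at 0.0625 degrees C per LSB over 13 bits (reported in millidegrees), the V1/V3 differential inputs at 19.42 uV per LSB (reported in microvolts), and Vcc at 305.18 uV per LSB plus a fixed 2.5 V offset (reported in millivolts). A small user-space sketch that mirrors the same arithmetic on example raw readings; it only restates the formulas above and is not part of the patch:

#include <stdio.h>

/* Same sign extension as ltc2990_voltage_to_int() in the driver. */
static int voltage_to_int(int raw)
{
	if (raw & (1 << 14))
		return -(0x4000 - (raw & 0x3FFF)) << 2;
	return (raw & 0x3FFF) << 2;
}

int main(void)
{
	int tint_raw = 400;	/* 400 * 0.0625 degC        = 25.000 degC */
	int v1_raw   = 1000;	/* 1000 * 19.42 uV          = 19420 uV    */
	int vcc_raw  = 8192;	/* 8192 * 305.18 uV + 2.5 V ~= 5000 mV    */

	int temp_mc = (((tint_raw & 0x1FFF) << 3) * 1000) >> 7;
	int v1_uv   = voltage_to_int(v1_raw) * 1942 / (4 * 100);
	int vcc_mv  = voltage_to_int(vcc_raw) * 30518 / (4 * 100 * 1000) + 2500;

	printf("temp=%d mC, v1=%d uV, vcc=%d mV\n", temp_mc, v1_uv, vcc_mv);
	return 0;
}

The temperature and Vcc figures land in temp1_input and in0_input, and the V1/V3 readings feed curr1_input/curr2_input of the registered hwmon device.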
index 52f708bcf77f397952ea0f278ce5b161780e076a..d50c701b19d678e9998319be36a492bb3a8eba6d 100644 (file)
@@ -313,6 +313,10 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
                hwlock = radix_tree_deref_slot(slot);
                if (unlikely(!hwlock))
                        continue;
+               if (radix_tree_is_indirect_ptr(hwlock)) {
+                       slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
 
                if (hwlock->bank->dev->of_node == args.np) {
                        ret = 0;
index ba9732c236c52985c922a63d50bec477585af610..10fbd6d841e06aef8ae0a627f9fcf36106f5119b 100644 (file)
@@ -874,7 +874,8 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
        i2c_set_adapdata(adap, dev);
 
        i2c_dw_disable_int(dev);
-       r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, IRQF_SHARED,
+       r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
+                            IRQF_SHARED | IRQF_COND_SUSPEND,
                             dev_name(dev->dev), dev);
        if (r) {
                dev_err(dev->dev, "failure requesting irq %i: %d\n",
index e045985950737c429c3c3253620559f55be1244e..93f2895383ee55bb9f060cf145af92adbbeb512e 100644 (file)
@@ -137,10 +137,11 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
 };
 
 /* SB800 globals */
+static DEFINE_MUTEX(piix4_mutex_sb800);
 static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
-       "SDA0", "SDA2", "SDA3", "SDA4"
+       " port 0", " port 2", " port 3", " port 4"
 };
-static const char *piix4_aux_port_name_sb800 = "SDA1";
+static const char *piix4_aux_port_name_sb800 = " port 1";
 
 struct i2c_piix4_adapdata {
        unsigned short smba;
@@ -148,7 +149,6 @@ struct i2c_piix4_adapdata {
        /* SB800 */
        bool sb800_main;
        unsigned short port;
-       struct mutex *mutex;
 };
 
 static int piix4_setup(struct pci_dev *PIIX4_dev,
@@ -275,10 +275,12 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
        else
                smb_en = (aux) ? 0x28 : 0x2c;
 
+       mutex_lock(&piix4_mutex_sb800);
        outb_p(smb_en, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
        outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
        smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
+       mutex_unlock(&piix4_mutex_sb800);
 
        if (!smb_en) {
                smb_en_status = smba_en_lo & 0x10;
@@ -559,7 +561,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
        u8 port;
        int retval;
 
-       mutex_lock(adapdata->mutex);
+       mutex_lock(&piix4_mutex_sb800);
 
        outb_p(SB800_PIIX4_PORT_IDX, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -574,7 +576,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
 
        outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
 
-       mutex_unlock(adapdata->mutex);
+       mutex_unlock(&piix4_mutex_sb800);
 
        return retval;
 }
@@ -625,6 +627,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
 static struct i2c_adapter *piix4_aux_adapter;
 
 static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+                            bool sb800_main, unsigned short port,
                             const char *name, struct i2c_adapter **padap)
 {
        struct i2c_adapter *adap;
@@ -639,7 +642,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
 
        adap->owner = THIS_MODULE;
        adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
-       adap->algo = &smbus_algorithm;
+       adap->algo = sb800_main ? &piix4_smbus_algorithm_sb800
+                               : &smbus_algorithm;
 
        adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL);
        if (adapdata == NULL) {
@@ -649,12 +653,14 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
        }
 
        adapdata->smba = smba;
+       adapdata->sb800_main = sb800_main;
+       adapdata->port = port;
 
        /* set up the sysfs linkage to our parent device */
        adap->dev.parent = &dev->dev;
 
        snprintf(adap->name, sizeof(adap->name),
-               "SMBus PIIX4 adapter %s at %04x", name, smba);
+               "SMBus PIIX4 adapter%s at %04x", name, smba);
 
        i2c_set_adapdata(adap, adapdata);
 
@@ -673,30 +679,16 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
 
 static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
 {
-       struct mutex *mutex;
        struct i2c_piix4_adapdata *adapdata;
        int port;
        int retval;
 
-       mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
-       if (mutex == NULL)
-               return -ENOMEM;
-
-       mutex_init(mutex);
-
        for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
-               retval = piix4_add_adapter(dev, smba,
+               retval = piix4_add_adapter(dev, smba, true, port,
                                           piix4_main_port_names_sb800[port],
                                           &piix4_main_adapters[port]);
                if (retval < 0)
                        goto error;
-
-               piix4_main_adapters[port]->algo = &piix4_smbus_algorithm_sb800;
-
-               adapdata = i2c_get_adapdata(piix4_main_adapters[port]);
-               adapdata->sb800_main = true;
-               adapdata->port = port;
-               adapdata->mutex = mutex;
        }
 
        return retval;
@@ -714,19 +706,20 @@ error:
                }
        }
 
-       kfree(mutex);
-
        return retval;
 }
 
 static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        int retval;
+       bool is_sb800 = false;
 
        if ((dev->vendor == PCI_VENDOR_ID_ATI &&
             dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
             dev->revision >= 0x40) ||
            dev->vendor == PCI_VENDOR_ID_AMD) {
+               is_sb800 = true;
+
                if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
                        dev_err(&dev->dev,
                        "SMBus base address index region 0x%x already in use!\n",
@@ -756,7 +749,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        return retval;
 
                /* Try to register main SMBus adapter, give up if we can't */
-               retval = piix4_add_adapter(dev, retval, "main",
+               retval = piix4_add_adapter(dev, retval, false, 0, "",
                                           &piix4_main_adapters[0]);
                if (retval < 0)
                        return retval;
@@ -783,7 +776,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (retval > 0) {
                /* Try to add the aux adapter if it exists,
                 * piix4_add_adapter will clean up if this fails */
-               piix4_add_adapter(dev, retval, piix4_aux_port_name_sb800,
+               piix4_add_adapter(dev, retval, false, 0,
+                                 is_sb800 ? piix4_aux_port_name_sb800 : "",
                                  &piix4_aux_adapter);
        }
 
@@ -798,10 +792,8 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
                i2c_del_adapter(adap);
                if (adapdata->port == 0) {
                        release_region(adapdata->smba, SMBIOSIZE);
-                       if (adapdata->sb800_main) {
-                               kfree(adapdata->mutex);
+                       if (adapdata->sb800_main)
                                release_region(SB800_PIIX4_SMB_IDX, 2);
-                       }
                }
                kfree(adapdata);
                kfree(adap);
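
The i2c-piix4 changes replace the kzalloc'd, per-controller mutex with a single file-scope DEFINE_MUTEX(piix4_mutex_sb800). Every SB800 port sits behind the same index/data register pair at SB800_PIIX4_SMB_IDX, so one static lock is enough: it removes the kfree bookkeeping on the error and remove paths, and it lets piix4_setup_sb800() take the same lock around its own index-port accesses during probe. A user-space analogue of the access pattern, with the register pair simulated and all names illustrative:

#include <pthread.h>
#include <stdio.h>

/* One static lock for one shared hardware resource: no allocation, no
 * teardown, usable from any code path including early setup. */
static pthread_mutex_t sb800_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned char index_reg;				/* simulated index port */
static unsigned char data_regs[8] = { 0, 0, 0x2c };	/* simulated data space */

static unsigned char read_indexed(unsigned char idx)
{
	unsigned char val;

	pthread_mutex_lock(&sb800_lock);
	index_reg = idx;		/* outb_p(idx, SB800_PIIX4_SMB_IDX) */
	val = data_regs[index_reg];	/* inb_p(SB800_PIIX4_SMB_IDX + 1)   */
	pthread_mutex_unlock(&sb800_lock);

	return val;
}

int main(void)
{
	printf("index 2 -> 0x%02x\n", read_indexed(2));
	return 0;
}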
index 9ad014a7afc797b921d7b687b474cd4faaddfeba..b33646be699cbc090c028730e03ac8ac4b72c4e9 100644 (file)
@@ -28,7 +28,6 @@
 
 #ifdef CONFIG_PPC_PMAC
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #endif
 
 #define DRV_NAME "pdc202xx_new"
index 96a345248224b16fe78a2e71fed1920c8088edaa..7f0434f7e486666be6052e1c11c98c16f8f25174 100644 (file)
@@ -40,7 +40,6 @@
 #include <asm/io.h>
 #include <asm/dbdma.h>
 #include <asm/ide.h>
-#include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/sections.h>
index edc29b173f6c9012635771116a6cca23193a096e..833ea9dd4464b664e9d11103c8543961a65f5f01 100644 (file)
@@ -213,6 +213,7 @@ config STK8312
 config STK8BA50
        tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
        depends on I2C
+       depends on IIO_TRIGGER
        help
          Say yes here to get support for the Sensortek STK8BA50 3-axis
          accelerometer.
index 605ff42c46310201e7aba3fd2f7ab42a0a873e04..283ded7747a9379be4c105ef49964d5c5d674b0f 100644 (file)
@@ -175,6 +175,7 @@ config DA9150_GPADC
 config EXYNOS_ADC
        tristate "Exynos ADC driver support"
        depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
+       depends on HAS_IOMEM
        help
          Core support for the ADC block found in the Samsung EXYNOS series
          of SoCs for drivers such as the touchscreen and hwmon to use to share
@@ -207,6 +208,7 @@ config INA2XX_ADC
 config IMX7D_ADC
        tristate "IMX7D ADC driver"
        depends on ARCH_MXC || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          Say yes here to build support for IMX7D ADC.
 
@@ -409,6 +411,7 @@ config TWL6030_GPADC
 config VF610_ADC
        tristate "Freescale vf610 ADC driver"
        depends on OF
+       depends on HAS_IOMEM
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index 942320e32753c3838e967e9e0a96d4db2d691fce..c1e05532d437f263a9aa3d7f7c96147b13bfe682 100644 (file)
@@ -289,7 +289,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
                goto error_kfifo_free;
 
        indio_dev->setup_ops = setup_ops;
-       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+       indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 
        return 0;
 
index 43d14588448d65f507ab02ce22c0e01d7a9008ad..b4dde8315210c6519c7e2a7db4bd8bf3b6607988 100644 (file)
@@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
        data->client = client;
 
        indio_dev->dev.parent = &client->dev;
+       indio_dev->name = id->name;
        indio_dev->info = &mcp4725_info;
        indio_dev->channels = &mcp4725_channel;
        indio_dev->num_channels = 1;
index 1165b1c4f9d67bd93db24f784da5b13010d9fd99..cfc5a051ab9f3946bfdfd4aaf5aaac01f7245f36 100644 (file)
@@ -117,7 +117,7 @@ static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
        if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
                return -EIO;
 
-       dht11->timestamp = ktime_get_real_ns();
+       dht11->timestamp = ktime_get_boot_ns();
        if (hum_int < 20) {  /* DHT22 */
                dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
                                        ((temp_int & 0x80) ? -100 : 100);
@@ -145,7 +145,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
 
        /* TODO: Consider making the handler safe for IRQ sharing */
        if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
-               dht11->edges[dht11->num_edges].ts = ktime_get_real_ns();
+               dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns();
                dht11->edges[dht11->num_edges++].value =
                                                gpio_get_value(dht11->gpio);
 
@@ -164,7 +164,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
        int ret, timeres;
 
        mutex_lock(&dht11->lock);
-       if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) {
+       if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
                timeres = ktime_get_resolution_ns();
                if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
                        dev_err(dht11->dev, "timeresolution %dns too low\n",
@@ -279,7 +279,7 @@ static int dht11_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       dht11->timestamp = ktime_get_real_ns() - DHT11_DATA_VALID_TIME - 1;
+       dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1;
        dht11->num_edges = -1;
 
        platform_set_drvdata(pdev, iio);
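
The dht11 change swaps ktime_get_real_ns() for ktime_get_boot_ns() in both the cached-reading check and the edge timestamps: the real-time clock can jump when the wall clock is set, which could make a stale reading look fresh or distort the measured pulse widths, while the boot clock only ever moves forward. A user-space sketch of the same idea using CLOCK_BOOTTIME (the two-second validity window is illustrative, standing in for DHT11_DATA_VALID_TIME):

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

static long long boot_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_BOOTTIME, &ts);	/* immune to settimeofday() */
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	long long last_reading = boot_ns();

	/* ... later: a wall-clock change cannot fool this comparison */
	if (boot_ns() - last_reading < 2000000000LL)
		puts("cached reading still valid");
	return 0;
}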
index cb32b593f1c55c023046d97cfc41263ea3818d89..36607d52fee065f9e2aa82f14215897e2c5b44b1 100644 (file)
@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
                return -ENOMEM;
 
        rx = adis->buffer;
-       tx = rx + indio_dev->scan_bytes;
+       tx = rx + scan_count;
 
        spi_message_init(&adis->msg);
 
index 48fbc0bc7e2a1d499015001bace2c46db2c8b440..8f8d1370ed8b9b3359d3a4f1135767a622df2c21 100644 (file)
@@ -5,9 +5,9 @@
 config INV_MPU6050_IIO
        tristate "Invensense MPU6050 devices"
        depends on I2C && SYSFS
+       depends on I2C_MUX
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
-       select I2C_MUX
        help
          This driver supports the Invensense MPU6050 devices.
          This driver can also support MPU6500 in MPU6050 compatibility mode
index 80fbbfd76faf9e1859eefb1b5dc55b2129f9858b..734a0042de0cb4265ee3145ee48f22a19b8b50af 100644 (file)
@@ -349,6 +349,8 @@ EXPORT_SYMBOL_GPL(iio_channel_get);
 
 void iio_channel_release(struct iio_channel *channel)
 {
+       if (!channel)
+               return;
        iio_device_put(channel->indio_dev);
        kfree(channel);
 }
index 60537ec0c923b98c45160d011f4a17ac932e4609..53201d99a16c8d760f4ec909c8c5fb1cba6bc8be 100644 (file)
@@ -54,7 +54,9 @@ static const struct iio_chan_spec acpi_als_channels[] = {
                        .realbits       = 32,
                        .storagebits    = 32,
                },
-               .info_mask_separate     = BIT(IIO_CHAN_INFO_RAW),
+               /* _RAW is here for backward ABI compatibility */
+               .info_mask_separate     = BIT(IIO_CHAN_INFO_RAW) |
+                                         BIT(IIO_CHAN_INFO_PROCESSED),
        },
 };
 
@@ -152,7 +154,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
        s32 temp_val;
        int ret;
 
-       if (mask != IIO_CHAN_INFO_RAW)
+       if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
                return -EINVAL;
 
        /* we support only illumination (_ALI) so far. */
index 809a961b9a7f6d0d0077114e12767e4e267c7606..6bf89d8f374191cee48f258d1c25810b0e5dc410 100644 (file)
@@ -180,7 +180,7 @@ static const struct ltr501_samp_table ltr501_ps_samp_table[] = {
                        {500000, 2000000}
 };
 
-static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
+static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
                                           int len, int val, int val2)
 {
        int i, freq;
index f5ecd6e19f5de725e715e547a5527f5875050561..a0d7deeac62f78dfc416bf597e0b894436c75a8c 100644 (file)
@@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
                *val = ret >> 6;
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_OFFSET:
-               *val = 605;
+               *val = -605;
                *val2 = 750000;
                return IIO_VAL_INT_PLUS_MICRO;
        case IIO_CHAN_INFO_SCALE:
index 93e29fb67fa00f94e9ca0a614addfc5cde7ebb76..db35e04a063767a9b2b6011cf3d0345c9d0929bd 100644 (file)
@@ -87,7 +87,7 @@ static int lidar_i2c_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
 
        ret = i2c_transfer(client->adapter, msg, 2);
 
-       return (ret == 2) ? 0 : ret;
+       return (ret == 2) ? 0 : -EIO;
 }
 
 static int lidar_smbus_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
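
The one-liner above addresses a return-value convention: i2c_transfer() returns the number of messages actually transferred (or a negative errno), so a short transfer comes back as a positive count that the old "return ret" path would have propagated as success. A tiny sketch of turning such a count into a proper status (generalised slightly; the driver simply maps every non-2 result to -EIO):

#include <errno.h>
#include <stdio.h>

/* Collapse a "count or -errno" result into 0 on full success, -EIO on a
 * short transfer, and pass real errors through unchanged. */
static int xfer_status(int ret, int expected)
{
	if (ret < 0)
		return ret;
	return ret == expected ? 0 : -EIO;
}

int main(void)
{
	printf("%d %d %d\n", xfer_status(2, 2), xfer_status(1, 2),
	       xfer_status(-EAGAIN, 2));
	return 0;
}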
index 19837d2702789e63f1d948c1abaf4bae9a28d7f6..2116132568e70e61c8465ffc9d922d732aca28ed 100644 (file)
@@ -322,6 +322,8 @@ int ib_ud_header_init(int     payload_bytes,
                      int    immediate_present,
                      struct ib_ud_header *header)
 {
+       size_t udp_bytes = udp_present ? IB_UDP_BYTES : 0;
+
        grh_present = grh_present && !ip_version;
        memset(header, 0, sizeof *header);
 
@@ -353,7 +355,8 @@ int ib_ud_header_init(int     payload_bytes,
        if (ip_version == 6 || grh_present) {
                header->grh.ip_version      = 6;
                header->grh.payload_length  =
-                       cpu_to_be16((IB_BTH_BYTES     +
+                       cpu_to_be16((udp_bytes        +
+                                    IB_BTH_BYTES     +
                                     IB_DETH_BYTES    +
                                     payload_bytes    +
                                     4                + /* ICRC     */
@@ -362,8 +365,6 @@ int ib_ud_header_init(int     payload_bytes,
        }
 
        if (ip_version == 4) {
-               int udp_bytes = udp_present ? IB_UDP_BYTES : 0;
-
                header->ip4.ver = 4; /* version 4 */
                header->ip4.hdr_len = 5; /* 5 words */
                header->ip4.tot_len =
index ec737e2287fe150ff8d5d83e6ca7042a2cca8b62..03c418ccbc982e4114681044798c075a65357b28 100644 (file)
@@ -844,6 +844,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        int err;
        int i;
        size_t reqlen;
+       size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
+                                    max_cqe_version);
 
        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);
@@ -854,7 +856,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
        if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
                ver = 0;
-       else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+       else if (reqlen >= min_req_v2)
                ver = 2;
        else
                return ERR_PTR(-EINVAL);
@@ -2214,7 +2216,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
        dev->ib_dev.uverbs_ex_cmd_mask =
-               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
+               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)     |
+               (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)        |
+               (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
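
The min_req_v2 computation above makes the v2 ucontext size check forward-compatible: instead of demanding the full current size of struct mlx5_ib_alloc_ucontext_req_v2, it only requires the bytes that precede max_cqe_version, so user space built against an older, shorter v2 layout keeps working while the newer trailing fields stay optional. A generic sketch of the technique with a made-up request structure (the layout is hypothetical, not the mlx5 ABI):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical versioned request; newer fields are appended at the end. */
struct alloc_req_v2 {
	uint32_t total_num_uuars;
	uint32_t num_low_latency_uuars;
	uint8_t  max_cqe_version;
	uint8_t  reserved[7];		/* fields newer than max_cqe_version */
};

/* Accept any v2 request that carries at least the fields preceding
 * max_cqe_version, mirroring the min_req_v2 comparison in the patch. */
static int request_version(size_t reqlen)
{
	size_t min_req_v2 = offsetof(struct alloc_req_v2, max_cqe_version);

	return reqlen >= min_req_v2 ? 2 : -1;
}

int main(void)
{
	printf("%d %d\n", request_version(8), request_version(4));
	return 0;
}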
index 8fb9c27485e19959a09edf3b3bdf3f7c56557732..9116bc3988a6f9338709f6120a4597794d2cf36d 100644 (file)
@@ -1036,7 +1036,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
        wq = MLX5_ADDR_OF(rqc, rqc, wq);
        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, end_padding_mode,
-                MLX5_GET64(qpc, qpc, end_padding_mode));
+                MLX5_GET(qpc, qpc, end_padding_mode));
        MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
        MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
        MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
@@ -1615,15 +1615,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 
        if (pd) {
                dev = to_mdev(pd->device);
-       } else {
-               /* being cautious here */
-               if (init_attr->qp_type != IB_QPT_XRC_TGT &&
-                   init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
-                       pr_warn("%s: no PD for transport %s\n", __func__,
-                               ib_qp_type_str(init_attr->qp_type));
-                       return ERR_PTR(-EINVAL);
-               }
-               dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 
                if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
                        if (!pd->uobject) {
@@ -1634,6 +1625,15 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                return ERR_PTR(-EINVAL);
                        }
                }
+       } else {
+               /* being cautious here */
+               if (init_attr->qp_type != IB_QPT_XRC_TGT &&
+                   init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
+                       pr_warn("%s: no PD for transport %s\n", __func__,
+                               ib_qp_type_str(init_attr->qp_type));
+                       return ERR_PTR(-EINVAL);
+               }
+               dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
        }
 
        switch (init_attr->qp_type) {
index 6727954ab74be9338e9c3d46e12a55fa3db7a6e7..e8a84d12b7fffe812cd329a88da26f6922c219af 100644 (file)
@@ -1207,7 +1207,6 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
 #else
 static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
 static void xpad_led_disconnect(struct usb_xpad *xpad) { }
-static void xpad_identify_controller(struct usb_xpad *xpad) { }
 #endif
 
 static int xpad_start_input(struct usb_xpad *xpad)
index 4d446d5085aad9b110ca7ecb05a3167f859ef12c..c01a1d648f9f087df57aafedf7eb3e56b4df2b8e 100644 (file)
@@ -235,7 +235,7 @@ struct adp5589_kpad {
        unsigned short gpimapsize;
        unsigned extend_cfg;
        bool is_adp5585;
-       bool adp5585_support_row5;
+       bool support_row5;
 #ifdef CONFIG_GPIOLIB
        unsigned char gpiomap[ADP5589_MAXGPIO];
        bool export_gpio;
@@ -485,7 +485,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
        if (kpad->extend_cfg & C4_EXTEND_CFG)
                pin_used[kpad->var->c4_extend_cfg] = true;
 
-       if (!kpad->adp5585_support_row5)
+       if (!kpad->support_row5)
                pin_used[5] = true;
 
        for (i = 0; i < kpad->var->maxgpio; i++)
@@ -884,12 +884,13 @@ static int adp5589_probe(struct i2c_client *client,
 
        switch (id->driver_data) {
        case ADP5585_02:
-               kpad->adp5585_support_row5 = true;
+               kpad->support_row5 = true;
        case ADP5585_01:
                kpad->is_adp5585 = true;
                kpad->var = &const_adp5585;
                break;
        case ADP5589:
+               kpad->support_row5 = true;
                kpad->var = &const_adp5589;
                break;
        }
index 378db10001df5067adcf4fe5bfc2b38e42098574..4401be225d64b28b8ffa798ba71cce07c8a81282 100644 (file)
@@ -304,8 +304,10 @@ static int cap11xx_init_leds(struct device *dev,
                led->cdev.brightness = LED_OFF;
 
                error = of_property_read_u32(child, "reg", &reg);
-               if (error != 0 || reg >= num_leds)
+               if (error != 0 || reg >= num_leds) {
+                       of_node_put(child);
                        return -EINVAL;
+               }
 
                led->reg = reg;
                led->priv = priv;
@@ -313,8 +315,10 @@ static int cap11xx_init_leds(struct device *dev,
                INIT_WORK(&led->work, cap11xx_led_work);
 
                error = devm_led_classdev_register(dev, &led->cdev);
-               if (error)
+               if (error) {
+                       of_node_put(child);
                        return error;
+               }
 
                priv->num_leds++;
                led++;
index d6d16fa782815481e04609771b09b32e862ce679..1f2337abcf2f333de7b10cc449aaad56aadd5369 100644 (file)
@@ -733,7 +733,7 @@ config INPUT_XEN_KBDDEV_FRONTEND
          module will be called xen-kbdfront.
 
 config INPUT_SIRFSOC_ONKEY
-       bool "CSR SiRFSoC power on/off/suspend key support"
+       tristate "CSR SiRFSoC power on/off/suspend key support"
        depends on ARCH_SIRF && OF
        default y
        help
index 9d5b89befe6fb059e593c6fe1e6265f0b3819199..ed7237f1953966c378c3822faa9a40f7c6a044dd 100644 (file)
@@ -101,7 +101,7 @@ static void sirfsoc_pwrc_close(struct input_dev *input)
 static const struct of_device_id sirfsoc_pwrc_of_match[] = {
        { .compatible = "sirf,prima2-pwrc" },
        {},
-}
+};
 MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
 
 static int sirfsoc_pwrc_probe(struct platform_device *pdev)
index e272f06258cefb3c2119058298ad10f2cb77b7ce..a3f0f5a47490e936e31b45594861d692503429b1 100644 (file)
@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
        priv->abs_dev = abs_dev;
        psmouse->private = priv;
 
-       input_set_capability(rel_dev, EV_REL, REL_WHEEL);
-
        /* Set up and register absolute device */
        snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
                 psmouse->ps2dev.serio->phys);
@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
        abs_dev->id.version = psmouse->model;
        abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
 
-       error = input_register_device(priv->abs_dev);
-       if (error)
-               goto init_fail;
-
        /* Set absolute device capabilities */
        input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
        input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
        input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
        input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
 
+       error = input_register_device(priv->abs_dev);
+       if (error)
+               goto init_fail;
+
+       /* Add wheel capability to the relative device */
+       input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+
        psmouse->protocol_handler = vmmouse_process_byte;
        psmouse->disconnect = vmmouse_disconnect;
        psmouse->reconnect = vmmouse_reconnect;
index 8f828975ab10b03746e700dd26dce1cb03d2c17e..1ca7f551e2dabe73896f780ec53e4c6735d13019 100644 (file)
@@ -134,7 +134,7 @@ static void serio_find_driver(struct serio *serio)
        int error;
 
        error = device_attach(&serio->dev);
-       if (error < 0)
+       if (error < 0 && error != -EPROBE_DEFER)
                dev_warn(&serio->dev,
                         "device_attach() failed for %s (%s), error: %d\n",
                         serio->phys, serio->name, error);
index 5d4903a402cc6a5f183010ded6e7af17c1136a49..69828d015d45ffa4747368e79f6fa5f47108c0c9 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 0b0f8c17f3f7e0f4df1e69144d693b46741f4025..23fbe382da8b420791a1fec28962454132d4e7ef 100644 (file)
@@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
        int error;
 
        error = device_property_read_u32(dev, "threshold", &val);
-       if (!error)
-               reg_addr->reg_threshold = val;
+       if (!error) {
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val);
+               tsdata->threshold = val;
+       }
 
        error = device_property_read_u32(dev, "gain", &val);
-       if (!error)
-               reg_addr->reg_gain = val;
+       if (!error) {
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val);
+               tsdata->gain = val;
+       }
 
        error = device_property_read_u32(dev, "offset", &val);
-       if (!error)
-               reg_addr->reg_offset = val;
+       if (!error) {
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
+               tsdata->offset = val;
+       }
 }
 
 static void
index 539b0dea8034b5d5b61e93f5927b353448197c67..e5e223938eecc9f013e33977b1de50bbb3cfae3d 100644 (file)
@@ -2049,7 +2049,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
        /* Update device table */
        set_dte_entry(dev_data->devid, domain, ats);
        if (alias != dev_data->devid)
-               set_dte_entry(dev_data->devid, domain, ats);
+               set_dte_entry(alias, domain, ats);
 
        device_flush_dte(dev_data);
 }
index ac7387686ddc7b2a7c7757f2cb3fbd003c8a23af..986a53e3eb96b4bb56faedfdb36b4bd658aacb99 100644 (file)
@@ -1489,7 +1489,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
        struct pci_dev *pdev;
 
-       if (dev_is_pci(info->dev))
+       if (!dev_is_pci(info->dev))
                return;
 
        pdev = to_pci_dev(info->dev);
index 8bbcbfe7695cf82aabda359b7a9036527e4951b3..381ca5a37a7b7de9b338236b28202c6c46f306df 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/barrier.h>
 
index 715923d5236cc9a3cb721d2f91508e420d64ff47..fb50911b3940c0bd7234a1312e320ccff020f492 100644 (file)
@@ -159,6 +159,7 @@ config TB10X_IRQC
 config TS4800_IRQ
        tristate "TS-4800 IRQ controller"
        select IRQ_DOMAIN
+       depends on HAS_IOMEM
        help
          Support for the TS-4800 FPGA IRQ controller
 
index b12a5d58546f922b460d6aac34c073ff363ef1b8..37199b9b2cfa260659a98da3eb1a48053e7bddfb 100644 (file)
@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
            priority > AT91_AIC_IRQ_MAX_PRIORITY)
                return -EINVAL;
 
-       *val &= AT91_AIC_PRIOR;
+       *val &= ~AT91_AIC_PRIOR;
        *val |= priority;
 
        return 0;
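
The aic_common fix is a one-character read-modify-write bug: "*val &= AT91_AIC_PRIOR" kept only the old priority bits instead of clearing them, so the subsequent OR could merge old and new priority values while wiping every other bit of the cached register. Clearing with the complement is the usual field-update idiom; a minimal sketch (the mask value is an illustrative stand-in, not the real AT91_AIC_PRIOR):

#include <stdio.h>

#define PRIO_MASK 0x7	/* illustrative stand-in for AT91_AIC_PRIOR */

/* Update one bit field without disturbing its neighbours. */
static unsigned int set_priority(unsigned int reg, unsigned int prio)
{
	reg &= ~PRIO_MASK;			/* clear the old field (the fix) */
	return reg | (prio & PRIO_MASK);	/* then OR in the new value      */
}

int main(void)
{
	/* other bits (0xf0) survive, old priority 5 is replaced by 2 */
	printf("0x%x\n", set_priority(0xf5, 2));	/* prints 0xf2 */
	return 0;
}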
index e23d1d18f9d6a39731bdd34633e2bbc9bc473525..3447549fcc9306a37e7695d27e06589d45d3d0e4 100644 (file)
@@ -875,6 +875,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
                }
 
                alloc_size = (1 << order) * PAGE_SIZE;
+retry_alloc_baser:
                alloc_pages = (alloc_size / psz);
                if (alloc_pages > GITS_BASER_PAGES_MAX) {
                        alloc_pages = GITS_BASER_PAGES_MAX;
@@ -938,13 +939,16 @@ retry_baser:
                         * size and retry. If we reach 4K, then
                         * something is horribly wrong...
                         */
+                       free_pages((unsigned long)base, order);
+                       its->tables[i] = NULL;
+
                        switch (psz) {
                        case SZ_16K:
                                psz = SZ_4K;
-                               goto retry_baser;
+                               goto retry_alloc_baser;
                        case SZ_64K:
                                psz = SZ_16K;
-                               goto retry_baser;
+                               goto retry_alloc_baser;
                        }
                }
 
index c22e2d40cb302452624c29efd44c483e577f5333..efe50845939d91fee149acb085912697031dd9a0 100644 (file)
@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
                writel(0, icoll_priv.intr + i);
 
        icoll_add_domain(np, ASM9260_NUM_IRQS);
+       set_handle_irq(icoll_handle_irq);
 
        return 0;
 }
index c71914e8f596c3700d8c5a4da2448b32acf8c41f..5dc5a760c7236971a81dfcf70cd78542abadf8f4 100644 (file)
@@ -605,7 +605,7 @@ err:
        return ERR_PTR(ret);
 }
 
-static struct s3c_irq_data init_eint[32] = {
+static struct s3c_irq_data __maybe_unused init_eint[32] = {
        { .type = S3C_IRQTYPE_NONE, }, /* reserved */
        { .type = S3C_IRQTYPE_NONE, }, /* reserved */
        { .type = S3C_IRQTYPE_NONE, }, /* reserved */
index 4f12c6f01fe7d384d086321a09d73c1fb8fdd4a3..b6819f0fc6083266afd1d9fbea5c7e42c41b13fc 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/macio.h>
 #include <asm/pmac_feature.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 
 #undef DEBUG
 
index 546d05f4358a7c452ee80366b3ae782e685fd7ef..b2bbe8659beda5ad8182b5f4ff7bbefeda970459 100644 (file)
@@ -81,6 +81,7 @@ config STI_MBOX
 config MAILBOX_TEST
        tristate "Mailbox Test Client"
        depends on OF
+       depends on HAS_IOMEM
        help
          Test client to help with testing new Controller driver
          implementations.
index 45d85aea9955404d0c4567f33863e0e3f500674f..8f779a1ec99c4b248c9721fceef19b579432539b 100644 (file)
@@ -81,16 +81,10 @@ static struct mbox_controller pcc_mbox_ctrl = {};
  */
 static struct mbox_chan *get_pcc_channel(int id)
 {
-       struct mbox_chan *pcc_chan;
-
        if (id < 0 || id > pcc_mbox_ctrl.num_chans)
                return ERR_PTR(-ENOENT);
 
-       pcc_chan = (struct mbox_chan *)
-               (unsigned long) pcc_mbox_channels +
-               (id * sizeof(*pcc_chan));
-
-       return pcc_chan;
+       return &pcc_mbox_channels[id];
 }
 
 /**
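
The get_pcc_channel() cleanup is a pointer-arithmetic point: adding "id * sizeof(*pcc_chan)" to a struct mbox_chan pointer gets scaled by the element size a second time by C's pointer arithmetic, which is at best confusing and at worst indexes the wrong channel, and the casts that made it compile were hiding exactly that. Plain array indexing says the same thing without the double scaling; a short illustration (struct and sizes are made up):

#include <stdio.h>

struct chan { int payload[16]; };

static struct chan channels[4];

int main(void)
{
	struct chan *base = channels;

	/* Pointer arithmetic already multiplies by sizeof(struct chan), so
	 * these two expressions name the same element ... */
	printf("same element: %d\n", &channels[2] == base + 2);

	/* ... whereas base + 2 * sizeof(struct chan) would land
	 * 2 * sizeof(struct chan) elements in, far outside this
	 * 4-element array. */
	return 0;
}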
index 4f22e919787aba1bcab7b8f6799a3c22a6b7ce28..d80cce499a56e595d1f2679170124ca5fc620315 100644 (file)
@@ -210,10 +210,6 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
        struct block_device *bdev;
        struct mddev *mddev = bitmap->mddev;
        struct bitmap_storage *store = &bitmap->storage;
-       int node_offset = 0;
-
-       if (mddev_is_clustered(bitmap->mddev))
-               node_offset = bitmap->cluster_slot * store->file_pages;
 
        while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
                int size = PAGE_SIZE;
index 4a8e15058e8b56a8a9d89d9f0db50135df44c6d3..685aa2d77e2526935f8f2f416ad8d8681b6c7b14 100644 (file)
@@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
                conf->nfaults = n+1;
 }
 
-static void make_request(struct mddev *mddev, struct bio *bio)
+static void faulty_make_request(struct mddev *mddev, struct bio *bio)
 {
        struct faulty_conf *conf = mddev->private;
        int failit = 0;
@@ -226,7 +226,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
        generic_make_request(bio);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void faulty_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct faulty_conf *conf = mddev->private;
        int n;
@@ -259,7 +259,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
 }
 
 
-static int reshape(struct mddev *mddev)
+static int faulty_reshape(struct mddev *mddev)
 {
        int mode = mddev->new_layout & ModeMask;
        int count = mddev->new_layout >> ModeShift;
@@ -299,7 +299,7 @@ static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disk
        return sectors;
 }
 
-static int run(struct mddev *mddev)
+static int faulty_run(struct mddev *mddev)
 {
        struct md_rdev *rdev;
        int i;
@@ -327,7 +327,7 @@ static int run(struct mddev *mddev)
        md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
        mddev->private = conf;
 
-       reshape(mddev);
+       faulty_reshape(mddev);
 
        return 0;
 }
@@ -344,11 +344,11 @@ static struct md_personality faulty_personality =
        .name           = "faulty",
        .level          = LEVEL_FAULTY,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = faulty_make_request,
+       .run            = faulty_run,
        .free           = faulty_free,
-       .status         = status,
-       .check_reshape  = reshape,
+       .status         = faulty_status,
+       .check_reshape  = faulty_reshape,
        .size           = faulty_size,
 };
 
index 0ded8e97751d270dbfdae0aac73a792043f10621..dd97d42458226b4b284b488983e218e01c814889 100644 (file)
@@ -293,6 +293,7 @@ static void recover_bitmaps(struct md_thread *thread)
 dlm_unlock:
                dlm_unlock_sync(bm_lockres);
 clear_bit:
+               lockres_free(bm_lockres);
                clear_bit(slot, &cinfo->recovery_map);
        }
 }
@@ -682,8 +683,10 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
                bm_lockres = lockres_init(mddev, str, NULL, 1);
                if (!bm_lockres)
                        return -ENOMEM;
-               if (i == (cinfo->slot_number - 1))
+               if (i == (cinfo->slot_number - 1)) {
+                       lockres_free(bm_lockres);
                        continue;
+               }
 
                bm_lockres->flags |= DLM_LKF_NOQUEUE;
                ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
@@ -858,6 +861,7 @@ static int leave(struct mddev *mddev)
        lockres_free(cinfo->token_lockres);
        lockres_free(cinfo->ack_lockres);
        lockres_free(cinfo->no_new_dev_lockres);
+       lockres_free(cinfo->resync_lockres);
        lockres_free(cinfo->bitmap_lockres);
        unlock_all_bitmaps(mddev);
        dlm_release_lockspace(cinfo->lockspace, 2);
index c4b9134092260f22939027a0a82945b146a4ebf9..4e3843f7d24592cc596df1fc6d6c4577a9a6f57f 100644 (file)
@@ -1044,7 +1044,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 {
        struct r1conf *conf = mddev->private;
        struct raid1_info *mirror;
@@ -1422,7 +1422,7 @@ read_again:
        wake_up(&conf->wait_barrier);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid1_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct r1conf *conf = mddev->private;
        int i;
@@ -1439,7 +1439,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
        seq_printf(seq, "]");
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r1conf *conf = mddev->private;
@@ -2472,7 +2472,8 @@ static int init_resync(struct r1conf *conf)
  * that can be installed to exclude normal IO requests.
  */
 
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
+static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+                                  int *skipped)
 {
        struct r1conf *conf = mddev->private;
        struct r1bio *r1_bio;
@@ -2890,7 +2891,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 }
 
 static void raid1_free(struct mddev *mddev, void *priv);
-static int run(struct mddev *mddev)
+static int raid1_run(struct mddev *mddev)
 {
        struct r1conf *conf;
        int i;
@@ -3170,15 +3171,15 @@ static struct md_personality raid1_personality =
        .name           = "raid1",
        .level          = 1,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid1_make_request,
+       .run            = raid1_run,
        .free           = raid1_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid1_status,
+       .error_handler  = raid1_error,
        .hot_add_disk   = raid1_add_disk,
        .hot_remove_disk= raid1_remove_disk,
        .spare_active   = raid1_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid1_sync_request,
        .resize         = raid1_resize,
        .size           = raid1_size,
        .check_reshape  = raid1_reshape,
index ce959b4ae4dfd9a09d33828f5706d3a6aadd7f71..1c1447dd3417cd3e27f7c097a004b5e9b17a10d5 100644 (file)
@@ -1442,7 +1442,7 @@ retry_write:
        one_write_done(r10_bio);
 }
 
-static void make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 {
        struct r10conf *conf = mddev->private;
        sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -1484,7 +1484,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
        wake_up(&conf->wait_barrier);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid10_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct r10conf *conf = mddev->private;
        int i;
@@ -1562,7 +1562,7 @@ static int enough(struct r10conf *conf, int ignore)
                _enough(conf, 1, ignore);
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r10conf *conf = mddev->private;
@@ -2802,7 +2802,7 @@ static int init_resync(struct r10conf *conf)
  *
  */
 
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                             int *skipped)
 {
        struct r10conf *conf = mddev->private;
@@ -3523,7 +3523,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        return ERR_PTR(err);
 }
 
-static int run(struct mddev *mddev)
+static int raid10_run(struct mddev *mddev)
 {
        struct r10conf *conf;
        int i, disk_idx, chunk_size;
@@ -4617,15 +4617,15 @@ static struct md_personality raid10_personality =
        .name           = "raid10",
        .level          = 10,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid10_make_request,
+       .run            = raid10_run,
        .free           = raid10_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid10_status,
+       .error_handler  = raid10_error,
        .hot_add_disk   = raid10_add_disk,
        .hot_remove_disk= raid10_remove_disk,
        .spare_active   = raid10_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid10_sync_request,
        .quiesce        = raid10_quiesce,
        .size           = raid10_size,
        .resize         = raid10_resize,
index a086014dcd49915d5dad8b2c57b9bbfa30b39e5c..b4f02c9959f23c1bb5e8feccb2b9bf6e1c59e862 100644 (file)
@@ -2496,7 +2496,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
        dev->sector = raid5_compute_blocknr(sh, i, previous);
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r5conf *conf = mddev->private;
@@ -2958,7 +2958,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
         * If several bio share a stripe. The bio bi_phys_segments acts as a
         * reference count to avoid race. The reference count should already be
         * increased before this function is called (for example, in
-        * make_request()), so other bio sharing this stripe will not free the
+        * raid5_make_request()), so other bio sharing this stripe will not free the
         * stripe. If a stripe is owned by one stripe, the stripe lock will
         * protect it.
         */
@@ -5135,7 +5135,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
        }
 }
 
-static void make_request(struct mddev *mddev, struct bio * bi)
+static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
        struct r5conf *conf = mddev->private;
        int dd_idx;
@@ -5225,7 +5225,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                new_sector = raid5_compute_sector(conf, logical_sector,
                                                  previous,
                                                  &dd_idx, NULL);
-               pr_debug("raid456: make_request, sector %llu logical %llu\n",
+               pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
                        (unsigned long long)new_sector,
                        (unsigned long long)logical_sector);
 
@@ -5575,7 +5575,8 @@ ret:
        return retn;
 }
 
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
+static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
+                                         int *skipped)
 {
        struct r5conf *conf = mddev->private;
        struct stripe_head *sh;
@@ -6674,7 +6675,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
        return 0;
 }
 
-static int run(struct mddev *mddev)
+static int raid5_run(struct mddev *mddev)
 {
        struct r5conf *conf;
        int working_disks = 0;
@@ -7048,7 +7049,7 @@ static void raid5_free(struct mddev *mddev, void *priv)
        mddev->to_remove = &raid5_attrs_group;
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid5_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct r5conf *conf = mddev->private;
        int i;
@@ -7864,15 +7865,15 @@ static struct md_personality raid6_personality =
        .name           = "raid6",
        .level          = 6,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid5_make_request,
+       .run            = raid5_run,
        .free           = raid5_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid5_status,
+       .error_handler  = raid5_error,
        .hot_add_disk   = raid5_add_disk,
        .hot_remove_disk= raid5_remove_disk,
        .spare_active   = raid5_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid5_sync_request,
        .resize         = raid5_resize,
        .size           = raid5_size,
        .check_reshape  = raid6_check_reshape,
@@ -7887,15 +7888,15 @@ static struct md_personality raid5_personality =
        .name           = "raid5",
        .level          = 5,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid5_make_request,
+       .run            = raid5_run,
        .free           = raid5_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid5_status,
+       .error_handler  = raid5_error,
        .hot_add_disk   = raid5_add_disk,
        .hot_remove_disk= raid5_remove_disk,
        .spare_active   = raid5_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid5_sync_request,
        .resize         = raid5_resize,
        .size           = raid5_size,
        .check_reshape  = raid5_check_reshape,
@@ -7911,15 +7912,15 @@ static struct md_personality raid4_personality =
        .name           = "raid4",
        .level          = 4,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid5_make_request,
+       .run            = raid5_run,
        .free           = raid5_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid5_status,
+       .error_handler  = raid5_error,
        .hot_add_disk   = raid5_add_disk,
        .hot_remove_disk= raid5_remove_disk,
        .spare_active   = raid5_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid5_sync_request,
        .resize         = raid5_resize,
        .size           = raid5_size,
        .check_reshape  = raid5_check_reshape,
index 9c59f4306883a6de666052c613896b850313b8d0..f5956402fc69dfcd88c9870abe5ea76e3d6e6d65 100644 (file)
@@ -38,7 +38,7 @@ static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
 #endif
 
 /* lnb control */
-#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
+#if (FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)) && FE_SUPPORTED(PLL)
 static int flexcop_set_voltage(struct dvb_frontend *fe,
                               enum fe_sec_voltage voltage)
 {
@@ -68,7 +68,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe,
 #endif
 
 #if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312)
-static int flexcop_sleep(struct dvb_frontend* fe)
+static int __maybe_unused flexcop_sleep(struct dvb_frontend* fe)
 {
        struct flexcop_device *fc = fe->dvb->priv;
        if (fc->fe_sleep)
index 412c5daf2b48c4208f2fb0bd60ed50fb2f4acbef..0f5114d406f822004dca7f2b6e148ca49110d710 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
  * flexcop.c - main module part
- * Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@posteo.de>
  * based on skystar2-driver Copyright (C) 2003 Vadim Catana, skystar@moldova.cc
  *
  * Acknowledgements:
@@ -34,7 +34,7 @@
 #include "flexcop.h"
 
 #define DRIVER_NAME "B2C2 FlexcopII/II(b)/III digital TV receiver chip"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de"
 
 #ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
 #define DEBSTATUS ""
index 577e82058fdc9e268bf1268651a6402f7f7de9ca..50e3f76d4847412e1280e14c1fe0b44847c8fd57 100644 (file)
@@ -1,6 +1,6 @@
 /*  cypress_firmware.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for downloading the firmware to Cypress FX 1
index e493cbc7a5289ecae3c8c80fedd52b358e1ffd90..1e4f2735620529a5bc3531947c15e003679b95df 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for downloading the firmware to Cypress FX 1
index 1c1c298d2289b9bba1ab8fdd1629c660dedfba92..dbdbb84294c5f1b5a225fe493a603d18bdbcab13 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-ids.h is part of the DVB USB library.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) see
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see
  * dvb-usb-init.c for copyright information.
  *
  * a header file containing define's for the USB device supported by the
 #define USB_PID_DIBCOM_STK807XP                                0x1f90
 #define USB_PID_DIBCOM_STK807XPVR                      0x1f98
 #define USB_PID_DIBCOM_STK8096GP                        0x1fa0
+#define USB_PID_DIBCOM_STK8096PVR                       0x1faa
 #define USB_PID_DIBCOM_NIM8096MD                        0x1fa8
 #define USB_PID_DIBCOM_TFE8096P                                0x1f9C
 #define USB_PID_DIBCOM_ANCHOR_2135_COLD                        0x2131
 #define USB_PID_TECHNOTREND_CONNECT_CT3650             0x300d
 #define USB_PID_TECHNOTREND_CONNECT_S2_4600             0x3011
 #define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI                0x3012
+#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2      0x3015
 #define USB_PID_TECHNOTREND_TVSTICK_CT2_4400           0x3014
 #define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY       0x005a
 #define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2     0x0081
 #define USB_PID_TERRATEC_CINERGY_T_EXPRESS             0x0062
 #define USB_PID_TERRATEC_CINERGY_T_XXS                 0x0078
 #define USB_PID_TERRATEC_CINERGY_T_XXS_2               0x00ab
+#define USB_PID_TERRATEC_CINERGY_S2_R1                 0x00a8
+#define USB_PID_TERRATEC_CINERGY_S2_R2                 0x00b0
+#define USB_PID_TERRATEC_CINERGY_S2_R3                 0x0102
+#define USB_PID_TERRATEC_CINERGY_S2_R4                 0x0105
 #define USB_PID_TERRATEC_H7                            0x10b4
 #define USB_PID_TERRATEC_H7_2                          0x10a3
 #define USB_PID_TERRATEC_H7_3                          0x10a5
index 40080645341e727a6380feac3f991fb022b86bf4..b1255b7c0b0edb7c34dd65c3983a1f02ededb949 100644 (file)
@@ -899,10 +899,10 @@ void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec)
        s32 delta;
 
        *waketime = ktime_add_us(*waketime, add_usec);
-       delta = ktime_us_delta(ktime_get_real(), *waketime);
+       delta = ktime_us_delta(ktime_get_boottime(), *waketime);
        if (delta > 2500) {
                msleep((delta - 1500) / 1000);
-               delta = ktime_us_delta(ktime_get_real(), *waketime);
+               delta = ktime_us_delta(ktime_get_boottime(), *waketime);
        }
        if (delta > 0)
                udelay(delta);
@@ -1162,18 +1162,24 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
        _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
 };
 
-static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
+static void dtv_property_dump(struct dvb_frontend *fe,
+                             bool is_set,
+                             struct dtv_property *tvp)
 {
        int i;
 
        if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) {
-               dev_warn(fe->dvb->device, "%s: tvp.cmd = 0x%08x undefined\n",
-                               __func__, tvp->cmd);
+               dev_warn(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x undefined\n",
+                               __func__,
+                               is_set ? "SET" : "GET",
+                               tvp->cmd);
                return;
        }
 
-       dev_dbg(fe->dvb->device, "%s: tvp.cmd    = 0x%08x (%s)\n", __func__,
-                       tvp->cmd, dtv_cmds[tvp->cmd].name);
+       dev_dbg(fe->dvb->device, "%s: %s tvp.cmd    = 0x%08x (%s)\n", __func__,
+               is_set ? "SET" : "GET",
+               tvp->cmd,
+               dtv_cmds[tvp->cmd].name);
 
        if (dtv_cmds[tvp->cmd].buffer) {
                dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.len = 0x%02x\n",
@@ -1589,7 +1595,7 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
                        return r;
        }
 
-       dtv_property_dump(fe, tvp);
+       dtv_property_dump(fe, false, tvp);
 
        return 0;
 }
@@ -1830,6 +1836,8 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
                        return r;
        }
 
+       dtv_property_dump(fe, true, tvp);
+
        switch(tvp->cmd) {
        case DTV_CLEAR:
                /*
@@ -2451,7 +2459,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
                        u8 last = 1;
                        if (dvb_frontend_debug)
                                printk("%s switch command: 0x%04lx\n", __func__, swcmd);
-                       nexttime = ktime_get_real();
+                       nexttime = ktime_get_boottime();
                        if (dvb_frontend_debug)
                                tv[0] = nexttime;
                        /* before sending a command, initialize by sending
@@ -2462,7 +2470,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
 
                        for (i = 0; i < 9; i++) {
                                if (dvb_frontend_debug)
-                                       tv[i+1] = ktime_get_real();
+                                       tv[i+1] = ktime_get_boottime();
                                if ((swcmd & 0x01) != last) {
                                        /* set voltage to (last ? 13V : 18V) */
                                        fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18);
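
The dvb_frontend.c hunks above move the DiSEqC timing code from ktime_get_real() to ktime_get_boottime(): the realtime clock can jump when the wall clock is stepped (settimeofday, NTP), which would throw off the computed microsecond delays, while the boottime clock only moves forward. A simplified sketch of a wait built on the same helpers; this is not the driver's own dvb_frontend_sleep_until(), just the general shape under that assumption:

#include <linux/ktime.h>
#include <linux/delay.h>

/* Wait until the monotonic boottime clock reaches *waketime. */
static void wait_until_boottime(const ktime_t *waketime)
{
	/* positive delta: the wake time is still in the future */
	s64 delta = ktime_us_delta(*waketime, ktime_get_boottime());

	if (delta > 2000) {
		msleep(delta / 1000);	/* coarse part of the wait */
		delta = ktime_us_delta(*waketime, ktime_get_boottime());
	}
	if (delta > 0)
		udelay(delta);		/* fine-grained remainder */
}
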
index 560450a0b32a5b2399196aa2a9a8d0ff161534e9..1b9732ee0a4fbd4720289d336148941e4e0de550 100644 (file)
@@ -58,7 +58,7 @@ static const char * const dnames[] = {
 #define DVB_MAX_IDS            MAX_DVB_MINORS
 #else
 #define DVB_MAX_IDS            4
-#define nums2minor(num,type,id)        ((num << 6) | (id << 4) | type)
+#define nums2minor(num, type, id)      ((num << 6) | (id << 4) | type)
 #define MAX_DVB_MINORS         (DVB_MAX_ADAPTERS*64)
 #endif
 
@@ -85,7 +85,7 @@ static int dvb_device_open(struct inode *inode, struct file *file)
                file->private_data = dvbdev;
                replace_fops(file, new_fops);
                if (file->f_op->open)
-                       err = file->f_op->open(inode,file);
+                       err = file->f_op->open(inode, file);
                up_read(&minor_rwsem);
                mutex_unlock(&dvbdev_mutex);
                return err;
@@ -352,7 +352,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
        ret = media_device_register_entity(dvbdev->adapter->mdev,
                                           dvbdev->entity);
        if (ret)
-               return (ret);
+               return ret;
 
        printk(KERN_DEBUG "%s: media entity '%s' registered.\n",
                __func__, dvbdev->entity->name);
@@ -620,8 +620,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
                        return -ENOMEM;
                adap->conn = conn;
 
-               adap->conn_pads = kcalloc(1, sizeof(*adap->conn_pads),
-                                           GFP_KERNEL);
+               adap->conn_pads = kzalloc(sizeof(*adap->conn_pads), GFP_KERNEL);
                if (!adap->conn_pads)
                        return -ENOMEM;
 
@@ -661,7 +660,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
        if (ntuner && ndemod) {
                ret = media_create_pad_links(mdev,
                                             MEDIA_ENT_F_TUNER,
-                                            tuner, TUNER_PAD_IF_OUTPUT,
+                                            tuner, TUNER_PAD_OUTPUT,
                                             MEDIA_ENT_F_DTV_DEMOD,
                                             demod, 0, MEDIA_LNK_FL_ENABLED,
                                             false);
@@ -868,7 +867,7 @@ int dvb_usercopy(struct file *file,
                        parg = sbuf;
                } else {
                        /* too big to allocate from stack */
-                       mbuf = kmalloc(_IOC_SIZE(cmd),GFP_KERNEL);
+                       mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
                        if (NULL == mbuf)
                                return -ENOMEM;
                        parg = mbuf;
index e23197da84af51bc244772d2dc2e7b01caf8eae9..41ab5defb7989b058bb42dfa90ecd972ded4381d 100644 (file)
@@ -1344,6 +1344,10 @@ err:
 static void af9013_release(struct dvb_frontend *fe)
 {
        struct af9013_state *state = fe->demodulator_priv;
+
+       /* stop statistics polling */
+       cancel_delayed_work_sync(&state->statistics_work);
+
        kfree(state);
 }
 
index bc35206a08215b83a6cf49719cfd1db9363af98c..8b328d1ca8d365f6cac693a443a80adee4e9041e 100644 (file)
@@ -1372,6 +1372,9 @@ static int af9033_remove(struct i2c_client *client)
 
        dev_dbg(&dev->client->dev, "\n");
 
+       /* stop statistics polling */
+       cancel_delayed_work_sync(&dev->stat_work);
+
        dev->fe.ops.release = NULL;
        dev->fe.demodulator_priv = NULL;
        kfree(dev);
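
The af9013 and af9033 hunks above (and the rtl2830 and ts2020 ones further down) add the same fix: a frontend that polls statistics from a self-rearming delayed work must cancel that work synchronously before its state is freed, otherwise the work can still fire after kfree() and touch freed memory. A minimal sketch of the pattern with illustrative names (my_dev and my_stat_poll are not from these drivers):

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work stat_work;
	/* regmap, counters, ... */
};

static void my_stat_poll(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, stat_work.work);

	/* read statistics from the hardware here ... */

	/* re-arm: poll again in two seconds */
	schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
}

static void my_remove(struct my_dev *dev)
{
	/* stop statistics polling; waits for a running callback to finish */
	cancel_delayed_work_sync(&dev->stat_work);

	kfree(dev);	/* only now is it safe to free the state */
}
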
index d30275f27644bd963d16329f117d73fc3364c15d..bb698839e477e23a07a04dbc15082cdd341d04f0 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2001-5, B2C2 inc.
  *
- *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  *  This driver is "hard-coded" to be used with the 1st generation of
  *  Technisat/B2C2's Air2PC ATSC PCI/USB cards/boxes. The pll-programming
@@ -865,5 +865,5 @@ static struct dvb_frontend_ops bcm3510_ops = {
 };
 
 MODULE_DESCRIPTION("Broadcom BCM3510 ATSC (8VSB/16VSB & ITU J83 AnnexB FEC QAM64/256) demodulator driver");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_LICENSE("GPL");
index ff66492fb9408f5324c749d64638df67929d1077..961c2eb87c684a595ede26ac332a6889c8323309 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2001-5, B2C2 inc.
  *
- *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 3bb1bc2a04f006a43d849ad2dad914ffde425852..67f24686c31b9dd3306bc51c5271d9af25a47e11 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2001-5, B2C2 inc.
  *
- *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 24a457d9d803372dd25ced70284bdab381c0d2f5..ba4cb7557aa577ce762ef455e26d30176ad004b8 100644 (file)
@@ -606,8 +606,7 @@ static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
 static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
                int val)
 {
-       struct cxd2820r_priv *priv =
-                       container_of(chip, struct cxd2820r_priv, gpio_chip);
+       struct cxd2820r_priv *priv = gpiochip_get_data(chip);
        u8 gpio[GPIO_COUNT];
 
        dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
@@ -620,8 +619,7 @@ static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
 
 static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
 {
-       struct cxd2820r_priv *priv =
-                       container_of(chip, struct cxd2820r_priv, gpio_chip);
+       struct cxd2820r_priv *priv = gpiochip_get_data(chip);
        u8 gpio[GPIO_COUNT];
 
        dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
@@ -636,8 +634,7 @@ static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
 
 static int cxd2820r_gpio_get(struct gpio_chip *chip, unsigned nr)
 {
-       struct cxd2820r_priv *priv =
-                       container_of(chip, struct cxd2820r_priv, gpio_chip);
+       struct cxd2820r_priv *priv = gpiochip_get_data(chip);
 
        dev_dbg(&priv->i2c->dev, "%s: nr=%d\n", __func__, nr);
 
@@ -731,7 +728,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
                priv->gpio_chip.base = -1; /* dynamic allocation */
                priv->gpio_chip.ngpio = GPIO_COUNT;
                priv->gpio_chip.can_sleep = 1;
-               ret = gpiochip_add(&priv->gpio_chip);
+               ret = gpiochip_add_data(&priv->gpio_chip, priv);
                if (ret)
                        goto error;
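
The cxd2820r hunks above drop the container_of() lookups in the GPIO callbacks: the chip is now registered with gpiochip_add_data(&chip, priv), and every callback recovers the driver state with gpiochip_get_data(chip). Reduced to the bare pattern, with illustrative struct and function names:

#include <linux/gpio/driver.h>

struct my_priv {
	struct gpio_chip gpio_chip;
	/* i2c client, register cache, ... */
};

static int my_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	struct my_priv *priv = gpiochip_get_data(chip);	/* replaces container_of() */

	/* read the pin level from priv->... */
	return 0;
}

static int my_gpio_register(struct my_priv *priv)
{
	priv->gpio_chip.label = "my-gpio";
	priv->gpio_chip.get = my_gpio_get;
	priv->gpio_chip.base = -1;	/* dynamic GPIO number allocation */
	priv->gpio_chip.ngpio = 2;

	/* the second argument is what gpiochip_get_data() later returns */
	return gpiochip_add_data(&priv->gpio_chip, priv);
}
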
 
index 0b8fb5dd18898a46c93348caabd4818ec4dbe37b..ee7d66997ccde63508b9f4ac1a626cd9c1f0f286 100644 (file)
@@ -774,6 +774,6 @@ free_mem:
 }
 EXPORT_SYMBOL(dib0070_attach);
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner");
 MODULE_LICENSE("GPL");
index 47cb72243b9da6bfc9c62271592633f2ecadc3db..976ee034a4307052a344c6c70592826a8b535641 100644 (file)
@@ -2669,7 +2669,7 @@ free_mem:
 }
 EXPORT_SYMBOL(dib0090_fw_register);
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
-MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
 MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner");
 MODULE_LICENSE("GPL");
index 6ae9899b5b450bec39a56601134accd686dbd990..d5dfafb4ef13b665a5439c9a68ce72eb69452486 100644 (file)
@@ -2,11 +2,11 @@
  * public header file of the frontend drivers for mobile DVB-T demodulators
  * DiBcom 3000M-B and DiBcom 3000P/M-C (http://www.dibcom.fr/)
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * based on GPL code from DibCom, which has
  *
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
@@ -14,7 +14,7 @@
  *
  * Acknowledgements
  *
- *  Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver
+ *  Amaury Demol from DiBcom for providing specs and driver
  *  sources, on which this driver (and the dvb-dibusb) are based.
  *
  * see Documentation/dvb/README.dvb-usb for more information
index 7a61172d0d453b9aa38282dbb6cd97d239387080..3ca300939f790bb1c3ebaceca298932a19f1ac3c 100644 (file)
@@ -2,11 +2,11 @@
  * Frontend driver for mobile DVB-T demodulator DiBcom 3000M-B
  * DiBcom (http://www.dibcom.fr/)
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * based on GPL code from DibCom, which has
  *
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
@@ -14,7 +14,7 @@
  *
  * Acknowledgements
  *
- *  Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver
+ *  Amaury Demol from DiBcom for providing specs and driver
  *  sources, on which this driver (and the dvb-dibusb) are based.
  *
  * see Documentation/dvb/README.dvb-usb for more information
@@ -36,7 +36,7 @@
 /* Version information */
 #define DRIVER_VERSION "0.1"
 #define DRIVER_DESC "DiBcom 3000M-B DVB-T demodulator"
-#define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@desy.de"
+#define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@posteo.de"
 
 static int debug;
 module_param(debug, int, 0644);
index 9dc235aa44b71a712ba7c57918f9c786dd237e15..0459d5c84314e7cca51532d2e088ddd28b6cc130 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * dib3000mb_priv.h
  *
- * Copyright (C) 2004 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
index 583d6b7fabedad77116565a8f948c0e10ec28318..ac90ed3af37e40a6a184dd7698a6f756140adddb 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for DiBcom DiB3000MC/P-demodulator.
  *
  * Copyright (C) 2004-7 DiBcom (http://www.dibcom.fr/)
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * This code is partially based on the previous dib3000mc.c .
  *
@@ -939,6 +939,6 @@ static struct dvb_frontend_ops dib3000mc_ops = {
        .read_ucblocks        = dib3000mc_read_unc_blocks,
 };
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 3000MC/P COFDM demodulator");
 MODULE_LICENSE("GPL");
index 74816f79361180c0191c4cdd69239121371543f5..b37e69e6a58c7ca63e05167780de8e9cadd86738 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for DiBcom DiB3000MC/P-demodulator.
  *
  * Copyright (C) 2004-6 DiBcom (http://www.dibcom.fr/)
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher\@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * This code is partially based on the previous dib3000mc.c .
  *
index 35eb71fe3c2b4977d5db8bf68e5f7b8039cb60da..8b21cccf3c3a04184629af11a30cb0d9c3d91d9b 100644 (file)
@@ -1465,6 +1465,6 @@ static struct dvb_frontend_ops dib7000m_ops = {
        .read_ucblocks        = dib7000m_read_unc_blocks,
 };
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 7000MA/MB/PA/PB/MC COFDM demodulator");
 MODULE_LICENSE("GPL");
index 33be5d6b9e10589275529a83525914ad6a33a470..65ab79ed5e3ecb3e3a00c31adbb6b7ca5e8e03b6 100644 (file)
@@ -2834,7 +2834,7 @@ static struct dvb_frontend_ops dib7000p_ops = {
        .read_ucblocks = dib7000p_read_unc_blocks,
 };
 
-MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Olivier Grenie <olivie.grenie@parrot.com>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 7000PC COFDM demodulator");
 MODULE_LICENSE("GPL");
index 94c26270fff0e98375161ddad5e9c0ae5fc8d2bd..349d2f1f62ce0341e2612d519b7d7ee3f043ed21 100644 (file)
@@ -4516,6 +4516,6 @@ void *dib8000_attach(struct dib8000_ops *ops)
 }
 EXPORT_SYMBOL(dib8000_attach);
 
-MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@dibcom.fr, " "Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator");
 MODULE_LICENSE("GPL");
index 8f92aca0b073555a40fa422b864ddb89646e0232..91888a2f53018bc2755a31d2a128560486226ce7 100644 (file)
@@ -2589,7 +2589,7 @@ static struct dvb_frontend_ops dib9000_ops = {
        .read_ucblocks = dib9000_read_unc_blocks,
 };
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
-MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
 MODULE_DESCRIPTION("Driver for the DiBcom 9000 COFDM demodulator");
 MODULE_LICENSE("GPL");
index 43be7238311ec513726e8ecaa13d0864c2b622dd..723358d7ca84debd209750f58a1423b1dd3861f9 100644 (file)
@@ -510,6 +510,6 @@ u32 systime(void)
 }
 EXPORT_SYMBOL(systime);
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
 MODULE_LICENSE("GPL");
index b792f305cf15f05fc1289a4bdd0566269bfd15e0..74b771218033d76dee58d151dca1addd54404b13 100644 (file)
@@ -900,6 +900,9 @@ static int rtl2830_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "\n");
 
+       /* stop statistics polling */
+       cancel_delayed_work_sync(&dev->stat_work);
+
        i2c_del_mux_adapter(dev->adapter);
        regmap_exit(dev->regmap);
        kfree(dev);
index 2b93241d4bc102618c722107477b08482c7d86bb..8bf716a8ea58c2d5161ab66e08cae9ec1f1f3545 100644 (file)
@@ -225,22 +225,18 @@ static int si2165_writereg32(struct si2165_state *state, const u16 reg, u32 val)
 static int si2165_writereg_mask8(struct si2165_state *state, const u16 reg,
                                 u8 val, u8 mask)
 {
-       int ret;
-       u8 tmp;
-
        if (mask != 0xff) {
-               ret = si2165_readreg8(state, reg, &tmp);
+               u8 tmp;
+               int ret = si2165_readreg8(state, reg, &tmp);
+
                if (ret < 0)
-                       goto err;
+                       return ret;
 
                val &= mask;
                tmp &= ~mask;
                val |= tmp;
        }
-
-       ret = si2165_writereg8(state, reg, val);
-err:
-       return ret;
+       return si2165_writereg8(state, reg, val);
 }
 
 #define REG16(reg, val) { (reg), (val) & 0xff }, { (reg)+1, (val)>>8 & 0xff }
@@ -825,19 +821,19 @@ static int si2165_set_frontend_dvbt(struct dvb_frontend *fe)
        struct si2165_state *state = fe->demodulator_priv;
        u32 dvb_rate = 0;
        u16 bw10k;
+       u32 bw_hz = p->bandwidth_hz;
 
        dprintk("%s: called\n", __func__);
 
        if (!state->has_dvbt)
                return -EINVAL;
 
-       if (p->bandwidth_hz > 0) {
-               dvb_rate = p->bandwidth_hz * 8 / 7;
-               bw10k = p->bandwidth_hz / 10000;
-       } else {
-               dvb_rate = 8 * 8 / 7;
-               bw10k = 800;
-       }
+       /* no bandwidth auto-detection */
+       if (bw_hz == 0)
+               return -EINVAL;
+
+       dvb_rate = bw_hz * 8 / 7;
+       bw10k = bw_hz / 10000;
 
        ret = si2165_adjust_pll_divl(state, 12);
        if (ret < 0)
index a8177807fb6568f6d4a261c3509242ac8152b7b3..c43f36d323409b768c5dfbe2ff0fab22e03995af 100644 (file)
@@ -422,7 +422,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long
        if (debug_legacy_dish_switch)
                printk ("%s switch command: 0x%04lx\n",__func__, cmd);
 
-       nexttime = ktime_get_real();
+       nexttime = ktime_get_boottime();
        if (debug_legacy_dish_switch)
                tv[0] = nexttime;
        stv0299_writeregI (state, 0x0c, reg0x0c | 0x50); /* set LNB to 18V */
@@ -431,7 +431,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long
 
        for (i=0; i<9; i++) {
                if (debug_legacy_dish_switch)
-                       tv[i+1] = ktime_get_real();
+                       tv[i+1] = ktime_get_boottime();
                if((cmd & 0x01) != last) {
                        /* set voltage to (last ? 13V : 18V) */
                        stv0299_writeregI (state, 0x0c, reg0x0c | (last ? lv_mask : 0x50));
index e66154e5c1d7710e0ddcc8c6183f707be0e388ea..a62c01e454f518399270443f0311106a39ef426b 100644 (file)
@@ -355,7 +355,7 @@ static struct dvb_tuner_ops stv6110x_ops = {
        .release                = stv6110x_release
 };
 
-static struct stv6110x_devctl stv6110x_ctl = {
+static const struct stv6110x_devctl stv6110x_ctl = {
        .tuner_init             = stv6110x_init,
        .tuner_sleep            = stv6110x_sleep,
        .tuner_set_mode         = stv6110x_set_mode,
@@ -369,7 +369,7 @@ static struct stv6110x_devctl stv6110x_ctl = {
        .tuner_get_status       = stv6110x_get_status,
 };
 
-struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
                                        const struct stv6110x_config *config,
                                        struct i2c_adapter *i2c)
 {
index 9f7eb251aec32cb0a6d7bf821000a95690674332..696b6e5b9e7be1969fc51d1fb482ad52f52b02e7 100644 (file)
@@ -55,12 +55,12 @@ struct stv6110x_devctl {
 
 #if IS_REACHABLE(CONFIG_DVB_STV6110x)
 
-extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+extern const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
                                               const struct stv6110x_config *config,
                                               struct i2c_adapter *i2c);
 
 #else
-static inline struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+static inline const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
                                                      const struct stv6110x_config *config,
                                                      struct i2c_adapter *i2c)
 {
index 0ec936a660a765801a60791f095981e27943946d..a993aba27b7ef33e8c7a48dc849950f737504b28 100644 (file)
@@ -70,7 +70,7 @@ struct stv6110x_state {
        const struct stv6110x_config    *config;
        u8                              regs[8];
 
-       struct stv6110x_devctl          *devctl;
+       const struct stv6110x_devctl    *devctl;
 };
 
 #endif /* __STV6110x_PRIV_H */
index 0e209b56c76c1c487007121e336e0438944acb93..c6abeb4fba9db51827fc815fde920b1a30faa1fa 100644 (file)
@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
 {
        struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
        struct tda1004x_state* state = fe->demodulator_priv;
+       int status;
 
        dprintk("%s\n", __func__);
 
+       status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
+       if (status == -1)
+               return -EIO;
+
+       /* Only update the properties cache if device is locked */
+       if (!(status & 8))
+               return 0;
+
        // inversion status
        fe_params->inversion = INVERSION_OFF;
        if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
index 7979e5d6498b52a2c01f4db05a44079eeb5c3bb5..14b410ffe612c2d76381ab0177d82970384913e8 100644 (file)
@@ -712,6 +712,10 @@ static int ts2020_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "\n");
 
+       /* stop statistics polling */
+       if (!dev->dont_poll)
+               cancel_delayed_work_sync(&dev->stat_work);
+
        regmap_exit(dev->regmap);
        kfree(dev);
        return 0;
index f8dd7505b5294ff167352a1d3ce8823605916db7..801a9b09d5e51e913089d4d29b8f19345730440e 100644 (file)
@@ -1884,6 +1884,26 @@ static int adv76xx_get_format(struct v4l2_subdev *sd,
        return 0;
 }
 
+static int adv76xx_get_selection(struct v4l2_subdev *sd,
+                                struct v4l2_subdev_pad_config *cfg,
+                                struct v4l2_subdev_selection *sel)
+{
+       struct adv76xx_state *state = to_state(sd);
+
+       if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+               return -EINVAL;
+       /* Only CROP, CROP_DEFAULT and CROP_BOUNDS are supported */
+       if (sel->target > V4L2_SEL_TGT_CROP_BOUNDS)
+               return -EINVAL;
+
+       sel->r.left     = 0;
+       sel->r.top      = 0;
+       sel->r.width    = state->timings.bt.width;
+       sel->r.height   = state->timings.bt.height;
+
+       return 0;
+}
+
 static int adv76xx_set_format(struct v4l2_subdev *sd,
                              struct v4l2_subdev_pad_config *cfg,
                              struct v4l2_subdev_format *format)
@@ -2404,6 +2424,7 @@ static const struct v4l2_subdev_video_ops adv76xx_video_ops = {
 
 static const struct v4l2_subdev_pad_ops adv76xx_pad_ops = {
        .enum_mbus_code = adv76xx_enum_mbus_code,
+       .get_selection = adv76xx_get_selection,
        .get_fmt = adv76xx_get_format,
        .set_fmt = adv76xx_set_format,
        .get_edid = adv76xx_get_edid,
@@ -2799,6 +2820,7 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
        struct device_node *endpoint;
        struct device_node *np;
        unsigned int flags;
+       int ret;
        u32 v;
 
        np = state->i2c_clients[ADV76XX_PAGE_IO]->dev.of_node;
@@ -2808,7 +2830,11 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
        if (!endpoint)
                return -EINVAL;
 
-       v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+       ret = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+       if (ret) {
+               of_node_put(endpoint);
+               return ret;
+       }
 
        if (!of_property_read_u32(endpoint, "default-input", &v))
                state->pdata.default_input = v;
index 830491960add2d239d817e46db7c4f0d2dc8d00a..bf82726fd3f44f684ac5677286764255055cc9a7 100644 (file)
@@ -478,7 +478,6 @@ static const struct i2c_device_id ir_kbd_id[] = {
        { "ir_rx_z8f0811_hdpvr", 0 },
        { }
 };
-MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
 
 static struct i2c_driver ir_kbd_driver = {
        .driver = {
index a84561d0d4a809c761c5ed51d58af93aa7737dd7..e016626ebf89b25de49a282582676bcd710b10c2 100644 (file)
@@ -688,6 +688,9 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
        int msp_revision;
        int msp_product, msp_prod_hi, msp_prod_lo;
        int msp_rom;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       int ret;
+#endif
 
        if (!id)
                strlcpy(client->name, "msp3400", sizeof(client->name));
@@ -704,6 +707,17 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
        sd = &state->sd;
        v4l2_i2c_subdev_init(sd, client, &msp_ops);
 
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       state->pads[IF_AUD_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+       state->pads[IF_AUD_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+
+       sd->entity.function = MEDIA_ENT_F_IF_AUD_DECODER;
+
+       ret = media_entity_pads_init(&sd->entity, 2, state->pads);
+       if (ret < 0)
+               return ret;
+#endif
+
        state->v4l2_std = V4L2_STD_NTSC;
        state->detected_std = V4L2_STD_ALL;
        state->audmode = V4L2_TUNER_MODE_STEREO;
index 6cae21366ed5e6b4ab7003dee04ba6504be05652..a8702aca187a97176eac9c4b2795e1b9c2969872 100644 (file)
@@ -7,6 +7,7 @@
 #include <media/drv-intf/msp3400.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
+#include <media/v4l2-mc.h>
 
 /* ---------------------------------------------------------------------- */
 
@@ -102,6 +103,10 @@ struct msp_state {
        wait_queue_head_t    wq;
        unsigned int         restart:1;
        unsigned int         watch_stereo:1;
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+       struct media_pad pads[IF_AUD_DEC_PAD_NUM_PADS];
+#endif
 };
 
 static inline struct msp_state *to_state(struct v4l2_subdev *sd)
index b9fea11d6b0b0e08ff2c4440f976a97a077aaa99..9ed1b26b6549822a46338f6ce2b826147f3903c4 100644 (file)
@@ -50,6 +50,9 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
 struct mt9v011 {
        struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_pad pad;
+#endif
        struct v4l2_ctrl_handler ctrls;
        unsigned width, height;
        unsigned xtal;
@@ -493,6 +496,9 @@ static int mt9v011_probe(struct i2c_client *c,
        u16 version;
        struct mt9v011 *core;
        struct v4l2_subdev *sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+       int ret;
+#endif
 
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(c->adapter,
@@ -506,6 +512,15 @@ static int mt9v011_probe(struct i2c_client *c,
        sd = &core->sd;
        v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
 
+#ifdef CONFIG_MEDIA_CONTROLLER
+       core->pad.flags = MEDIA_PAD_FL_SOURCE;
+       sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+       ret = media_entity_pads_init(&sd->entity, 1, &core->pad);
+       if (ret < 0)
+               return ret;
+#endif
+
        /* Check if the sensor is really a MT9V011 */
        version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
        if ((version != MT9V011_VERSION) &&
index 2e1d116a64e7b43eb1f6e886f842d9ef296664c9..501b37039449730411ad1673da7f4458d15ce373 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/log2.h>
 #include <linux/mutex.h>
@@ -251,6 +252,8 @@ struct mt9v032 {
 
        struct regmap *regmap;
        struct clk *clk;
+       struct gpio_desc *reset_gpio;
+       struct gpio_desc *standby_gpio;
 
        struct mt9v032_platform_data *pdata;
        const struct mt9v032_model_info *model;
@@ -312,16 +315,31 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032)
        struct regmap *map = mt9v032->regmap;
        int ret;
 
+       if (mt9v032->reset_gpio)
+               gpiod_set_value_cansleep(mt9v032->reset_gpio, 1);
+
        ret = clk_set_rate(mt9v032->clk, mt9v032->sysclk);
        if (ret < 0)
                return ret;
 
+       /* System clock has to be enabled before releasing the reset */
        ret = clk_prepare_enable(mt9v032->clk);
        if (ret)
                return ret;
 
        udelay(1);
 
+       if (mt9v032->reset_gpio) {
+               gpiod_set_value_cansleep(mt9v032->reset_gpio, 0);
+
+               /* After releasing reset we need to wait 10 clock cycles
+                * before accessing the sensor over I2C. As the minimum SYSCLK
+                * frequency is 13MHz, waiting 1µs will be enough in the worst
+                * case.
+                */
+               udelay(1);
+       }
+
        /* Reset the chip and stop data read out */
        ret = regmap_write(map, MT9V032_RESET, 1);
        if (ret < 0)
@@ -954,6 +972,16 @@ static int mt9v032_probe(struct i2c_client *client,
        if (IS_ERR(mt9v032->clk))
                return PTR_ERR(mt9v032->clk);
 
+       mt9v032->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+                                                     GPIOD_OUT_HIGH);
+       if (IS_ERR(mt9v032->reset_gpio))
+               return PTR_ERR(mt9v032->reset_gpio);
+
+       mt9v032->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
+                                                       GPIOD_OUT_LOW);
+       if (IS_ERR(mt9v032->standby_gpio))
+               return PTR_ERR(mt9v032->standby_gpio);
+
        mutex_init(&mt9v032->power_lock);
        mt9v032->pdata = pdata;
        mt9v032->model = (const void *)did->driver_data;
index 02b9a3440557b49e9ad7dd366d201ebcd6e962ed..1f999e9c0118e358ac111c17084c21799a677db0 100644 (file)
@@ -1321,10 +1321,6 @@ static int ov2659_detect(struct v4l2_subdev *sd)
        }
        usleep_range(1000, 2000);
 
-       ret = ov2659_init(sd, 0);
-       if (ret < 0)
-               return ret;
-
        /* Check sensor revision */
        ret = ov2659_read(client, REG_SC_CHIP_ID_H, &pid);
        if (!ret)
@@ -1338,8 +1334,10 @@ static int ov2659_detect(struct v4l2_subdev *sd)
                        dev_err(&client->dev,
                                "Sensor detection failed (%04X, %d)\n",
                                id, ret);
-               else
+               else {
                        dev_info(&client->dev, "Found OV%04X sensor\n", id);
+                       ret = ov2659_init(sd, 0);
+               }
        }
 
        return ret;
index 57b3d27993a40ffadf7d42a5b791e71f3f74eb76..08af58fb8e7d0089a410b8ceb1e4c2466ba9f9da 100644 (file)
@@ -1639,8 +1639,10 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
                return 0;
        }
 
-       v4l2_of_parse_endpoint(node_ep, &ep);
+       ret = v4l2_of_parse_endpoint(node_ep, &ep);
        of_node_put(node_ep);
+       if (ret)
+               return ret;
 
        if (ep.bus_type != V4L2_MBUS_CSI2) {
                dev_err(dev, "unsupported bus type\n");
index 7d65b36434b1f3ccfccf25a7ff2071821080ff4b..72ef9f936e6ceb13ad7b94e5c998ffa9543700e8 100644 (file)
@@ -37,7 +37,6 @@ enum spi_direction {
        SPI_DIR_RX,
        SPI_DIR_TX
 };
-MODULE_DEVICE_TABLE(of, s5c73m3_spi_ids);
 
 static int spi_xmit(struct spi_device *spi_dev, void *addr, const int len,
                                                        enum spi_direction dir)
index fc3a5a8e6c9c7ebafb5b8a3ba97e1fa7cbe564fd..db82ed05792ef7d21c99879c7cd38e9a754af0fe 100644 (file)
@@ -1868,8 +1868,11 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
                return -EINVAL;
        }
 
-       v4l2_of_parse_endpoint(node_ep, &ep);
+       ret = v4l2_of_parse_endpoint(node_ep, &ep);
        of_node_put(node_ep);
+       if (ret)
+               return ret;
+
        state->bus_type = ep.bus_type;
 
        switch (state->bus_type) {
index b9e43ffa50859caeb4ce2b8343f660a598d249ff..cbe4711e9b31acfca212a8ef0a96867c03bb45fb 100644 (file)
@@ -144,8 +144,7 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
        mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
        if (mf) {
                mutex_lock(&sensor->lock);
-               if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
-                       *mf = fmt->format;
+               *mf = fmt->format;
                mutex_unlock(&sensor->lock);
        }
        return 0;
index 24d2b76dbe97e7a24d762b2bc58a6ba2bc2c6051..d2a1ce2bc7f5ce41eec4c1c6d5069280c358df5d 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/videodev2.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
+#include <media/v4l2-mc.h>
 #include <media/i2c/saa7115.h>
 #include <asm/div64.h>
 
@@ -74,6 +75,9 @@ enum saa711x_model {
 
 struct saa711x_state {
        struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_pad pads[DEMOD_NUM_PADS];
+#endif
        struct v4l2_ctrl_handler hdl;
 
        struct {
@@ -1809,6 +1813,9 @@ static int saa711x_probe(struct i2c_client *client,
        struct saa7115_platform_data *pdata;
        int ident;
        char name[CHIP_VER_SIZE + 1];
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       int ret;
+#endif
 
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -1832,6 +1839,18 @@ static int saa711x_probe(struct i2c_client *client,
        sd = &state->sd;
        v4l2_i2c_subdev_init(sd, client, &saa711x_ops);
 
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       state->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+       state->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+       state->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+
+       sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
+
+       ret = media_entity_pads_init(&sd->entity, DEMOD_NUM_PADS, state->pads);
+       if (ret < 0)
+               return ret;
+#endif
+
        v4l_info(client, "%s found @ 0x%x (%s)\n", name,
                 client->addr << 1, client->adapter->name);
        hdl = &state->hdl;
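
The msp3400, mt9v011 and saa7115 hunks wire their subdevices into the media controller graph the same way: a pads[] array lives in the driver state, each pad is flagged as sink or source, the entity function is set, and media_entity_pads_init() registers the pads during probe. The bare pattern, with an illustrative two-pad decoder (names and pad indices are not taken from a specific driver):

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

struct my_state {
	struct v4l2_subdev sd;
#if defined(CONFIG_MEDIA_CONTROLLER)
	struct media_pad pads[2];
#endif
};

static int my_init_pads(struct my_state *state)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	state->pads[0].flags = MEDIA_PAD_FL_SINK;	/* e.g. IF/analog input */
	state->pads[1].flags = MEDIA_PAD_FL_SOURCE;	/* e.g. decoded video out */

	state->sd.entity.function = MEDIA_ENT_F_ATV_DECODER;

	return media_entity_pads_init(&state->sd.entity, 2, state->pads);
#else
	return 0;
#endif
}
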
index 2e14e52ba2e056a071d0f1e4e02538569e446af5..69becc358659cf25dc6a4ccf7ddf0d2dbbe9573d 100644 (file)
@@ -632,7 +632,7 @@ static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
        .s_mbus_config  = mt9m001_s_mbus_config,
 };
 
-static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
        .g_skip_top_lines       = mt9m001_g_skip_top_lines,
 };
 
index 3b6eeed2e2b96d9457b0c0a09eeef88d1697c8ef..5c8e3ffe3b27bff0b69c51d218cd68e7f3001d0e 100644 (file)
@@ -728,7 +728,7 @@ static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
        .s_mbus_config  = mt9t031_s_mbus_config,
 };
 
-static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
        .g_skip_top_lines       = mt9t031_g_skip_top_lines,
 };
 
index c2ba1fb3694dfe78a874186d50b042c60bdde7dd..2721e583bfa0b239c3f36b7cb8d3efa6affc53a4 100644 (file)
@@ -860,7 +860,7 @@ static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
        .s_mbus_config  = mt9v022_s_mbus_config,
 };
 
-static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
        .g_skip_top_lines       = mt9v022_g_skip_top_lines,
 };
 
index 3397eb99c67b0269beecf3cc47c0076758d53885..da7469bc6e56fa8ede646e5aa7846f5a931e044f 100644 (file)
@@ -59,8 +59,7 @@ MODULE_LICENSE("GPL");
 #define EDID_NUM_BLOCKS_MAX 8
 #define EDID_BLOCK_SIZE 128
 
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE  (EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE + 2)
+#define I2C_MAX_XFER_SIZE  (EDID_BLOCK_SIZE + 2)
 
 static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
@@ -97,9 +96,6 @@ struct tc358743_state {
        /* edid  */
        u8 edid_blocks_written;
 
-       /* used by i2c_wr() */
-       u8 wr_data[MAX_XFER_SIZE];
-
        struct v4l2_dv_timings timings;
        u32 mbus_fmt_code;
 
@@ -149,13 +145,15 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
 {
        struct tc358743_state *state = to_state(sd);
        struct i2c_client *client = state->i2c_client;
-       u8 *data = state->wr_data;
        int err, i;
        struct i2c_msg msg;
+       u8 data[I2C_MAX_XFER_SIZE];
 
-       if ((2 + n) > sizeof(state->wr_data))
+       if ((2 + n) > I2C_MAX_XFER_SIZE) {
+               n = I2C_MAX_XFER_SIZE - 2;
                v4l2_warn(sd, "i2c wr reg=%04x: len=%d is too big!\n",
                          reg, 2 + n);
+       }
 
        msg.addr = client->addr;
        msg.buf = data;
@@ -859,15 +857,16 @@ static void tc358743_format_change(struct v4l2_subdev *sd)
        if (tc358743_get_detected_timings(sd, &timings)) {
                enable_stream(sd, false);
 
-               v4l2_dbg(1, debug, sd, "%s: Format changed. No signal\n",
+               v4l2_dbg(1, debug, sd, "%s: No signal\n",
                                __func__);
        } else {
                if (!v4l2_match_dv_timings(&state->timings, &timings, 0, false))
                        enable_stream(sd, false);
 
-               v4l2_print_dv_timings(sd->name,
-                               "tc358743_format_change: Format changed. New format: ",
-                               &timings, false);
+               if (debug)
+                       v4l2_print_dv_timings(sd->name,
+                                       "tc358743_format_change: New format: ",
+                                       &timings, false);
        }
 
        if (sd->devnode)
@@ -1581,6 +1580,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
 {
        struct tc358743_state *state = to_state(sd);
        u16 edid_len = edid->blocks * EDID_BLOCK_SIZE;
+       int i;
 
        v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
                 __func__, edid->pad, edid->start_block, edid->blocks);
@@ -1606,7 +1606,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
                return 0;
        }
 
-       i2c_wr(sd, EDID_RAM, edid->edid, edid_len);
+       for (i = 0; i < edid_len; i += EDID_BLOCK_SIZE)
+               i2c_wr(sd, EDID_RAM + i, edid->edid + i, EDID_BLOCK_SIZE);
 
        state->edid_blocks_written = edid->blocks;
 
index 7fa5f1e4fe3798155011263f9e4bdd0ad6b2955c..7cdd94842938e6d923fc9ed209f98556e242a9bc 100644 (file)
@@ -1001,7 +1001,7 @@ static struct tvp514x_decoder tvp514x_dev = {
 static struct tvp514x_platform_data *
 tvp514x_get_pdata(struct i2c_client *client)
 {
-       struct tvp514x_platform_data *pdata;
+       struct tvp514x_platform_data *pdata = NULL;
        struct v4l2_of_endpoint bus_cfg;
        struct device_node *endpoint;
        unsigned int flags;
@@ -1013,11 +1013,13 @@ tvp514x_get_pdata(struct i2c_client *client)
        if (!endpoint)
                return NULL;
 
+       if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
+               goto done;
+
        pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                goto done;
 
-       v4l2_of_parse_endpoint(endpoint, &bus_cfg);
        flags = bus_cfg.bus.parallel.flags;
 
        if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
index 6c3769d44b75ccf6cc113dece96d2ab2e6244b2b..19b52736b24e97236abf849bc1d1650410594d90 100644 (file)
@@ -9,11 +9,14 @@
 #include <linux/slab.h>
 #include <linux/videodev2.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <media/v4l2-async.h>
 #include <media/v4l2-device.h>
 #include <media/i2c/tvp5150.h>
 #include <media/v4l2-ctrls.h>
+#include <media/v4l2-of.h>
+#include <media/v4l2-mc.h>
 
 #include "tvp5150_reg.h"
 
@@ -35,6 +38,9 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
 struct tvp5150 {
        struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_pad pads[DEMOD_NUM_PADS];
+#endif
        struct v4l2_ctrl_handler hdl;
        struct v4l2_rect rect;
 
@@ -42,6 +48,8 @@ struct tvp5150 {
        u32 input;
        u32 output;
        int enable;
+
+       enum v4l2_mbus_type mbus_type;
 };
 
 static inline struct tvp5150 *to_tvp5150(struct v4l2_subdev *sd)
@@ -772,6 +780,10 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)
        v4l2_ctrl_handler_setup(&decoder->hdl);
 
        tvp5150_set_std(sd, decoder->norm);
+
+       if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+               tvp5150_write(sd, TVP5150_DATA_RATE_SEL, 0x40);
+
        return 0;
 };
 
@@ -818,17 +830,6 @@ static v4l2_std_id tvp5150_read_std(struct v4l2_subdev *sd)
        }
 }
 
-static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
-               struct v4l2_subdev_pad_config *cfg,
-               struct v4l2_subdev_mbus_code_enum *code)
-{
-       if (code->pad || code->index)
-               return -EINVAL;
-
-       code->code = MEDIA_BUS_FMT_UYVY8_2X8;
-       return 0;
-}
-
 static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
                struct v4l2_subdev_pad_config *cfg,
                struct v4l2_subdev_format *format)
@@ -844,10 +845,10 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
        tvp5150_reset(sd, 0);
 
        f->width = decoder->rect.width;
-       f->height = decoder->rect.height;
+       f->height = decoder->rect.height / 2;
 
        f->code = MEDIA_BUS_FMT_UYVY8_2X8;
-       f->field = V4L2_FIELD_SEQ_TB;
+       f->field = V4L2_FIELD_ALTERNATE;
        f->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
        v4l2_dbg(1, debug, sd, "width = %d, height = %d\n", f->width,
@@ -948,10 +949,76 @@ static int tvp5150_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
        return 0;
 }
 
+static int tvp5150_g_mbus_config(struct v4l2_subdev *sd,
+                                struct v4l2_mbus_config *cfg)
+{
+       struct tvp5150 *decoder = to_tvp5150(sd);
+
+       cfg->type = decoder->mbus_type;
+       cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING
+                  | V4L2_MBUS_FIELD_EVEN_LOW | V4L2_MBUS_DATA_ACTIVE_HIGH;
+
+       return 0;
+}
+
+/****************************************************************************
+                       V4L2 subdev pad ops
+ ****************************************************************************/
+static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
+               struct v4l2_subdev_pad_config *cfg,
+               struct v4l2_subdev_mbus_code_enum *code)
+{
+       if (code->pad || code->index)
+               return -EINVAL;
+
+       code->code = MEDIA_BUS_FMT_UYVY8_2X8;
+       return 0;
+}
+
+static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
+                                  struct v4l2_subdev_pad_config *cfg,
+                                  struct v4l2_subdev_frame_size_enum *fse)
+{
+       struct tvp5150 *decoder = to_tvp5150(sd);
+
+       if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_UYVY8_2X8)
+               return -EINVAL;
+
+       fse->code = MEDIA_BUS_FMT_UYVY8_2X8;
+       fse->min_width = decoder->rect.width;
+       fse->max_width = decoder->rect.width;
+       fse->min_height = decoder->rect.height / 2;
+       fse->max_height = decoder->rect.height / 2;
+
+       return 0;
+}
+
 /****************************************************************************
                        I2C Command
  ****************************************************************************/
 
+static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
+{
+       struct tvp5150 *decoder = to_tvp5150(sd);
+       /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
+       int val = 0x09;
+
+       /* Output format: 8-bit 4:2:2 YUV with discrete sync */
+       if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+               val = 0x0d;
+
+       /* Initializes TVP5150 to its default values */
+       /* # set PCLK (27MHz) */
+       tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+
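+       /* Enable the configured output format when streaming, disable outputs otherwise */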
+       if (enable)
+               tvp5150_write(sd, TVP5150_MISC_CTL, val);
+       else
+               tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+
+       return 0;
+}
+
 static int tvp5150_s_routing(struct v4l2_subdev *sd,
                             u32 input, u32 output, u32 config)
 {
@@ -1073,10 +1140,12 @@ static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
 
 static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
        .s_std = tvp5150_s_std,
+       .s_stream = tvp5150_s_stream,
        .s_routing = tvp5150_s_routing,
        .s_crop = tvp5150_s_crop,
        .g_crop = tvp5150_g_crop,
        .cropcap = tvp5150_cropcap,
+       .g_mbus_config = tvp5150_g_mbus_config,
 };
 
 static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
@@ -1088,6 +1157,7 @@ static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
 
 static const struct v4l2_subdev_pad_ops tvp5150_pad_ops = {
        .enum_mbus_code = tvp5150_enum_mbus_code,
+       .enum_frame_size = tvp5150_enum_frame_size,
        .set_fmt = tvp5150_fill_fmt,
        .get_fmt = tvp5150_fill_fmt,
 };
@@ -1105,63 +1175,167 @@ static const struct v4l2_subdev_ops tvp5150_ops = {
                        I2C Client & Driver
  ****************************************************************************/
 
+static int tvp5150_detect_version(struct tvp5150 *core)
+{
+       struct v4l2_subdev *sd = &core->sd;
+       struct i2c_client *c = v4l2_get_subdevdata(sd);
+       unsigned int i;
+       u16 dev_id;
+       u16 rom_ver;
+       u8 regs[4];
+       int res;
+
+       /*
+        * Read consecutive registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID,
+        * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER
+        */
+       for (i = 0; i < 4; i++) {
+               res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i);
+               if (res < 0)
+                       return res;
+               regs[i] = res;
+       }
+
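+       /* Combine the MSB/LSB device ID and major/minor ROM version bytes */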
+       dev_id = (regs[0] << 8) | regs[1];
+       rom_ver = (regs[2] << 8) | regs[3];
+
+       v4l2_info(sd, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
+                 dev_id, regs[2], regs[3], c->addr << 1, c->adapter->name);
+
+       if (dev_id == 0x5150 && rom_ver == 0x0321) { /* TVP5150A */
+               v4l2_info(sd, "tvp5150a detected.\n");
+       } else if (dev_id == 0x5150 && rom_ver == 0x0400) { /* TVP5150AM1 */
+               v4l2_info(sd, "tvp5150am1 detected.\n");
+
+               /* ITU-R BT.656.4 timing */
+               tvp5150_write(sd, TVP5150_REV_SELECT, 0);
+       } else if (dev_id == 0x5151 && rom_ver == 0x0100) { /* TVP5151 */
+               v4l2_info(sd, "tvp5151 detected.\n");
+       } else {
+               v4l2_info(sd, "*** unknown tvp%04x chip detected.\n", dev_id);
+       }
+
+       return 0;
+}
+
+static int tvp5150_init(struct i2c_client *c)
+{
+       struct gpio_desc *pdn_gpio;
+       struct gpio_desc *reset_gpio;
+
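+       /* De-assert the optional power-down and reset GPIOs with the delays below */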
+       pdn_gpio = devm_gpiod_get_optional(&c->dev, "pdn", GPIOD_OUT_HIGH);
+       if (IS_ERR(pdn_gpio))
+               return PTR_ERR(pdn_gpio);
+
+       if (pdn_gpio) {
+               gpiod_set_value_cansleep(pdn_gpio, 0);
+               /* Delay time between power supplies active and reset */
+               msleep(20);
+       }
+
+       reset_gpio = devm_gpiod_get_optional(&c->dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(reset_gpio))
+               return PTR_ERR(reset_gpio);
+
+       if (reset_gpio) {
+               /* RESETB pulse duration */
+               ndelay(500);
+               gpiod_set_value_cansleep(reset_gpio, 0);
+               /* Delay time between end of reset to I2C active */
+               usleep_range(200, 250);
+       }
+
+       return 0;
+}
+
+static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
+{
+       struct v4l2_of_endpoint bus_cfg;
+       struct device_node *ep;
+       unsigned int flags;
+       int ret = 0;
+
+       ep = of_graph_get_next_endpoint(np, NULL);
+       if (!ep)
+               return -EINVAL;
+
+       ret = v4l2_of_parse_endpoint(ep, &bus_cfg);
+       if (ret)
+               goto err;
+
+       flags = bus_cfg.bus.parallel.flags;
+
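+       /* The parallel bus needs active-high HSYNC/VSYNC and a low even-field signal */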
+       if (bus_cfg.bus_type == V4L2_MBUS_PARALLEL &&
+           !(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH &&
+             flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH &&
+             flags & V4L2_MBUS_FIELD_EVEN_LOW)) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       decoder->mbus_type = bus_cfg.bus_type;
+
+err:
+       of_node_put(ep);
+       return ret;
+}
+
 static int tvp5150_probe(struct i2c_client *c,
                         const struct i2c_device_id *id)
 {
        struct tvp5150 *core;
        struct v4l2_subdev *sd;
-       int tvp5150_id[4];
-       int i, res;
+       struct device_node *np = c->dev.of_node;
+       int res;
 
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(c->adapter,
             I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
                return -EIO;
 
+       res = tvp5150_init(c);
+       if (res)
+               return res;
+
        core = devm_kzalloc(&c->dev, sizeof(*core), GFP_KERNEL);
        if (!core)
                return -ENOMEM;
+
        sd = &core->sd;
-       v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
 
-       /* 
-        * Read consequent registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID,
-        * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER 
-        */
-       for (i = 0; i < 4; i++) {
-               res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i);
-               if (res < 0)
+       if (IS_ENABLED(CONFIG_OF) && np) {
+               res = tvp5150_parse_dt(core, np);
+               if (res) {
+                       v4l2_err(sd, "DT parsing error: %d\n", res);
                        return res;
-               tvp5150_id[i] = res;
+               }
+       } else {
+               /* Default to BT.656 embedded sync */
+               core->mbus_type = V4L2_MBUS_BT656;
        }
 
-       v4l_info(c, "chip found @ 0x%02x (%s)\n",
-                c->addr << 1, c->adapter->name);
+       v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
+       sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 
-       if (tvp5150_id[2] == 4 && tvp5150_id[3] == 0) { /* Is TVP5150AM1 */
-               v4l2_info(sd, "tvp%02x%02xam1 detected.\n",
-                         tvp5150_id[0], tvp5150_id[1]);
+#if defined(CONFIG_MEDIA_CONTROLLER)
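+       /* One sink pad for the input signal, two source pads for video and VBI */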
+       core->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+       core->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+       core->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
 
-               /* ITU-T BT.656.4 timing */
-               tvp5150_write(sd, TVP5150_REV_SELECT, 0);
-       } else {
-               /* Is TVP5150A */
-               if (tvp5150_id[2] == 3 || tvp5150_id[3] == 0x21) {
-                       v4l2_info(sd, "tvp%02x%02xa detected.\n",
-                                 tvp5150_id[0], tvp5150_id[1]);
-               } else {
-                       v4l2_info(sd, "*** unknown tvp%02x%02x chip detected.\n",
-                                 tvp5150_id[0], tvp5150_id[1]);
-                       v4l2_info(sd, "*** Rom ver is %d.%d\n",
-                                 tvp5150_id[2], tvp5150_id[3]);
-               }
-       }
+       sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
+
+       res = media_entity_pads_init(&sd->entity, DEMOD_NUM_PADS, core->pads);
+       if (res < 0)
+               return res;
+#endif
+
+       res = tvp5150_detect_version(core);
+       if (res < 0)
+               return res;
 
        core->norm = V4L2_STD_ALL;      /* Default is autodetect */
        core->input = TVP5150_COMPOSITE1;
        core->enable = 1;
 
-       v4l2_ctrl_handler_init(&core->hdl, 4);
+       v4l2_ctrl_handler_init(&core->hdl, 5);
        v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
                        V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
        v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
@@ -1170,6 +1344,9 @@ static int tvp5150_probe(struct i2c_client *c,
                        V4L2_CID_SATURATION, 0, 255, 1, 128);
        v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
                        V4L2_CID_HUE, -128, 127, 1, 0);
+       v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
+                       V4L2_CID_PIXEL_RATE, 27000000,
+                       27000000, 1, 27000000);
        sd->ctrl_handler = &core->hdl;
        if (core->hdl.error) {
                res = core->hdl.error;
@@ -1221,8 +1398,17 @@ static const struct i2c_device_id tvp5150_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, tvp5150_id);
 
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tvp5150_of_match[] = {
+       { .compatible = "ti,tvp5150", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tvp5150_of_match);
+#endif
+
 static struct i2c_driver tvp5150_driver = {
        .driver = {
+               .of_match_table = of_match_ptr(tvp5150_of_match),
                .name   = "tvp5150",
        },
        .probe          = tvp5150_probe,
index 83c79fa5f61de1b7155cb42ff69672f87036b5ba..4df640c3aa40fde3eecf4206233e1aa04745f12d 100644 (file)
@@ -894,7 +894,7 @@ static struct tvp7002_config *
 tvp7002_get_pdata(struct i2c_client *client)
 {
        struct v4l2_of_endpoint bus_cfg;
-       struct tvp7002_config *pdata;
+       struct tvp7002_config *pdata = NULL;
        struct device_node *endpoint;
        unsigned int flags;
 
@@ -905,11 +905,13 @@ tvp7002_get_pdata(struct i2c_client *client)
        if (!endpoint)
                return NULL;
 
+       if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
+               goto done;
+
        pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                goto done;
 
-       v4l2_of_parse_endpoint(endpoint, &bus_cfg);
        flags = bus_cfg.bus.parallel.flags;
 
        if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
index 4b564f17f618e38664d9cdffc8992fe6cf61a737..90b693f4e2ab84b4da6b68bf79f5b24fab65149d 100644 (file)
@@ -124,7 +124,7 @@ static int vpx3220_fp_write(struct v4l2_subdev *sd, u8 fpaddr, u16 data)
        return 0;
 }
 
-static u16 vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr)
+static int vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        s16 data;
index 7dae0ac0f3aec12c5d44875111d3a69818e43c92..4d1c13de494bfb9f7a4957311225211250480b5d 100644 (file)
@@ -234,7 +234,6 @@ static long media_device_setup_link(struct media_device *mdev,
        return ret;
 }
 
-#if 0 /* Let's postpone it to Kernel 4.6 */
 static long __media_device_get_topology(struct media_device *mdev,
                                      struct media_v2_topology *topo)
 {
@@ -390,7 +389,6 @@ static long media_device_get_topology(struct media_device *mdev,
 
        return 0;
 }
-#endif
 
 static long media_device_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
@@ -424,14 +422,13 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd,
                mutex_unlock(&dev->graph_mutex);
                break;
 
-#if 0 /* Let's postpone it to Kernel 4.6 */
        case MEDIA_IOC_G_TOPOLOGY:
                mutex_lock(&dev->graph_mutex);
                ret = media_device_get_topology(dev,
                                (struct media_v2_topology __user *)arg);
                mutex_unlock(&dev->graph_mutex);
                break;
-#endif
+
        default:
                ret = -ENOIOCTLCMD;
        }
@@ -480,9 +477,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
        case MEDIA_IOC_DEVICE_INFO:
        case MEDIA_IOC_ENUM_ENTITIES:
        case MEDIA_IOC_SETUP_LINK:
-#if 0 /* Let's postpone it to Kernel 4.6 */
        case MEDIA_IOC_G_TOPOLOGY:
-#endif
                return media_device_ioctl(filp, cmd, arg);
 
        case MEDIA_IOC_ENUM_LINKS32:
index cea35bf2001170d7ca0471e1d51e5fe9eae8211e..29409f440f1caef38f82694b382d3b66a3895ffb 100644 (file)
@@ -181,6 +181,7 @@ static int media_open(struct inode *inode, struct file *filp)
                ret = mdev->fops->open(filp);
                if (ret) {
                        put_device(&mdev->dev);
+                       filp->private_data = NULL;
                        return ret;
                }
        }
index e89d85a7d31b56286a6d8c9d492b5565dc6a4d43..f2e43603d6d24f6a3c27105369187a73d61d8175 100644 (file)
@@ -452,9 +452,12 @@ error:
        media_entity_graph_walk_start(graph, entity_err);
 
        while ((entity_err = media_entity_graph_walk_next(graph))) {
-               entity_err->stream_count--;
-               if (entity_err->stream_count == 0)
-                       entity_err->pipe = NULL;
+               /* don't let the stream_count go negative */
+               if (entity_err->stream_count > 0) {
+                       entity_err->stream_count--;
+                       if (entity_err->stream_count == 0)
+                               entity_err->pipe = NULL;
+               }
 
                /*
                 * We haven't increased stream_count further than this
@@ -486,9 +489,12 @@ void media_entity_pipeline_stop(struct media_entity *entity)
        media_entity_graph_walk_start(graph, entity);
 
        while ((entity = media_entity_graph_walk_next(graph))) {
-               entity->stream_count--;
-               if (entity->stream_count == 0)
-                       entity->pipe = NULL;
+               /* don't let the stream_count go negative */
+               if (entity->stream_count > 0) {
+                       entity->stream_count--;
+                       if (entity->stream_count == 0)
+                               entity->pipe = NULL;
+               }
        }
 
        if (!--pipe->streaming_count)
index 8b5e0b3a92a0c4a408af8bc50217279065b78803..4cac1fc233f28fd0a99e58f923a216482b63d90a 100644 (file)
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(debug,
 
 #define DRIVER_VERSION "0.1"
 #define DRIVER_NAME "flexcop-pci"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
 
 struct flexcop_pci {
        struct pci_dev *pdev;
index 9400e996087b6a5eb5076cd7e0ab399f2bed0e9e..2c412377507bc16f13468f524b566618f5d7f76d 100644 (file)
@@ -186,7 +186,7 @@ MODULE_VERSION(BTTV_VERSION);
 static ssize_t show_card(struct device *cd,
                         struct device_attribute *attr, char *buf)
 {
-       struct video_device *vfd = container_of(cd, struct video_device, dev);
+       struct video_device *vfd = to_video_device(cd);
        struct bttv *btv = video_get_drvdata(vfd);
        return sprintf(buf, "%d\n", btv ? btv->c.type : UNSET);
 }
@@ -1726,22 +1726,15 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id id)
        struct bttv_fh *fh  = priv;
        struct bttv *btv = fh->btv;
        unsigned int i;
-       int err = 0;
 
        for (i = 0; i < BTTV_TVNORMS; i++)
                if (id & bttv_tvnorms[i].v4l2_id)
                        break;
-       if (i == BTTV_TVNORMS) {
-               err = -EINVAL;
-               goto err;
-       }
-
+       if (i == BTTV_TVNORMS)
+               return -EINVAL;
        btv->std = id;
        set_tvnorm(btv, i);
-
-err:
-
-       return err;
+       return 0;
 }
 
 static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id)
@@ -1770,12 +1763,9 @@ static int bttv_enum_input(struct file *file, void *priv,
 {
        struct bttv_fh *fh = priv;
        struct bttv *btv = fh->btv;
-       int rc = 0;
 
-       if (i->index >= bttv_tvcards[btv->c.type].video_inputs) {
-               rc = -EINVAL;
-               goto err;
-       }
+       if (i->index >= bttv_tvcards[btv->c.type].video_inputs)
+               return -EINVAL;
 
        i->type     = V4L2_INPUT_TYPE_CAMERA;
        i->audioset = 0;
@@ -1799,10 +1789,7 @@ static int bttv_enum_input(struct file *file, void *priv,
        }
 
        i->std = BTTV_NORMS;
-
-err:
-
-       return rc;
+       return 0;
 }
 
 static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
index d407244fd1bce3339b3fdbc13ea3e4801114f19a..e69d338ab9be0e0f51b85b05e2e4071c5982c802 100644 (file)
@@ -318,7 +318,7 @@ static int microtune_mt7202dtf_request_firmware(struct dvb_frontend* fe, const s
        return request_firmware(fw, name, &bt->bt->dev->dev);
 }
 
-static struct sp887x_config microtune_mt7202dtf_config = {
+static const struct sp887x_config microtune_mt7202dtf_config = {
        .demod_address = 0x70,
        .request_firmware = microtune_mt7202dtf_request_firmware,
 };
@@ -458,7 +458,7 @@ static void or51211_sleep(struct dvb_frontend * fe)
        bttv_write_gpio(bt->bttv_nr, 0x0001, 0x0000);
 }
 
-static struct or51211_config or51211_config = {
+static const struct or51211_config or51211_config = {
        .demod_address = 0x15,
        .request_firmware = or51211_request_firmware,
        .setmode = or51211_setmode,
index 9d5b314142f1b49daabb78985f6d13a2f982405a..6e995ef8c37e80872be886d5aa8a7027915ad992 100644 (file)
@@ -690,7 +690,7 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type)
        struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
        struct stv6110x_config *tunerconf = (input->nr & 1) ?
                &stv6110b : &stv6110a;
-       struct stv6110x_devctl *ctl;
+       const struct stv6110x_devctl *ctl;
 
        ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
        if (!ctl) {
index 525ebfefeee829c644a74d279d888a7fc60e9ca3..c94cecd2aa4068c5d7a4922800745e52b5a8dd98 100644 (file)
@@ -771,10 +771,9 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
 
        /* allocate device context */
        ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
-
        if (!ndev)
                goto dev_alloc_err;
-       memset(ndev, 0, sizeof(*ndev));
+
        ndev->old_fw = old_firmware;
        ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
        if (!ndev->wq) {
index 039bed3cc919d2f5624e1635327e126abdfca064..4e783a68bf4aef998cf2c26353510a367dba338d 100644 (file)
@@ -57,7 +57,7 @@ static int tuner_attach_stv6110(struct ngene_channel *chan)
                chan->dev->card_info->fe_config[chan->number];
        struct stv6110x_config *tunerconf = (struct stv6110x_config *)
                chan->dev->card_info->tuner_config[chan->number];
-       struct stv6110x_devctl *ctl;
+       const struct stv6110x_devctl *ctl;
 
        /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
        if (chan->number < 2)
index 1d2c310ce838e5fb415ebd4d3b533ac11b8c4b9b..94f8162444071ca3168af652b0c260acaeeab021 100644 (file)
@@ -1211,6 +1211,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
 
 static int alsa_device_exit(struct saa7134_dev *dev)
 {
+       if (!snd_saa7134_cards[dev->nr])
+               return 1;
 
        snd_card_free(snd_saa7134_cards[dev->nr]);
        snd_saa7134_cards[dev->nr] = NULL;
@@ -1260,7 +1262,8 @@ static void saa7134_alsa_exit(void)
        int idx;
 
        for (idx = 0; idx < SNDRV_CARDS; idx++) {
-               snd_card_free(snd_saa7134_cards[idx]);
+               if (snd_saa7134_cards[idx])
+                       snd_card_free(snd_saa7134_cards[idx]);
        }
 
        saa7134_dmasound_init = NULL;
index 56b932c97196d68cc76dcdfd007dcf7b937bd40f..ca417a454d6787aecc77a0a338137dce78d7acb0 100644 (file)
@@ -189,6 +189,7 @@ static const struct v4l2_ioctl_ops ts_ioctl_ops = {
        .vidioc_querybuf                = vb2_ioctl_querybuf,
        .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
+       .vidioc_expbuf                  = vb2_ioctl_expbuf,
        .vidioc_streamon                = vb2_ioctl_streamon,
        .vidioc_streamoff               = vb2_ioctl_streamoff,
        .vidioc_g_frequency             = saa7134_g_frequency,
@@ -286,7 +287,7 @@ static int empress_init(struct saa7134_dev *dev)
         * transfers that do not start at the beginning of a page. A USERPTR
         * can start anywhere in a page, so USERPTR support is a no-go.
         */
-       q->io_modes = VB2_MMAP | VB2_READ;
+       q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
        q->drv_priv = &dev->ts_q;
        q->ops = &saa7134_empress_qops;
        q->gfp_flags = GFP_DMA32;
index 8a2abb34186b3dfa08b3ac6a22af298b757bc132..2799538e2d7e043ac70a4985daee314a9392f18b 100644 (file)
@@ -378,7 +378,7 @@ static int saa7134_go7007_send_firmware(struct go7007 *go, u8 *data, int len)
        return 0;
 }
 
-static struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
+static const struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
        .interface_reset        = saa7134_go7007_interface_reset,
        .write_interrupt        = saa7134_go7007_write_interrupt,
        .read_interrupt         = saa7134_go7007_read_interrupt,
index a63c1366a64efad4ad837a5ae927a3417b39d580..7a42d3ae3ac933287884480beeb9d7623ce1abbc 100644 (file)
@@ -1906,6 +1906,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
        .vidioc_querybuf                = vb2_ioctl_querybuf,
        .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
+       .vidioc_expbuf                  = vb2_ioctl_expbuf,
        .vidioc_s_std                   = saa7134_s_std,
        .vidioc_g_std                   = saa7134_g_std,
        .vidioc_querystd                = saa7134_querystd,
@@ -2089,7 +2090,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
         * USERPTR support is a no-go unless the application knows about these
         * limitations and has special support for this.
         */
-       q->io_modes = VB2_MMAP | VB2_READ;
+       q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
        if (saa7134_userptr)
                q->io_modes |= VB2_USERPTR;
        q->drv_priv = &dev->video_q;
index a69dc6a0752bb94fc8352c133571b31f37599f99..18d229fa65cfb3839e61a60cd8f4dfebd8fc55eb 100644 (file)
@@ -1739,7 +1739,7 @@ static int alps_tdlb7_request_firmware(struct dvb_frontend* fe, const struct fir
 #endif
 }
 
-static struct sp8870_config alps_tdlb7_config = {
+static const struct sp8870_config alps_tdlb7_config = {
 
        .demod_address = 0x71,
        .request_firmware = alps_tdlb7_request_firmware,
index de54310a26603b23a152b174b3d04c2ce7cd58df..9f48100227f150f216c9ecfaf8c704d73ae6f3b8 100644 (file)
@@ -644,7 +644,7 @@ static void frontend_init(struct budget *budget)
                }
 
        case 0x101c: { /* TT S2-1600 */
-                       struct stv6110x_devctl *ctl;
+                       const struct stv6110x_devctl *ctl;
                        saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
                        msleep(50);
                        saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
@@ -697,7 +697,7 @@ static void frontend_init(struct budget *budget)
                break;
 
        case 0x1020: { /* Omicom S2 */
-                       struct stv6110x_devctl *ctl;
+                       const struct stv6110x_devctl *ctl;
                        saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
                        msleep(50);
                        saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
index 526359447ff90fda699b032e0dbed4d6a06e577e..fd4fcd5a7184ce08f31d1ea11b8ea730496121ae 100644 (file)
@@ -120,6 +120,18 @@ source "drivers/media/platform/s5p-tv/Kconfig"
 source "drivers/media/platform/am437x/Kconfig"
 source "drivers/media/platform/xilinx/Kconfig"
 
+config VIDEO_TI_CAL
+       tristate "TI CAL (Camera Adaptation Layer) driver"
+       depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       depends on SOC_DRA7XX || COMPILE_TEST
+       select VIDEOBUF2_DMA_CONTIG
+       default n
+       ---help---
+         Support for the TI CAL (Camera Adaptation Layer) block
+         found on the DRA72X SoC.
+         In the TI Technical Reference Manual this module is referred
+         to as the Camera Interface Subsystem (CAMSS).
+
 endif # V4L_PLATFORM_DRIVERS
 
 menuconfig V4L_MEM2MEM_DRIVERS
@@ -215,6 +227,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
 config VIDEO_STI_BDISP
        tristate "STMicroelectronics BDISP 2D blitter driver"
        depends on VIDEO_DEV && VIDEO_V4L2
+       depends on HAS_DMA
        depends on ARCH_STI || COMPILE_TEST
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
index efa0295af87bfe089acb526329b1601e95756c89..028a7233096baff93845935b4be126a65dadd32b 100644 (file)
@@ -18,6 +18,8 @@ obj-$(CONFIG_VIDEO_VIM2M)             += vim2m.o
 
 obj-$(CONFIG_VIDEO_TI_VPE)             += ti-vpe/
 
+obj-$(CONFIG_VIDEO_TI_CAL)             += ti-vpe/
+
 obj-$(CONFIG_VIDEO_MX2_EMMAPRP)                += mx2_emmaprp.o
 obj-$(CONFIG_VIDEO_CODA)               += coda/
 
index 7d28899f89ce16c00cd9f00f424a948fa57e2ae8..6efe9d0029611251ba0621210dbeee41606abb08 100644 (file)
@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
 
        /* Calculate bytesused field */
        if (dst_buf->sequence == 0) {
-               vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+               vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
                                        ctx->vpu_header_size[0] +
                                        ctx->vpu_header_size[1] +
                                        ctx->vpu_header_size[2]);
index ffbefdff6b5ef460d4a7c1abce024ed9841d8b4c..6fba32bec97455b8a98ddd255252b88e178ed0ea 100644 (file)
@@ -261,7 +261,7 @@ static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
         */
        if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
                if (fpc_physaddr != NULL) {
-                       free_pages((unsigned long)fpc_physaddr,
+                       free_pages((unsigned long)fpc_virtaddr,
                                   get_order
                                   (config_params->fault_pxl.fp_num *
                                   FP_NUM_BYTES));
index 93782f15b8252092ba31a81b8c135de581f8dc4a..a600e32e25430a8ddc82e090f373a87d4e335b57 100644 (file)
@@ -700,7 +700,7 @@ static unsigned int gsc_m2m_poll(struct file *file,
 {
        struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
        struct gsc_dev *gsc = ctx->gsc_dev;
-       int ret;
+       unsigned int ret;
 
        if (mutex_lock_interruptible(&gsc->lock))
                return -ERESTARTSYS;
index 40423c6c5324a8e1cf2573b2acffeb566ad74655..57d42c6172c576757b7724906852c7ec9ac34911 100644 (file)
@@ -1,6 +1,6 @@
 
 config VIDEO_SAMSUNG_EXYNOS4_IS
-       bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
+       tristate "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
        depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
        depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
        depends on OF && COMMON_CLK
index 49658ca39e5100d70ed9011eea6adb4cd02c3c55..979c388ebf60cdd1a497aac2570aab05c5e800a6 100644 (file)
@@ -631,6 +631,12 @@ static int fimc_is_hw_open_sensor(struct fimc_is *is,
 
        fimc_is_mem_barrier();
 
+       /*
+        * Some user space use cases hang up here without this
+        * empirically chosen delay.
+        */
+       udelay(100);
+
        mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0));
        mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
        mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2));
index bf9261eb57a15c37a8b82fb3197a94649e4cc9c6..c0816728cbfe1d2be7c6eb5c987a4ac3b8213a74 100644 (file)
@@ -218,8 +218,8 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
                                                        ivb->dma_addr[i];
 
                        isp_dbg(2, &video->ve.vdev,
-                               "dma_buf %pad (%d/%d/%d) addr: %pad\n",
-                               &buf_index, ivb->index, i, vb->index,
+                               "dma_buf %d (%d/%d/%d) addr: %pad\n",
+                               buf_index, ivb->index, i, vb->index,
                                &ivb->dma_addr[i]);
                }
 
index f3b2dd30ec7769b3199da9ed2694df5886004616..feb521f28e14857b8344d7320769c87183cd17ac 100644 (file)
@@ -185,6 +185,37 @@ error:
        return ret;
 }
 
+/**
+ * __fimc_pipeline_enable - enable power of all pipeline subdevs
+ *                         and the sensor clock
+ * @ep: video pipeline structure
+ * @fmd: fimc media device
+ *
+ * Called with the graph mutex held.
+ */
+static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
+                                 struct fimc_md *fmd)
+{
+       struct fimc_pipeline *p = to_fimc_pipeline(ep);
+       int ret;
+
+       /* Enable PXLASYNC clock if this pipeline includes FIMC-IS */
+       if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
+               ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = fimc_pipeline_s_power(p, 1);
+       if (!ret)
+               return 0;
+
+       if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
+               clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
+
+       return ret;
+}
+
 /**
  * __fimc_pipeline_open - update the pipeline information, enable power
  *                        of all pipeline subdevs and the sensor clock
@@ -199,7 +230,6 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
        struct fimc_md *fmd = entity_to_fimc_mdev(me);
        struct fimc_pipeline *p = to_fimc_pipeline(ep);
        struct v4l2_subdev *sd;
-       int ret;
 
        if (WARN_ON(p == NULL || me == NULL))
                return -EINVAL;
@@ -208,24 +238,16 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
                fimc_pipeline_prepare(p, me);
 
        sd = p->subdevs[IDX_SENSOR];
-       if (sd == NULL)
-               return -EINVAL;
-
-       /* Disable PXLASYNC clock if this pipeline includes FIMC-IS */
-       if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
-               ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
-               if (ret < 0)
-                       return ret;
-       }
-
-       ret = fimc_pipeline_s_power(p, 1);
-       if (!ret)
+       if (sd == NULL) {
+               pr_warn("%s(): No sensor subdev\n", __func__);
+               /*
+                * Pipeline open cannot fail so as to make it possible
+                * for the user space to configure the pipeline.
+                */
                return 0;
+       }
 
-       if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
-               clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
-
-       return ret;
+       return __fimc_pipeline_enable(ep, fmd);
 }
 
 /**
@@ -269,10 +291,43 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
                { IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
        };
        struct fimc_pipeline *p = to_fimc_pipeline(ep);
+       struct fimc_md *fmd = entity_to_fimc_mdev(&p->subdevs[IDX_CSIS]->entity);
+       enum fimc_subdev_index sd_id;
        int i, ret = 0;
 
-       if (p->subdevs[IDX_SENSOR] == NULL)
-               return -ENODEV;
+       if (p->subdevs[IDX_SENSOR] == NULL) {
+               if (!fmd->user_subdev_api) {
+                       /*
+                        * Sensor must be already discovered if we
+                        * aren't in the user_subdev_api mode
+                        */
+                       return -ENODEV;
+               }
+
+               /* Get pipeline sink entity */
+               if (p->subdevs[IDX_FIMC])
+                       sd_id = IDX_FIMC;
+               else if (p->subdevs[IDX_IS_ISP])
+                       sd_id = IDX_IS_ISP;
+               else if (p->subdevs[IDX_FLITE])
+                       sd_id = IDX_FLITE;
+               else
+                       return -ENODEV;
+
+               /*
+                * Sensor could have been linked between open and STREAMON -
+                * check if this is the case.
+                */
+               fimc_pipeline_prepare(p, &p->subdevs[sd_id]->entity);
+
+               if (p->subdevs[IDX_SENSOR] == NULL)
+                       return -ENODEV;
+
+               ret = __fimc_pipeline_enable(ep, fmd);
+               if (ret < 0)
+                       return ret;
+
+       }
 
        for (i = 0; i < IDX_MAX; i++) {
                unsigned int idx = seq[on][i];
@@ -282,8 +337,10 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
                if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
                        goto error;
        }
+
        return 0;
 error:
+       fimc_pipeline_s_power(p, !on);
        for (; i >= 0; i--) {
                unsigned int idx = seq[on][i];
                v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
@@ -332,13 +389,19 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
        struct fimc_source_info *pd = &fmd->sensor[index].pdata;
        struct device_node *rem, *ep, *np;
        struct v4l2_of_endpoint endpoint;
+       int ret;
 
        /* Assume here a port node can have only one endpoint node. */
        ep = of_get_next_child(port, NULL);
        if (!ep)
                return 0;
 
-       v4l2_of_parse_endpoint(ep, &endpoint);
+       ret = v4l2_of_parse_endpoint(ep, &endpoint);
+       if (ret) {
+               of_node_put(ep);
+               return ret;
+       }
+
        if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS)
                return -EINVAL;
 
@@ -429,8 +492,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
                        continue;
 
                ret = fimc_md_parse_port_node(fmd, port, index);
-               if (ret < 0)
+               if (ret < 0) {
+                       of_node_put(node);
                        goto rpm_put;
+               }
                index++;
        }
 
@@ -441,8 +506,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
 
        for_each_child_of_node(ports, node) {
                ret = fimc_md_parse_port_node(fmd, node, index);
-               if (ret < 0)
+               if (ret < 0) {
+                       of_node_put(node);
                        break;
+               }
                index++;
        }
 rpm_put:
@@ -650,8 +717,10 @@ static int fimc_md_register_platform_entities(struct fimc_md *fmd,
                        ret = fimc_md_register_platform_entity(fmd, pdev,
                                                        plat_entity);
                put_device(&pdev->dev);
-               if (ret < 0)
+               if (ret < 0) {
+                       of_node_put(node);
                        break;
+               }
        }
 
        return ret;
index ac5e50e595be83cd7ac973a6eab1d5866fca1d1b..bd5c46c3d4b7bedc362c29cb473c8206f84026b3 100644 (file)
@@ -736,6 +736,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
 {
        struct device_node *node = pdev->dev.of_node;
        struct v4l2_of_endpoint endpoint;
+       int ret;
 
        if (of_property_read_u32(node, "clock-frequency",
                                 &state->clk_frequency))
@@ -751,7 +752,9 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
                return -EINVAL;
        }
        /* Get port node and validate MIPI-CSI channel id. */
-       v4l2_of_parse_endpoint(node, &endpoint);
+       ret = v4l2_of_parse_endpoint(node, &endpoint);
+       if (ret)
+               goto err;
 
        state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
        if (state->index >= CSIS_MAX_ENTITIES)
@@ -764,9 +767,10 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
                                        "samsung,csis-wclk");
 
        state->num_lanes = endpoint.bus.mipi_csi2.num_data_lanes;
-       of_node_put(node);
 
-       return 0;
+err:
+       of_node_put(node);
+       return ret;
 }
 
 static int s5pcsis_pm_resume(struct device *dev, bool runtime);
index 0bcfa553c1aa254b63789980d50a1dc5a0149d5f..f9e5245f26acca69a0113b05d950d2611bb6a3d0 100644 (file)
@@ -449,7 +449,7 @@ void omap3isp_configure_bridge(struct isp_device *isp,
        case CCDC_INPUT_PARALLEL:
                ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
                ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
-               shift += parcfg->data_lane_shift * 2;
+               shift += parcfg->data_lane_shift;
                break;
 
        case CCDC_INPUT_CSI2A:
@@ -2235,8 +2235,11 @@ static int isp_of_parse_node(struct device *dev, struct device_node *node,
        struct isp_bus_cfg *buscfg = &isd->bus;
        struct v4l2_of_endpoint vep;
        unsigned int i;
+       int ret;
 
-       v4l2_of_parse_endpoint(node, &vep);
+       ret = v4l2_of_parse_endpoint(node, &vep);
+       if (ret)
+               return ret;
 
        dev_dbg(dev, "parsing endpoint %s, interface %u\n", node->full_name,
                vep.base.port);
@@ -2528,12 +2531,13 @@ static int isp_probe(struct platform_device *pdev)
        }
 
        /* Interrupt */
-       isp->irq_num = platform_get_irq(pdev, 0);
-       if (isp->irq_num <= 0) {
+       ret = platform_get_irq(pdev, 0);
+       if (ret <= 0) {
                dev_err(isp->dev, "No IRQ resource\n");
                ret = -ENODEV;
                goto error_iommu;
        }
+       isp->irq_num = ret;
 
        if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
                             "OMAP3 ISP", isp)) {
@@ -2599,6 +2603,7 @@ static const struct of_device_id omap3isp_of_table[] = {
        { .compatible = "ti,omap3-isp" },
        { },
 };
+MODULE_DEVICE_TABLE(of, omap3isp_of_table);
 
 static struct platform_driver omap3isp_driver = {
        .probe = isp_probe,
index bb3974c98e37ebdfc1306534fd9d0555b67273de..882310eb45ccfa51e309769058a21433aa485655 100644 (file)
@@ -2421,7 +2421,7 @@ static int ccdc_link_validate(struct v4l2_subdev *sd,
                        &((struct isp_bus_cfg *)
                          media_entity_to_v4l2_subdev(link->source->entity)
                          ->host_priv)->bus.parallel;
-               parallel_shift = parcfg->data_lane_shift * 2;
+               parallel_shift = parcfg->data_lane_shift;
        } else {
                parallel_shift = 0;
        }
index 84a96670e2e71483442a584d2bcd124f4f0c6da4..ac30a0f837801ae57d3227429e4492a23306a0a9 100644 (file)
@@ -1480,13 +1480,6 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
        struct isp_buffer *buffer;
        int restart = 0;
 
-       if (prev->input == PREVIEW_INPUT_MEMORY) {
-               buffer = omap3isp_video_buffer_next(&prev->video_in);
-               if (buffer != NULL)
-                       preview_set_inaddr(prev, buffer->dma);
-               pipe->state |= ISP_PIPELINE_IDLE_INPUT;
-       }
-
        if (prev->output & PREVIEW_OUTPUT_MEMORY) {
                buffer = omap3isp_video_buffer_next(&prev->video_out);
                if (buffer != NULL) {
@@ -1496,6 +1489,13 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
                pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
        }
 
+       if (prev->input == PREVIEW_INPUT_MEMORY) {
+               buffer = omap3isp_video_buffer_next(&prev->video_in);
+               if (buffer != NULL)
+                       preview_set_inaddr(prev, buffer->dma);
+               pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+       }
+
        switch (prev->state) {
        case ISP_PIPELINE_STREAM_SINGLESHOT:
                if (isp_pipeline_ready(pipe))
index 994dfc0813f6b7556fb43b05b8536b29f4c6f8b1..2aff755ff77c37741d540181ce25e32dc8bf0ba1 100644 (file)
@@ -434,10 +434,68 @@ static void isp_video_buffer_queue(struct vb2_buffer *buf)
        }
 }
 
+/*
+ * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
+ * @video: ISP video object
+ * @state: new state for the returned buffers
+ *
+ * Return all buffers queued on the video node to videobuf2 in the given state.
+ * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
+ * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
+ *
+ * The function must be called with the video irqlock held.
+ */
+static void omap3isp_video_return_buffers(struct isp_video *video,
+                                         enum vb2_buffer_state state)
+{
+       while (!list_empty(&video->dmaqueue)) {
+               struct isp_buffer *buf;
+
+               buf = list_first_entry(&video->dmaqueue,
+                                      struct isp_buffer, irqlist);
+               list_del(&buf->irqlist);
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
+       }
+}
+
+static int isp_video_start_streaming(struct vb2_queue *queue,
+                                    unsigned int count)
+{
+       struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
+       struct isp_video *video = vfh->video;
+       struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+       unsigned long flags;
+       int ret;
+
+       /* In sensor-to-memory mode, the stream can be started synchronously
+        * to the stream on command. In memory-to-memory mode, it will be
+        * started when buffers are queued on both the input and output.
+        */
+       if (pipe->input)
+               return 0;
+
+       ret = omap3isp_pipeline_set_stream(pipe,
+                                          ISP_PIPELINE_STREAM_CONTINUOUS);
+       if (ret < 0) {
+               spin_lock_irqsave(&video->irqlock, flags);
+               omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
+               spin_unlock_irqrestore(&video->irqlock, flags);
+               return ret;
+       }
+
+       spin_lock_irqsave(&video->irqlock, flags);
+       if (list_empty(&video->dmaqueue))
+               video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+       spin_unlock_irqrestore(&video->irqlock, flags);
+
+       return 0;
+}
+
 static const struct vb2_ops isp_video_queue_ops = {
        .queue_setup = isp_video_queue_setup,
        .buf_prepare = isp_video_buffer_prepare,
        .buf_queue = isp_video_buffer_queue,
+       .start_streaming = isp_video_start_streaming,
 };
 
 /*
@@ -459,7 +517,7 @@ static const struct vb2_ops isp_video_queue_ops = {
 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
 {
        struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
-       enum isp_pipeline_state state;
+       enum vb2_buffer_state vb_state;
        struct isp_buffer *buf;
        unsigned long flags;
 
@@ -495,17 +553,19 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
 
        /* Report pipeline errors to userspace on the capture device side. */
        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
-               state = VB2_BUF_STATE_ERROR;
+               vb_state = VB2_BUF_STATE_ERROR;
                pipe->error = false;
        } else {
-               state = VB2_BUF_STATE_DONE;
+               vb_state = VB2_BUF_STATE_DONE;
        }
 
-       vb2_buffer_done(&buf->vb.vb2_buf, state);
+       vb2_buffer_done(&buf->vb.vb2_buf, vb_state);
 
        spin_lock_irqsave(&video->irqlock, flags);
 
        if (list_empty(&video->dmaqueue)) {
+               enum isp_pipeline_state state;
+
                spin_unlock_irqrestore(&video->irqlock, flags);
 
                if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -541,26 +601,16 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
  * omap3isp_video_cancel_stream - Cancel stream on a video node
  * @video: ISP video object
  *
- * Cancelling a stream mark all buffers on the video node as erroneous and makes
- * sure no new buffer can be queued.
+ * Cancelling a stream returns all buffers queued on the video node to videobuf2
+ * in the erroneous state and makes sure no new buffer can be queued.
  */
 void omap3isp_video_cancel_stream(struct isp_video *video)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&video->irqlock, flags);
-
-       while (!list_empty(&video->dmaqueue)) {
-               struct isp_buffer *buf;
-
-               buf = list_first_entry(&video->dmaqueue,
-                                      struct isp_buffer, irqlist);
-               list_del(&buf->irqlist);
-               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
-       }
-
+       omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR);
        video->error = true;
-
        spin_unlock_irqrestore(&video->irqlock, flags);
 }
 
@@ -1087,29 +1137,10 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
        if (ret < 0)
                goto err_check_format;
 
-       /* In sensor-to-memory mode, the stream can be started synchronously
-        * to the stream on command. In memory-to-memory mode, it will be
-        * started when buffers are queued on both the input and output.
-        */
-       if (pipe->input == NULL) {
-               ret = omap3isp_pipeline_set_stream(pipe,
-                                             ISP_PIPELINE_STREAM_CONTINUOUS);
-               if (ret < 0)
-                       goto err_set_stream;
-               spin_lock_irqsave(&video->irqlock, flags);
-               if (list_empty(&video->dmaqueue))
-                       video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
-               spin_unlock_irqrestore(&video->irqlock, flags);
-       }
-
        mutex_unlock(&video->stream_lock);
 
        return 0;
 
-err_set_stream:
-       mutex_lock(&video->queue_lock);
-       vb2_streamoff(&vfh->queue, type);
-       mutex_unlock(&video->queue_lock);
 err_check_format:
        media_entity_pipeline_stop(&video->video.entity);
 err_pipeline_start:
index 190e259a6a2df87553dd2e7c3b655b0f7ebca08e..443e8f7673e2fe02164ba907ca5744a12bf44785 100644 (file)
@@ -33,9 +33,9 @@ enum isp_interface_type {
  * struct isp_parallel_cfg - Parallel interface configuration
  * @data_lane_shift: Data lane shifter
  *             0 - CAMEXT[13:0] -> CAM[13:0]
- *             1 - CAMEXT[13:2] -> CAM[11:0]
- *             2 - CAMEXT[13:4] -> CAM[9:0]
- *             3 - CAMEXT[13:6] -> CAM[7:0]
+ *             2 - CAMEXT[13:2] -> CAM[11:0]
+ *             4 - CAMEXT[13:4] -> CAM[9:0]
+ *             6 - CAMEXT[13:6] -> CAM[7:0]
  * @clk_pol: Pixel clock polarity
  *             0 - Sample on rising edge, 1 - Sample on falling edge
  * @hs_pol: Horizontal synchronization polarity
@@ -48,7 +48,7 @@ enum isp_interface_type {
  *             0 - Normal, 1 - One's complement
  */
 struct isp_parallel_cfg {
-       unsigned int data_lane_shift:2;
+       unsigned int data_lane_shift:3;
        unsigned int clk_pol:1;
        unsigned int hs_pol:1;
        unsigned int vs_pol:1;
index 485f5259acb082d4e71484c12ad730c0142dc77c..552789a69c8645bd94af18f71cee62192fa3d80c 100644 (file)
@@ -1613,6 +1613,7 @@ static const struct of_device_id jpu_dt_ids[] = {
        { .compatible = "renesas,jpu-r8a7791" }, /* M2-W */
        { .compatible = "renesas,jpu-r8a7792" }, /* V2H */
        { .compatible = "renesas,jpu-r8a7793" }, /* M2-N */
+       { .compatible = "renesas,rcar-gen2-jpu" },
        { },
 };
 MODULE_DEVICE_TABLE(of, jpu_dt_ids);
index c398b285180cda757ec291bb6cc0c004c3c27de4..1af779ee3c747f31aff5d79d7d3db66d65a41b03 100644 (file)
@@ -795,7 +795,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
                        xlate->host_fmt = &isi_camera_formats[i];
                        xlate->code     = code.code;
                        dev_dbg(icd->parent, "Providing format %s using code %d\n",
-                               isi_camera_formats[0].name, code.code);
+                               xlate->host_fmt->name, xlate->code);
                }
                break;
        default:
index b7fd695b9ed5187d2f8d2c356269ebe6fc1d26d4..dc75a80794fb92658f0ab375d924eeeed74b3467 100644 (file)
 #define RCAR_VIN_BT656                 (1 << 3)
 
 enum chip_id {
+       RCAR_GEN3,
        RCAR_GEN2,
        RCAR_H1,
        RCAR_M1,
@@ -1818,6 +1819,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
 
 #ifdef CONFIG_OF
 static const struct of_device_id rcar_vin_of_table[] = {
+       { .compatible = "renesas,vin-r8a7795", .data = (void *)RCAR_GEN3 },
        { .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
index 90c87f2b4ec075bbe96ee15201494f3dcc757753..b9f369c0fb9473190bfc7d4f41c63b503a1b1785 100644 (file)
@@ -213,8 +213,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
                        unsigned int *count, unsigned int *num_planes,
                        unsigned int sizes[], void *alloc_ctxs[])
 {
-       struct soc_camera_device *icd = container_of(vq,
-                       struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
 
@@ -361,8 +360,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
 static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
 {
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-       struct soc_camera_device *icd = container_of(vb->vb2_queue,
-                       struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
        struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
@@ -413,8 +411,7 @@ error:
 static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
 {
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-       struct soc_camera_device *icd = container_of(vb->vb2_queue,
-                       struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -444,8 +441,7 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
 static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
 {
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-       struct soc_camera_device *icd = container_of(vb->vb2_queue,
-                       struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
 
@@ -460,7 +456,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
 
 static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
 {
-       struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(q);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
        struct list_head *buf_head, *tmp;
index cc84c6d6a701ce249722010ed514c1e4a4630def..46c7186f78679a5ca09a01d8709cbb77cf1332bc 100644 (file)
@@ -1493,6 +1493,8 @@ static void soc_camera_async_unbind(struct v4l2_async_notifier *notifier,
                                        struct soc_camera_async_client, notifier);
        struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev);
 
+       icd->control = NULL;
+
        if (icd->clk) {
                v4l2_clk_unregister(icd->clk);
                icd->clk = NULL;
index 69d7fe4471c2ce887c57f15551df6fbd1f385b63..2c0015b1264d3254451a7c56f9c9b1a69b760ad9 100644 (file)
@@ -118,7 +118,7 @@ int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
                struct channel_info *tsin, int chan_num)
 {
        struct tda18212_config *tda18212;
-       struct stv6110x_devctl *fe2;
+       const struct stv6110x_devctl *fe2;
        struct i2c_client *client;
        struct i2c_board_info tda18212_info = {
                .type = "tda18212",
index be680f839e77c9f3d7ab0e7366762093a35e88fe..e236059a60ad3ccf9b16c1861b5297abd549c266 100644 (file)
@@ -3,3 +3,7 @@ obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
 ti-vpe-y := vpe.o sc.o csc.o vpdma.o
 
 ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
+
+ti-cal-y := cal.o
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
new file mode 100644 (file)
index 0000000..69ec79b
--- /dev/null
@@ -0,0 +1,1970 @@
+/*
+ * TI CAL camera interface driver
+ *
+ * Copyright (c) 2015 Texas Instruments Inc.
+ * Benoit Parrot, <bparrot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+
+#include <media/v4l2-of.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include "cal_regs.h"
+
+#define CAL_MODULE_NAME "cal"
+
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+
+#define CAL_VERSION "0.1.0"
+
+MODULE_DESCRIPTION("TI CAL driver");
+MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(CAL_VERSION);
+
+static unsigned video_nr = -1;
+module_param(video_nr, uint, 0644);
+MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
+
+static unsigned debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+/* timeperframe: default is 1001/30000 s (~29.97 fps) */
+static const struct v4l2_fract
+       tpf_default = {.numerator = 1001,       .denominator = 30000};
+
+#define cal_dbg(level, caldev, fmt, arg...)    \
+               v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
+#define cal_info(caldev, fmt, arg...)  \
+               v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
+#define cal_err(caldev, fmt, arg...)   \
+               v4l2_err(&caldev->v4l2_dev, fmt, ##arg)
+
+#define ctx_dbg(level, ctx, fmt, arg...)       \
+               v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
+#define ctx_info(ctx, fmt, arg...)     \
+               v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
+#define ctx_err(ctx, fmt, arg...)      \
+               v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
+
+#define CAL_NUM_INPUT 1
+#define CAL_NUM_CONTEXT 2
+
+#define bytes_per_line(pixel, bpp) (ALIGN((pixel) * (bpp), 16))
+
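+/* 32-bit MMIO helpers for the CAL register block */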
+#define reg_read(dev, offset) ioread32(dev->base + offset)
+#define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
+
+#define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
+                                                   mask)
+#define reg_write_field(dev, offset, field, mask) do { \
+       u32 val = reg_read(dev, offset); \
+       set_field(&val, field, mask); \
+       reg_write(dev, offset, val); } while (0)
+
+/* ------------------------------------------------------------------
+ *     Basic structures
+ * ------------------------------------------------------------------
+ */
+
+struct cal_fmt {
+       u32     fourcc;
+       u32     code;
+       u8      depth;
+};
+
+static struct cal_fmt cal_formats[] = {
+       {
+               .fourcc         = V4L2_PIX_FMT_YUYV,
+               .code           = MEDIA_BUS_FMT_YUYV8_2X8,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_UYVY,
+               .code           = MEDIA_BUS_FMT_UYVY8_2X8,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_YVYU,
+               .code           = MEDIA_BUS_FMT_YVYU8_2X8,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_VYUY,
+               .code           = MEDIA_BUS_FMT_VYUY8_2X8,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+               .code           = MEDIA_BUS_FMT_RGB565_2X8_LE,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+               .code           = MEDIA_BUS_FMT_RGB565_2X8_BE,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+               .code           = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+               .code           = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB24, /* rgb */
+               .code           = MEDIA_BUS_FMT_RGB888_2X12_LE,
+               .depth          = 24,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_BGR24, /* bgr */
+               .code           = MEDIA_BUS_FMT_RGB888_2X12_BE,
+               .depth          = 24,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB32, /* argb */
+               .code           = MEDIA_BUS_FMT_ARGB8888_1X32,
+               .depth          = 32,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SBGGR8,
+               .code           = MEDIA_BUS_FMT_SBGGR8_1X8,
+               .depth          = 8,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGBRG8,
+               .code           = MEDIA_BUS_FMT_SGBRG8_1X8,
+               .depth          = 8,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGRBG8,
+               .code           = MEDIA_BUS_FMT_SGRBG8_1X8,
+               .depth          = 8,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SRGGB8,
+               .code           = MEDIA_BUS_FMT_SRGGB8_1X8,
+               .depth          = 8,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SBGGR10,
+               .code           = MEDIA_BUS_FMT_SBGGR10_1X10,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGBRG10,
+               .code           = MEDIA_BUS_FMT_SGBRG10_1X10,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGRBG10,
+               .code           = MEDIA_BUS_FMT_SGRBG10_1X10,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SRGGB10,
+               .code           = MEDIA_BUS_FMT_SRGGB10_1X10,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SBGGR12,
+               .code           = MEDIA_BUS_FMT_SBGGR12_1X12,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGBRG12,
+               .code           = MEDIA_BUS_FMT_SGBRG12_1X12,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGRBG12,
+               .code           = MEDIA_BUS_FMT_SGRBG12_1X12,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SRGGB12,
+               .code           = MEDIA_BUS_FMT_SRGGB12_1X12,
+               .depth          = 16,
+       },
+};
+
+/* Convert a four-character code (FOURCC) to a printable string */
+static char *fourcc_to_str(u32 fmt)
+{
+       static char code[5];
+
+       code[0] = (unsigned char)(fmt & 0xff);
+       code[1] = (unsigned char)((fmt >> 8) & 0xff);
+       code[2] = (unsigned char)((fmt >> 16) & 0xff);
+       code[3] = (unsigned char)((fmt >> 24) & 0xff);
+       code[4] = '\0';
+
+       return code;
+}
+
+/* buffer for one video frame */
+struct cal_buffer {
+       /* common v4l buffer stuff -- must be first */
+       struct vb2_v4l2_buffer  vb;
+       struct list_head        list;
+       const struct cal_fmt    *fmt;
+};
+
+struct cal_dmaqueue {
+       struct list_head        active;
+
+       /* Counters to control fps rate */
+       int                     frame;
+       int                     ini_jiffies;
+};
+
+struct cm_data {
+       void __iomem            *base;
+       struct resource         *res;
+
+       unsigned int            camerrx_control;
+
+       struct platform_device *pdev;
+};
+
+struct cc_data {
+       void __iomem            *base;
+       struct resource         *res;
+
+       struct platform_device *pdev;
+};
+
+/*
+ * There is one cal_dev structure in the driver; it is shared by
+ * all instances.
+ */
+struct cal_dev {
+       int                     irq;
+       void __iomem            *base;
+       struct resource         *res;
+       struct platform_device  *pdev;
+       struct v4l2_device      v4l2_dev;
+
+       /* Control Module handle */
+       struct cm_data          *cm;
+       /* Camera Core Module handle */
+       struct cc_data          *cc[CAL_NUM_CSI2_PORTS];
+
+       struct cal_ctx          *ctx[CAL_NUM_CONTEXT];
+};
+
+/*
+ * There is one cal_ctx structure for each camera core context.
+ */
+struct cal_ctx {
+       struct v4l2_device      v4l2_dev;
+       struct v4l2_ctrl_handler ctrl_handler;
+       struct video_device     vdev;
+       struct v4l2_async_notifier notifier;
+       struct v4l2_subdev      *sensor;
+       struct v4l2_of_endpoint endpoint;
+
+       struct v4l2_async_subdev asd;
+       struct v4l2_async_subdev *asd_list[1];
+
+       struct v4l2_fh          fh;
+       struct cal_dev          *dev;
+       struct cc_data          *cc;
+
+       /* v4l2_ioctl mutex */
+       struct mutex            mutex;
+       /* v4l2 buffers lock */
+       spinlock_t              slock;
+
+       /* Several counters */
+       unsigned long           jiffies;
+
+       struct vb2_alloc_ctx    *alloc_ctx;
+       struct cal_dmaqueue     vidq;
+
+       /* Input Number */
+       int                     input;
+
+       /* video capture */
+       const struct cal_fmt    *fmt;
+       /* Used to store current pixel format */
+       struct v4l2_format              v_fmt;
+       /* Used to store current mbus frame format */
+       struct v4l2_mbus_framefmt       m_fmt;
+
+       /* Current subdev enumerated format */
+       struct cal_fmt          *active_fmt[ARRAY_SIZE(cal_formats)];
+       int                     num_active_fmt;
+
+       struct v4l2_fract       timeperframe;
+       unsigned int            sequence;
+       unsigned int            external_rate;
+       struct vb2_queue        vb_vidq;
+       unsigned int            seq_count;
+       unsigned int            csi2_port;
+       unsigned int            virtual_channel;
+
+       /* Pointer to the v4l2_buffer currently being filled */
+       struct cal_buffer       *cur_frm;
+       /* Pointer to the next v4l2_buffer to be filled */
+       struct cal_buffer       *next_frm;
+};
+
+static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
+                                               u32 pixelformat)
+{
+       const struct cal_fmt *fmt;
+       unsigned int k;
+
+       for (k = 0; k < ctx->num_active_fmt; k++) {
+               fmt = ctx->active_fmt[k];
+               if (fmt->fourcc == pixelformat)
+                       return fmt;
+       }
+
+       return NULL;
+}
+
+static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
+                                                u32 code)
+{
+       const struct cal_fmt *fmt;
+       unsigned int k;
+
+       for (k = 0; k < ctx->num_active_fmt; k++) {
+               fmt = ctx->active_fmt[k];
+               if (fmt->code == code)
+                       return fmt;
+       }
+
+       return NULL;
+}
+
+static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
+{
+       return container_of(n, struct cal_ctx, notifier);
+}
+
+static inline int get_field(u32 value, u32 mask)
+{
+       return (value & mask) >> __ffs(mask);
+}
+
+static inline void set_field(u32 *valp, u32 field, u32 mask)
+{
+       u32 val = *valp;
+
+       val &= ~mask;
+       val |= (field << __ffs(mask)) & mask;
+       *valp = val;
+}
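+
+/*
+ * For example, with mask 0x0000ff00 (so __ffs(mask) == 8):
+ *   get_field(0x12345678, 0x0000ff00) returns 0x56, and
+ *   set_field(&val, 0x9a, 0x0000ff00) turns val == 0x12345678 into 0x12349a78.
+ */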
+
+/*
+ * Control Module block access
+ */
+static struct cm_data *cm_create(struct cal_dev *dev)
+{
+       struct platform_device *pdev = dev->pdev;
+       struct cm_data *cm;
+
+       cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
+       if (!cm)
+               return ERR_PTR(-ENOMEM);
+
+       cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                               "camerrx_control");
+       cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
+       if (IS_ERR(cm->base)) {
+               cal_err(dev, "failed to ioremap\n");
+               return ERR_CAST(cm->base);
+       }
+
+       cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+               cm->res->name, &cm->res->start, &cm->res->end);
+
+       return cm;
+}
+
+static void camerarx_phy_enable(struct cal_ctx *ctx)
+{
+       u32 val;
+
+       if (!ctx->dev->cm->base) {
+               ctx_err(ctx, "cm not mapped\n");
+               return;
+       }
+
+       val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
+       if (ctx->csi2_port == 1) {
+               set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
+               set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
+               /* enable all lanes by default */
+               set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
+               set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
+       } else if (ctx->csi2_port == 2) {
+               set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
+               set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
+               /* enable all lanes by default */
+               set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
+               set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
+       }
+       reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+}
+
+static void camerarx_phy_disable(struct cal_ctx *ctx)
+{
+       u32 val;
+
+       if (!ctx->dev->cm->base) {
+               ctx_err(ctx, "cm not mapped\n");
+               return;
+       }
+
+       val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
+       if (ctx->csi2_port == 1)
+               set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
+       else if (ctx->csi2_port == 2)
+               set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
+       reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+}
+
+/*
+ * Camera Instance access block
+ */
+static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
+{
+       struct platform_device *pdev = dev->pdev;
+       struct cc_data *cc;
+
+       cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
+       if (!cc)
+               return ERR_PTR(-ENOMEM);
+
+       cc->res = platform_get_resource_byname(pdev,
+                                              IORESOURCE_MEM,
+                                              (core == 0) ?
+                                               "cal_rx_core0" :
+                                               "cal_rx_core1");
+       cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
+       if (IS_ERR(cc->base)) {
+               cal_err(dev, "failed to ioremap\n");
+               return ERR_CAST(cc->base);
+       }
+
+       cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+               cc->res->name, &cc->res->start, &cc->res->end);
+
+       return cc;
+}
+
+/*
+ * Get Revision and HW info
+ */
+static void cal_get_hwinfo(struct cal_dev *dev)
+{
+       u32 revision = 0;
+       u32 hwinfo = 0;
+
+       revision = reg_read(dev, CAL_HL_REVISION);
+       cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
+               revision);
+
+       hwinfo = reg_read(dev, CAL_HL_HWINFO);
+       cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
+               hwinfo);
+}
+
+static inline int cal_runtime_get(struct cal_dev *dev)
+{
+       int r;
+
+       r = pm_runtime_get_sync(&dev->pdev->dev);
+
+       return r;
+}
+
+static inline void cal_runtime_put(struct cal_dev *dev)
+{
+       pm_runtime_put_sync(&dev->pdev->dev);
+}
+
+static void cal_quickdump_regs(struct cal_dev *dev)
+{
+       cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
+       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+                      dev->base, resource_size(dev->res), false);
+
+       if (dev->ctx[0]) {
+               cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
+                        &dev->ctx[0]->cc->res->start);
+               print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+                              dev->ctx[0]->cc->base,
+                              resource_size(dev->ctx[0]->cc->res),
+                              false);
+       }
+
+       if (dev->ctx[1]) {
+               cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
+                        &dev->ctx[1]->cc->res->start);
+               print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+                              dev->ctx[1]->cc->base,
+                              resource_size(dev->ctx[1]->cc->res),
+                              false);
+       }
+
+       cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
+                &dev->cm->res->start);
+       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+                      dev->cm->base,
+                      resource_size(dev->cm->res), false);
+}
+
+/*
+ * Enable the expected IRQ sources
+ */
+static void enable_irqs(struct cal_ctx *ctx)
+{
+       /* Enable IRQ_WDMA_END 0/1 */
+       reg_write_field(ctx->dev,
+                       CAL_HL_IRQENABLE_SET(2),
+                       CAL_HL_IRQ_ENABLE,
+                       CAL_HL_IRQ_MASK(ctx->csi2_port));
+       /* Enable IRQ_WDMA_START 0/1 */
+       reg_write_field(ctx->dev,
+                       CAL_HL_IRQENABLE_SET(3),
+                       CAL_HL_IRQ_ENABLE,
+                       CAL_HL_IRQ_MASK(ctx->csi2_port));
+       /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
+       reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
+}
+
+static void disable_irqs(struct cal_ctx *ctx)
+{
+       /* Disable IRQ_WDMA_END 0/1 */
+       reg_write_field(ctx->dev,
+                       CAL_HL_IRQENABLE_CLR(2),
+                       CAL_HL_IRQ_CLEAR,
+                       CAL_HL_IRQ_MASK(ctx->csi2_port));
+       /* Disable IRQ_WDMA_START 0/1 */
+       reg_write_field(ctx->dev,
+                       CAL_HL_IRQENABLE_CLR(3),
+                       CAL_HL_IRQ_CLEAR,
+                       CAL_HL_IRQ_MASK(ctx->csi2_port));
+       /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
+       reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
+}
+
+static void csi2_init(struct cal_ctx *ctx)
+{
+       int i;
+       u32 val;
+
+       val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
+       set_field(&val, CAL_GEN_ENABLE,
+                 CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
+       set_field(&val, CAL_GEN_ENABLE,
+                 CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
+       set_field(&val, CAL_GEN_DISABLE,
+                 CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
+       set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
+       reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
+
+       val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+       set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
+                 CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
+       set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
+                 CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
+       reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+       for (i = 0; i < 10; i++) {
+               if (reg_read_field(ctx->dev,
+                                  CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+                                  CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
+                   CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
+                       break;
+               usleep_range(1000, 1100);
+       }
+       ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
+
+       val = reg_read(ctx->dev, CAL_CTRL);
+       set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
+       set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
+       set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
+                 CAL_CTRL_POSTED_WRITES_MASK);
+       set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
+       set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
+       reg_write(ctx->dev, CAL_CTRL, val);
+       ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
+}
+
+static void csi2_lane_config(struct cal_ctx *ctx)
+{
+       u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+       u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
+       u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
+       struct v4l2_of_bus_mipi_csi2 *mipi_csi2 = &ctx->endpoint.bus.mipi_csi2;
+       int lane;
+
+       set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
+       set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
+       for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
+               /*
+                * The lane fields are one nibble apart, starting with the
+                * clock lane followed by the data lanes, so shift the
+                * masks by 4 for each lane.
+                */
+               lane_mask <<= 4;
+               polarity_mask <<= 4;
+               set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
+               set_field(&val, mipi_csi2->lane_polarities[lane + 1],
+                         polarity_mask);
+       }
+
+       reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
+               ctx->csi2_port, val);
+}
+
+static void csi2_ppi_enable(struct cal_ctx *ctx)
+{
+       reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
+                       CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void csi2_ppi_disable(struct cal_ctx *ctx)
+{
+       reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
+                       CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void csi2_ctx_config(struct cal_ctx *ctx)
+{
+       u32 val;
+
+       val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
+       set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
+       /*
+        * DT type: MIPI CSI-2 Specs
+        *   0x1: All - DT filter is disabled
+        *  0x24: RGB888 1 pixel  = 3 bytes
+        *  0x2B: RAW10  4 pixels = 5 bytes
+        *  0x2A: RAW8   1 pixel  = 1 byte
+        *  0x1E: YUV422 2 pixels = 4 bytes
+        */
+       set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
+       /* Virtual Channel from the CSI2 sensor, usually 0 */
+       set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
+       /* NUM_LINES_PER_FRAME => 0 means auto detect */
+       set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
+       set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
+       set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
+                 CAL_CSI2_CTX_PACK_MODE_MASK);
+       reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
+}
+
+static void pix_proc_config(struct cal_ctx *ctx)
+{
+       u32 val;
+
+       val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
+       set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
+       set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
+       set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
+       set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
+       set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
+       set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
+       reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
+}
+
+static void cal_wr_dma_config(struct cal_ctx *ctx,
+                             unsigned int width)
+{
+       u32 val;
+
+       val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
+       set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
+       set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
+                 CAL_WR_DMA_CTRL_DTAG_MASK);
+       set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
+                 CAL_WR_DMA_CTRL_MODE_MASK);
+       set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
+                 CAL_WR_DMA_CTRL_PATTERN_MASK);
+       set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
+       reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
+
+       /*
+        * width/16 not sure but giving it a whirl.
+        * zero does not work right
+        */
+       reg_write_field(ctx->dev,
+                       CAL_WR_DMA_OFST(ctx->csi2_port),
+                       (width / 16),
+                       CAL_WR_DMA_OFST_MASK);
+       ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));
+
+       val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
+       /* 64 bit word means no skipping */
+       set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
+       /*
+        * (width*8)/64 this should be size of an entire line
+        * in 64bit word but 0 means all data until the end
+        * is detected automagically
+        */
+       set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
+       reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
+}
+
+static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
+{
+       reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM      0
+#define TCLK_MISS      1
+#define TCLK_SETTLE    14
+#define THS_SETTLE     15
+
+static void csi2_phy_config(struct cal_ctx *ctx)
+{
+       unsigned int reg0, reg1;
+       unsigned int ths_term, ths_settle;
+       unsigned int ddrclkperiod_us;
+
+       /*
+        * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
+        */
+       ddrclkperiod_us = ctx->external_rate / 2000000;
+       ddrclkperiod_us = 1000000 / ddrclkperiod_us;
+       ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
+
+       ths_term = 20000 / ddrclkperiod_us;
+       ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
+       ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
+
+       /*
+        * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
+        *      Since CtrlClk is fixed at 96 MHz, we get
+        *      ths_settle = floor(176.3 / 10.416) - 1 = 15
+        * If we ever switch to a dynamic clock then this code might be useful
+        *
+        * unsigned int ctrlclkperiod_us;
+        * ctrlclkperiod_us = 96000000 / 1000000;
+        * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
+        * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
+
+        * ths_settle = 176300  / ctrlclkperiod_us;
+        * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
+        */
+
+       ths_settle = THS_SETTLE;
+       ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
+
+       reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
+       set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
+                 CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
+       set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
+       set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
+
+       ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
+       reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
+
+       reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
+       set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
+       set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
+       set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
+       set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
+
+       ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
+       reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
+}
+
+static int cal_get_external_info(struct cal_ctx *ctx)
+{
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
+       if (!ctrl) {
+               ctx_err(ctx, "no pixel rate control in subdev: %s\n",
+                       ctx->sensor->name);
+               return -EPIPE;
+       }
+
+       ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+       ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);
+
+       return 0;
+}
+
+static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
+{
+       struct cal_dmaqueue *dma_q = &ctx->vidq;
+       struct cal_buffer *buf;
+       unsigned long addr;
+
+       buf = list_entry(dma_q->active.next, struct cal_buffer, list);
+       ctx->next_frm = buf;
+       list_del(&buf->list);
+
+       addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+       cal_wr_dma_addr(ctx, addr);
+}
+
+static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
+{
+       ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+       ctx->cur_frm->vb.field = ctx->m_fmt.field;
+       ctx->cur_frm->vb.sequence = ctx->sequence++;
+
+       vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+       ctx->cur_frm = ctx->next_frm;
+}
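+
+/*
+ * Each context keeps two buffer pointers: cur_frm is the buffer the write
+ * DMA is currently filling and next_frm is the one queued behind it.  The
+ * IRQ handler below completes cur_frm on a WDMA_END interrupt and programs
+ * the address of the next queued buffer on a WDMA_START interrupt; while
+ * cur_frm == next_frm there is no spare buffer and the hardware keeps
+ * writing into the same one.
+ */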
+
+#define isvcirqset(irq, vc, ff) (irq & \
+       (CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))
+
+#define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port))
+
+static irqreturn_t cal_irq(int irq_cal, void *data)
+{
+       struct cal_dev *dev = (struct cal_dev *)data;
+       struct cal_ctx *ctx;
+       struct cal_dmaqueue *dma_q;
+       u32 irqst2, irqst3;
+
+       /* Check which DMA just finished */
+       irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
+       if (irqst2) {
+               /* Clear Interrupt status */
+               reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
+
+               /* Need to check both ports */
+               if (isportirqset(irqst2, 1)) {
+                       ctx = dev->ctx[0];
+
+                       if (ctx->cur_frm != ctx->next_frm)
+                               cal_process_buffer_complete(ctx);
+               }
+
+               if (isportirqset(irqst2, 2)) {
+                       ctx = dev->ctx[1];
+
+                       if (ctx->cur_frm != ctx->next_frm)
+                               cal_process_buffer_complete(ctx);
+               }
+       }
+
+       /* Check which DMA just started */
+       irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
+       if (irqst3) {
+               /* Clear Interrupt status */
+               reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
+
+               /* Need to check both ports */
+               if (isportirqset(irqst3, 1)) {
+                       ctx = dev->ctx[0];
+                       dma_q = &ctx->vidq;
+
+                       spin_lock(&ctx->slock);
+                       if (!list_empty(&dma_q->active) &&
+                           ctx->cur_frm == ctx->next_frm)
+                               cal_schedule_next_buffer(ctx);
+                       spin_unlock(&ctx->slock);
+               }
+
+               if (isportirqset(irqst3, 2)) {
+                       ctx = dev->ctx[1];
+                       dma_q = &ctx->vidq;
+
+                       spin_lock(&ctx->slock);
+                       if (!list_empty(&dma_q->active) &&
+                           ctx->cur_frm == ctx->next_frm)
+                               cal_schedule_next_buffer(ctx);
+                       spin_unlock(&ctx->slock);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * video ioctls
+ */
+static int cal_querycap(struct file *file, void *priv,
+                       struct v4l2_capability *cap)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+
+       strlcpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
+       strlcpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
+
+       snprintf(cap->bus_info, sizeof(cap->bus_info),
+                "platform:%s", ctx->v4l2_dev.name);
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+                           V4L2_CAP_READWRITE;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+       return 0;
+}
+
+static int cal_enum_fmt_vid_cap(struct file *file, void  *priv,
+                               struct v4l2_fmtdesc *f)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       const struct cal_fmt *fmt = NULL;
+
+       if (f->index >= ctx->num_active_fmt)
+               return -EINVAL;
+
+       fmt = ctx->active_fmt[f->index];
+
+       f->pixelformat = fmt->fourcc;
+       f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       return 0;
+}
+
+static int __subdev_get_format(struct cal_ctx *ctx,
+                              struct v4l2_mbus_framefmt *fmt)
+{
+       struct v4l2_subdev_format sd_fmt;
+       struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+       int ret;
+
+       if (!ctx->sensor)
+               return -EINVAL;
+
+       sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+       sd_fmt.pad = 0;
+
+       ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
+       if (ret)
+               return ret;
+
+       *fmt = *mbus_fmt;
+
+       ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+               fmt->width, fmt->height, fmt->code);
+
+       return 0;
+}
+
+static int __subdev_set_format(struct cal_ctx *ctx,
+                              struct v4l2_mbus_framefmt *fmt)
+{
+       struct v4l2_subdev_format sd_fmt;
+       struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+       int ret;
+
+       if (!ctx->sensor)
+               return -EINVAL;
+
+       sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+       sd_fmt.pad = 0;
+       *mbus_fmt = *fmt;
+
+       ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
+       if (ret)
+               return ret;
+
+       ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+               fmt->width, fmt->height, fmt->code);
+
+       return 0;
+}
+
+static int cal_calc_format_size(struct cal_ctx *ctx,
+                               const struct cal_fmt *fmt,
+                               struct v4l2_format *f)
+{
+       if (!fmt) {
+               ctx_dbg(3, ctx, "No cal_fmt provided!\n");
+               return -EINVAL;
+       }
+
+       v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
+                             &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
+       f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
+                                                fmt->depth >> 3);
+       f->fmt.pix.sizeimage = f->fmt.pix.height *
+                              f->fmt.pix.bytesperline;
+
+       ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+               __func__, fourcc_to_str(f->fmt.pix.pixelformat),
+               f->fmt.pix.width, f->fmt.pix.height,
+               f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+
+       return 0;
+}
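+
+/*
+ * For instance, a 1280x720 YUYV capture (16 bits per pixel) ends up with
+ * bytesperline = ALIGN(1280 * 2, 16) = 2560 and
+ * sizeimage = 720 * 2560 = 1843200 bytes.
+ */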
+
+static int cal_g_fmt_vid_cap(struct file *file, void *priv,
+                            struct v4l2_format *f)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+
+       *f = ctx->v_fmt;
+
+       return 0;
+}
+
+static int cal_try_fmt_vid_cap(struct file *file, void *priv,
+                              struct v4l2_format *f)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       const struct cal_fmt *fmt;
+       struct v4l2_subdev_frame_size_enum fse;
+       int ret, found;
+
+       fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+       if (!fmt) {
+               ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
+                       f->fmt.pix.pixelformat);
+
+               /* Just get the first one enumerated */
+               fmt = ctx->active_fmt[0];
+               f->fmt.pix.pixelformat = fmt->fourcc;
+       }
+
+       f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
+
+       /* check for/find a valid width/height */
+       ret = 0;
+       found = false;
+       fse.pad = 0;
+       fse.code = fmt->code;
+       fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+       for (fse.index = 0; ; fse.index++) {
+               ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
+                                      NULL, &fse);
+               if (ret)
+                       break;
+
+               if ((f->fmt.pix.width == fse.max_width) &&
+                   (f->fmt.pix.height == fse.max_height)) {
+                       found = true;
+                       break;
+               } else if ((f->fmt.pix.width >= fse.min_width) &&
+                        (f->fmt.pix.width <= fse.max_width) &&
+                        (f->fmt.pix.height >= fse.min_height) &&
+                        (f->fmt.pix.height <= fse.max_height)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               /* use existing values as default */
+               f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
+               f->fmt.pix.height =  ctx->v_fmt.fmt.pix.height;
+       }
+
+       /*
+        * Use current colorspace for now, it will get
+        * updated properly during s_fmt
+        */
+       f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
+       return cal_calc_format_size(ctx, fmt, f);
+}
+
+static int cal_s_fmt_vid_cap(struct file *file, void *priv,
+                            struct v4l2_format *f)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       struct vb2_queue *q = &ctx->vb_vidq;
+       const struct cal_fmt *fmt;
+       struct v4l2_mbus_framefmt mbus_fmt;
+       int ret;
+
+       if (vb2_is_busy(q)) {
+               ctx_dbg(3, ctx, "%s device busy\n", __func__);
+               return -EBUSY;
+       }
+
+       ret = cal_try_fmt_vid_cap(file, priv, f);
+       if (ret < 0)
+               return ret;
+
+       fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+
+       v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
+
+       ret = __subdev_set_format(ctx, &mbus_fmt);
+       if (ret)
+               return ret;
+
+       /* Just double check nothing has gone wrong */
+       if (mbus_fmt.code != fmt->code) {
+               ctx_dbg(3, ctx,
+                       "%s subdev changed format on us, this should not happen\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+       ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       ctx->v_fmt.fmt.pix.pixelformat  = fmt->fourcc;
+       cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
+       ctx->fmt = fmt;
+       ctx->m_fmt = mbus_fmt;
+       *f = ctx->v_fmt;
+
+       return 0;
+}
+
+static int cal_enum_framesizes(struct file *file, void *fh,
+                              struct v4l2_frmsizeenum *fsize)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       const struct cal_fmt *fmt;
+       struct v4l2_subdev_frame_size_enum fse;
+       int ret;
+
+       /* check for valid format */
+       fmt = find_format_by_pix(ctx, fsize->pixel_format);
+       if (!fmt) {
+               ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
+                       fsize->pixel_format);
+               return -EINVAL;
+       }
+
+       fse.index = fsize->index;
+       fse.pad = 0;
+       fse.code = fmt->code;
+
+       ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
+       if (ret)
+               return -EINVAL;
+
+       ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+               __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+               fse.min_height, fse.max_height);
+
+       fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+       fsize->discrete.width = fse.max_width;
+       fsize->discrete.height = fse.max_height;
+
+       return 0;
+}
+
+static int cal_enum_input(struct file *file, void *priv,
+                         struct v4l2_input *inp)
+{
+       if (inp->index >= CAL_NUM_INPUT)
+               return -EINVAL;
+
+       inp->type = V4L2_INPUT_TYPE_CAMERA;
+       sprintf(inp->name, "Camera %u", inp->index);
+       return 0;
+}
+
+static int cal_g_input(struct file *file, void *priv, unsigned int *i)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+
+       *i = ctx->input;
+       return 0;
+}
+
+static int cal_s_input(struct file *file, void *priv, unsigned int i)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+
+       if (i >= CAL_NUM_INPUT)
+               return -EINVAL;
+
+       ctx->input = i;
+       return 0;
+}
+
+/* timeperframe is arbitrary and continuous */
+static int cal_enum_frameintervals(struct file *file, void *priv,
+                                  struct v4l2_frmivalenum *fival)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       const struct cal_fmt *fmt;
+       struct v4l2_subdev_frame_size_enum fse;
+       int ret;
+
+       if (fival->index)
+               return -EINVAL;
+
+       fmt = find_format_by_pix(ctx, fival->pixel_format);
+       if (!fmt)
+               return -EINVAL;
+
+       /* check for valid width/height */
+       ret = 0;
+       fse.pad = 0;
+       fse.code = fmt->code;
+       fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+       for (fse.index = 0; ; fse.index++) {
+               ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
+                                      NULL, &fse);
+               if (ret)
+                       return -EINVAL;
+
+               if ((fival->width == fse.max_width) &&
+                   (fival->height == fse.max_height))
+                       break;
+               else if ((fival->width >= fse.min_width) &&
+                        (fival->width <= fse.max_width) &&
+                        (fival->height >= fse.min_height) &&
+                        (fival->height <= fse.max_height))
+                       break;
+       }
+
+       fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+       fival->discrete.numerator = 1;
+       fival->discrete.denominator = 30;
+
+       return 0;
+}
+
+/*
+ * Videobuf operations
+ */
+static int cal_queue_setup(struct vb2_queue *vq,
+                          unsigned int *nbuffers, unsigned int *nplanes,
+                          unsigned int sizes[], void *alloc_ctxs[])
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+       unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
+
+       if (vq->num_buffers + *nbuffers < 3)
+               *nbuffers = 3 - vq->num_buffers;
+       alloc_ctxs[0] = ctx->alloc_ctx;
+
+       if (*nplanes) {
+               if (sizes[0] < size)
+                       return -EINVAL;
+               size = sizes[0];
+       }
+
+       *nplanes = 1;
+       sizes[0] = size;
+
+       ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
+
+       return 0;
+}
+
+static int cal_buffer_prepare(struct vb2_buffer *vb)
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+       struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+                                             vb.vb2_buf);
+       unsigned long size;
+
+       if (WARN_ON(!ctx->fmt))
+               return -EINVAL;
+
+       size = ctx->v_fmt.fmt.pix.sizeimage;
+       if (vb2_plane_size(vb, 0) < size) {
+               ctx_err(ctx,
+                       "data will not fit into plane (%lu < %lu)\n",
+                       vb2_plane_size(vb, 0), size);
+               return -EINVAL;
+       }
+
+       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+       return 0;
+}
+
+static void cal_buffer_queue(struct vb2_buffer *vb)
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+       struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+                                             vb.vb2_buf);
+       struct cal_dmaqueue *vidq = &ctx->vidq;
+       unsigned long flags = 0;
+
+       /* recheck locking */
+       spin_lock_irqsave(&ctx->slock, flags);
+       list_add_tail(&buf->list, &vidq->active);
+       spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+       struct cal_dmaqueue *dma_q = &ctx->vidq;
+       struct cal_buffer *buf, *tmp;
+       unsigned long addr = 0;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ctx->slock, flags);
+       if (list_empty(&dma_q->active)) {
+               spin_unlock_irqrestore(&ctx->slock, flags);
+               ctx_dbg(3, ctx, "buffer queue is empty\n");
+               return -EIO;
+       }
+
+       buf = list_entry(dma_q->active.next, struct cal_buffer, list);
+       ctx->cur_frm = buf;
+       ctx->next_frm = buf;
+       list_del(&buf->list);
+       spin_unlock_irqrestore(&ctx->slock, flags);
+
+       addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
+       ctx->sequence = 0;
+
+       ret = cal_get_external_info(ctx);
+       if (ret < 0)
+               goto err;
+
+       cal_runtime_get(ctx->dev);
+
+       enable_irqs(ctx);
+       camerarx_phy_enable(ctx);
+       csi2_init(ctx);
+       csi2_phy_config(ctx);
+       csi2_lane_config(ctx);
+       csi2_ctx_config(ctx);
+       pix_proc_config(ctx);
+       cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
+       cal_wr_dma_addr(ctx, addr);
+       csi2_ppi_enable(ctx);
+
+       if (ctx->sensor) {
+               if (v4l2_subdev_call(ctx->sensor, video, s_stream, 1)) {
+                       ctx_err(ctx, "stream on failed in subdev\n");
+                       cal_runtime_put(ctx->dev);
+                       ret = -EINVAL;
+                       goto err;
+               }
+       }
+
+       if (debug >= 4)
+               cal_quickdump_regs(ctx->dev);
+
+       return 0;
+
+err:
+       list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
+               list_del(&buf->list);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+       }
+       return ret;
+}
+
+static void cal_stop_streaming(struct vb2_queue *vq)
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+       struct cal_dmaqueue *dma_q = &ctx->vidq;
+       struct cal_buffer *buf, *tmp;
+       unsigned long flags;
+
+       if (ctx->sensor) {
+               if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
+                       ctx_err(ctx, "stream off failed in subdev\n");
+       }
+
+       csi2_ppi_disable(ctx);
+       disable_irqs(ctx);
+
+       /* Release all active buffers */
+       spin_lock_irqsave(&ctx->slock, flags);
+       list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
+               list_del(&buf->list);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+       }
+
+       if (ctx->cur_frm == ctx->next_frm) {
+               vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+       } else {
+               vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
+       }
+       ctx->cur_frm = NULL;
+       ctx->next_frm = NULL;
+       spin_unlock_irqrestore(&ctx->slock, flags);
+
+       cal_runtime_put(ctx->dev);
+}
+
+static const struct vb2_ops cal_video_qops = {
+       .queue_setup            = cal_queue_setup,
+       .buf_prepare            = cal_buffer_prepare,
+       .buf_queue              = cal_buffer_queue,
+       .start_streaming        = cal_start_streaming,
+       .stop_streaming         = cal_stop_streaming,
+       .wait_prepare           = vb2_ops_wait_prepare,
+       .wait_finish            = vb2_ops_wait_finish,
+};
+
+static const struct v4l2_file_operations cal_fops = {
+       .owner          = THIS_MODULE,
+       .open           = v4l2_fh_open,
+       .release        = vb2_fop_release,
+       .read           = vb2_fop_read,
+       .poll           = vb2_fop_poll,
+       .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+       .mmap           = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops cal_ioctl_ops = {
+       .vidioc_querycap      = cal_querycap,
+       .vidioc_enum_fmt_vid_cap  = cal_enum_fmt_vid_cap,
+       .vidioc_g_fmt_vid_cap     = cal_g_fmt_vid_cap,
+       .vidioc_try_fmt_vid_cap   = cal_try_fmt_vid_cap,
+       .vidioc_s_fmt_vid_cap     = cal_s_fmt_vid_cap,
+       .vidioc_enum_framesizes   = cal_enum_framesizes,
+       .vidioc_reqbufs       = vb2_ioctl_reqbufs,
+       .vidioc_create_bufs   = vb2_ioctl_create_bufs,
+       .vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
+       .vidioc_querybuf      = vb2_ioctl_querybuf,
+       .vidioc_qbuf          = vb2_ioctl_qbuf,
+       .vidioc_dqbuf         = vb2_ioctl_dqbuf,
+       .vidioc_enum_input    = cal_enum_input,
+       .vidioc_g_input       = cal_g_input,
+       .vidioc_s_input       = cal_s_input,
+       .vidioc_enum_frameintervals = cal_enum_frameintervals,
+       .vidioc_streamon      = vb2_ioctl_streamon,
+       .vidioc_streamoff     = vb2_ioctl_streamoff,
+       .vidioc_log_status    = v4l2_ctrl_log_status,
+       .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+       .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static struct video_device cal_videodev = {
+       .name           = CAL_MODULE_NAME,
+       .fops           = &cal_fops,
+       .ioctl_ops      = &cal_ioctl_ops,
+       .minor          = -1,
+       .release        = video_device_release_empty,
+};
+
+/* -----------------------------------------------------------------
+ *     Initialization and module stuff
+ * ------------------------------------------------------------------
+ */
+static int cal_complete_ctx(struct cal_ctx *ctx);
+
+static int cal_async_bound(struct v4l2_async_notifier *notifier,
+                          struct v4l2_subdev *subdev,
+                          struct v4l2_async_subdev *asd)
+{
+       struct cal_ctx *ctx = notifier_to_ctx(notifier);
+       struct v4l2_subdev_mbus_code_enum mbus_code;
+       int ret = 0;
+       int i, j, k;
+
+       if (ctx->sensor) {
+               ctx_info(ctx, "Rejecting subdev %s (Already set!!)",
+                        subdev->name);
+               return 0;
+       }
+
+       ctx->sensor = subdev;
+       ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
+
+       /* Enumerate sub device formats and enable all matching local formats */
+       ctx->num_active_fmt = 0;
+       for (j = 0, i = 0; ret != -EINVAL; ++j) {
+               struct cal_fmt *fmt;
+
+               memset(&mbus_code, 0, sizeof(mbus_code));
+               mbus_code.index = j;
+               ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+                                      NULL, &mbus_code);
+               if (ret)
+                       continue;
+
+               ctx_dbg(2, ctx,
+                       "subdev %s: code: %04x idx: %d\n",
+                       subdev->name, mbus_code.code, j);
+
+               for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
+                       fmt = &cal_formats[k];
+
+                       if (mbus_code.code == fmt->code) {
+                               ctx->active_fmt[i] = fmt;
+                               ctx_dbg(2, ctx,
+                                       "matched fourcc: %s: code: %04x idx: %d\n",
+                                       fourcc_to_str(fmt->fourcc),
+                                       fmt->code, i);
+                               ctx->num_active_fmt = ++i;
+                       }
+               }
+       }
+
+       if (i == 0) {
+               ctx_err(ctx, "No suitable format reported by subdev %s\n",
+                       subdev->name);
+               return -EINVAL;
+       }
+
+       cal_complete_ctx(ctx);
+
+       return 0;
+}
+
+static int cal_async_complete(struct v4l2_async_notifier *notifier)
+{
+       struct cal_ctx *ctx = notifier_to_ctx(notifier);
+       const struct cal_fmt *fmt;
+       struct v4l2_mbus_framefmt mbus_fmt;
+       int ret;
+
+       ret = __subdev_get_format(ctx, &mbus_fmt);
+       if (ret)
+               return ret;
+
+       fmt = find_format_by_code(ctx, mbus_fmt.code);
+       if (!fmt) {
+               ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
+                       mbus_fmt.code);
+               return -EINVAL;
+       }
+
+       /* Save current subdev format */
+       v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+       ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       ctx->v_fmt.fmt.pix.pixelformat  = fmt->fourcc;
+       cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
+       ctx->fmt = fmt;
+       ctx->m_fmt = mbus_fmt;
+
+       return 0;
+}
+
+static int cal_complete_ctx(struct cal_ctx *ctx)
+{
+       struct video_device *vfd;
+       struct vb2_queue *q;
+       int ret;
+
+       ctx->timeperframe = tpf_default;
+       ctx->external_rate = 192000000;
+
+       /* initialize locks */
+       spin_lock_init(&ctx->slock);
+       mutex_init(&ctx->mutex);
+
+       /* initialize queue */
+       q = &ctx->vb_vidq;
+       q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+       q->drv_priv = ctx;
+       q->buf_struct_size = sizeof(struct cal_buffer);
+       q->ops = &cal_video_qops;
+       q->mem_ops = &vb2_dma_contig_memops;
+       q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+       q->lock = &ctx->mutex;
+       q->min_buffers_needed = 3;
+
+       ret = vb2_queue_init(q);
+       if (ret)
+               return ret;
+
+       /* init video dma queues */
+       INIT_LIST_HEAD(&ctx->vidq.active);
+
+       vfd = &ctx->vdev;
+       *vfd = cal_videodev;
+       vfd->v4l2_dev = &ctx->v4l2_dev;
+       vfd->queue = q;
+
+       /*
+        * Provide a mutex to v4l2 core. It will be used to protect
+        * all fops and v4l2 ioctls.
+        */
+       vfd->lock = &ctx->mutex;
+       video_set_drvdata(vfd, ctx);
+
+       ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
+       if (ret < 0)
+               return ret;
+
+       v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
+                 video_device_node_name(vfd));
+
+       ctx->alloc_ctx = vb2_dma_contig_init_ctx(vfd->v4l2_dev->dev);
+       if (IS_ERR(ctx->alloc_ctx)) {
+               ctx_err(ctx, "Failed to alloc vb2 context\n");
+               ret = PTR_ERR(ctx->alloc_ctx);
+               goto vdev_unreg;
+       }
+
+       return 0;
+
+vdev_unreg:
+       video_unregister_device(vfd);
+       return ret;
+}
+
+static struct device_node *
+of_get_next_port(const struct device_node *parent,
+                struct device_node *prev)
+{
+       struct device_node *port = NULL;
+
+       if (!parent)
+               return NULL;
+
+       if (!prev) {
+               struct device_node *ports;
+               /*
+                * It's the first call, we have to find a port subnode
+                * within this node or within an optional 'ports' node.
+                */
+               ports = of_get_child_by_name(parent, "ports");
+               if (ports)
+                       parent = ports;
+
+               port = of_get_child_by_name(parent, "port");
+
+               /* release the 'ports' node */
+               of_node_put(ports);
+       } else {
+               struct device_node *ports;
+
+               ports = of_get_parent(prev);
+               if (!ports)
+                       return NULL;
+
+               do {
+                       port = of_get_next_child(ports, prev);
+                       if (!port) {
+                               of_node_put(ports);
+                               return NULL;
+                       }
+                       prev = port;
+               } while (of_node_cmp(port->name, "port") != 0);
+       }
+
+       return port;
+}
+
+static struct device_node *
+of_get_next_endpoint(const struct device_node *parent,
+                    struct device_node *prev)
+{
+       struct device_node *ep = NULL;
+
+       if (!parent)
+               return NULL;
+
+       do {
+               ep = of_get_next_child(parent, prev);
+               if (!ep)
+                       return NULL;
+               prev = ep;
+       } while (of_node_cmp(ep->name, "endpoint") != 0);
+
+       return ep;
+}
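+
+/*
+ * The two helpers above walk the device-tree graph for this driver:
+ * of_get_next_port() descends into an optional "ports" container and
+ * iterates over its "port" children, while of_get_next_endpoint() iterates
+ * over the "endpoint" children of a given port.  of_cal_create_instance()
+ * below matches each port's "reg" property against the CSI2 instance
+ * number and then follows the endpoint's "remote-endpoint" phandle to
+ * find the attached sensor.
+ */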
+
+static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
+{
+       struct platform_device *pdev = ctx->dev->pdev;
+       struct device_node *ep_node, *port, *remote_ep,
+                       *sensor_node, *parent;
+       struct v4l2_of_endpoint *endpoint;
+       struct v4l2_async_subdev *asd;
+       u32 regval = 0;
+       int ret, index, found_port = 0, lane;
+
+       parent = pdev->dev.of_node;
+
+       asd = &ctx->asd;
+       endpoint = &ctx->endpoint;
+
+       ep_node = NULL;
+       port = NULL;
+       remote_ep = NULL;
+       sensor_node = NULL;
+       ret = -EINVAL;
+
+       ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
+       for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
+               port = of_get_next_port(parent, port);
+               if (!port) {
+                       ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
+                               index);
+                       goto cleanup_exit;
+               }
+
+               /* Match the slice number with <REG> */
+               of_property_read_u32(port, "reg", &regval);
+               ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
+                       index, inst, regval);
+               if ((regval == inst) && (index == inst)) {
+                       found_port = 1;
+                       break;
+               }
+       }
+
+       if (!found_port) {
+               ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
+                       inst);
+               goto cleanup_exit;
+       }
+
+       ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
+               inst);
+
+       ep_node = of_get_next_endpoint(port, ep_node);
+       if (!ep_node) {
+               ctx_dbg(3, ctx, "can't get next endpoint\n");
+               goto cleanup_exit;
+       }
+
+       sensor_node = of_graph_get_remote_port_parent(ep_node);
+       if (!sensor_node) {
+               ctx_dbg(3, ctx, "can't get remote parent\n");
+               goto cleanup_exit;
+       }
+       asd->match_type = V4L2_ASYNC_MATCH_OF;
+       asd->match.of.node = sensor_node;
+
+       remote_ep = of_parse_phandle(ep_node, "remote-endpoint", 0);
+       if (!remote_ep) {
+               ctx_dbg(3, ctx, "can't get remote-endpoint\n");
+               goto cleanup_exit;
+       }
+       v4l2_of_parse_endpoint(remote_ep, endpoint);
+
+       if (endpoint->bus_type != V4L2_MBUS_CSI2) {
+               ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n",
+                       inst, sensor_node->name);
+               goto cleanup_exit;
+       }
+
+       /* Store Virtual Channel number */
+       ctx->virtual_channel = endpoint->base.id;
+
+       ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
+       ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
+       ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
+       ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
+       ctx_dbg(3, ctx, "num_data_lanes=%d\n",
+               endpoint->bus.mipi_csi2.num_data_lanes);
+       ctx_dbg(3, ctx, "data_lanes= <\n");
+       for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
+               ctx_dbg(3, ctx, "\t%d\n",
+                       endpoint->bus.mipi_csi2.data_lanes[lane]);
+       ctx_dbg(3, ctx, "\t>\n");
+
+       ctx_dbg(1, ctx, "Port: %d found sub-device %s\n",
+               inst, sensor_node->name);
+
+       ctx->asd_list[0] = asd;
+       ctx->notifier.subdevs = ctx->asd_list;
+       ctx->notifier.num_subdevs = 1;
+       ctx->notifier.bound = cal_async_bound;
+       ctx->notifier.complete = cal_async_complete;
+       ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
+                                          &ctx->notifier);
+       if (ret) {
+               ctx_err(ctx, "Error registering async notifier\n");
+               ret = -EINVAL;
+       }
+
+cleanup_exit:
+       if (remote_ep)
+               of_node_put(remote_ep);
+       if (sensor_node)
+               of_node_put(sensor_node);
+       if (ep_node)
+               of_node_put(ep_node);
+       if (port)
+               of_node_put(port);
+
+       return ret;
+}
+
+static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
+{
+       struct cal_ctx *ctx;
+       struct v4l2_ctrl_handler *hdl;
+       int ret;
+
+       ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
+
+       /* save the cal_dev * for future ref */
+       ctx->dev = dev;
+
+       snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
+                "%s-%03d", CAL_MODULE_NAME, inst);
+       ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
+       if (ret)
+               goto err_exit;
+
+       hdl = &ctx->ctrl_handler;
+       ret = v4l2_ctrl_handler_init(hdl, 11);
+       if (ret) {
+               ctx_err(ctx, "Failed to init ctrl handler\n");
+               goto unreg_dev;
+       }
+       ctx->v4l2_dev.ctrl_handler = hdl;
+
+       /* Make sure Camera Core H/W register area is available */
+       ctx->cc = dev->cc[inst];
+
+       /* Store the instance id */
+       ctx->csi2_port = inst + 1;
+
+       ret = of_cal_create_instance(ctx, inst);
+       if (ret) {
+               ret = -EINVAL;
+               goto free_hdl;
+       }
+       return ctx;
+
+free_hdl:
+       v4l2_ctrl_handler_free(hdl);
+unreg_dev:
+       v4l2_device_unregister(&ctx->v4l2_dev);
+err_exit:
+       return NULL;
+}
+
+static int cal_probe(struct platform_device *pdev)
+{
+       struct cal_dev *dev;
+       int ret;
+       int irq;
+
+       dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       /* set pseudo v4l2 device name so we can use v4l2_printk */
+       strlcpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
+               sizeof(dev->v4l2_dev.name));
+
+       /* save pdev pointer */
+       dev->pdev = pdev;
+
+       dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                               "cal_top");
+       dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
+       if (IS_ERR(dev->base))
+               return PTR_ERR(dev->base);
+
+       cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+               dev->res->name, &dev->res->start, &dev->res->end);
+
+       irq = platform_get_irq(pdev, 0);
+       cal_dbg(1, dev, "got irq# %d\n", irq);
+       ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
+                              dev);
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, dev);
+
+       dev->cm = cm_create(dev);
+       if (IS_ERR(dev->cm))
+               return PTR_ERR(dev->cm);
+
+       dev->cc[0] = cc_create(dev, 0);
+       if (IS_ERR(dev->cc[0]))
+               return PTR_ERR(dev->cc[0]);
+
+       dev->cc[1] = cc_create(dev, 1);
+       if (IS_ERR(dev->cc[1]))
+               return PTR_ERR(dev->cc[1]);
+
+       dev->ctx[0] = NULL;
+       dev->ctx[1] = NULL;
+
+       dev->ctx[0] = cal_create_instance(dev, 0);
+       dev->ctx[1] = cal_create_instance(dev, 1);
+       if (!dev->ctx[0] && !dev->ctx[1]) {
+               cal_err(dev, "Neither port is configured, no point in staying up\n");
+               return -ENODEV;
+       }
+
+       pm_runtime_enable(&pdev->dev);
+
+       ret = cal_runtime_get(dev);
+       if (ret)
+               goto runtime_disable;
+
+       /* Just check we can actually access the module */
+       cal_get_hwinfo(dev);
+
+       cal_runtime_put(dev);
+
+       return 0;
+
+runtime_disable:
+       pm_runtime_disable(&pdev->dev);
+       return ret;
+}
+
+static int cal_remove(struct platform_device *pdev)
+{
+       struct cal_dev *dev =
+               (struct cal_dev *)platform_get_drvdata(pdev);
+       struct cal_ctx *ctx;
+       int i;
+
+       cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
+
+       cal_runtime_get(dev);
+
+       for (i = 0; i < CAL_NUM_CONTEXT; i++) {
+               ctx = dev->ctx[i];
+               if (ctx) {
+                       ctx_dbg(1, ctx, "unregistering %s\n",
+                               video_device_node_name(&ctx->vdev));
+                       camerarx_phy_disable(ctx);
+                       v4l2_async_notifier_unregister(&ctx->notifier);
+                       vb2_dma_contig_cleanup_ctx(ctx->alloc_ctx);
+                       v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+                       v4l2_device_unregister(&ctx->v4l2_dev);
+                       video_unregister_device(&ctx->vdev);
+               }
+       }
+
+       cal_runtime_put(dev);
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id cal_of_match[] = {
+       { .compatible = "ti,dra72-cal", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, cal_of_match);
+#endif
+
+static struct platform_driver cal_pdrv = {
+       .probe          = cal_probe,
+       .remove         = cal_remove,
+       .driver         = {
+               .name   = CAL_MODULE_NAME,
+               .of_match_table = of_match_ptr(cal_of_match),
+       },
+};
+
+module_platform_driver(cal_pdrv);
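
The driver walks the standard ports/port/endpoint graph below its device-tree node with the two helpers added above. A minimal, hypothetical sketch of how such helpers chain together (the function and its names below are illustrative only, not part of this patch):

static void cal_dump_of_graph(struct device_node *parent)
{
	struct device_node *port = NULL;
	struct device_node *ep;

	/*
	 * of_get_next_child() drops the reference on the node passed as
	 * 'prev', so intermediate nodes are released as the walk advances.
	 */
	while ((port = of_get_next_port(parent, port)) != NULL) {
		ep = NULL;
		while ((ep = of_get_next_endpoint(port, ep)) != NULL)
			pr_info("endpoint %s under %s\n",
				ep->name, port->name);
	}
}
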
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti-vpe/cal_regs.h
new file mode 100644 (file)
index 0000000..82b3dcf
--- /dev/null
@@ -0,0 +1,479 @@
+/*
+ * TI CAL camera interface driver
+ *
+ * Copyright (c) 2015 Texas Instruments Inc.
+ *
+ * Benoit Parrot, <bparrot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_CAL_REGS_H
+#define __TI_CAL_REGS_H
+
+#define CAL_NUM_CSI2_PORTS             2
+
+/* CAL register offsets */
+
+#define CAL_HL_REVISION                        0x0000
+#define CAL_HL_HWINFO                  0x0004
+#define CAL_HL_SYSCONFIG               0x0010
+#define CAL_HL_IRQ_EOI                 0x001c
+#define CAL_HL_IRQSTATUS_RAW(m)                (0x20U + ((m-1) * 0x10U))
+#define CAL_HL_IRQSTATUS(m)            (0x24U + ((m-1) * 0x10U))
+#define CAL_HL_IRQENABLE_SET(m)                (0x28U + ((m-1) * 0x10U))
+#define CAL_HL_IRQENABLE_CLR(m)                (0x2cU + ((m-1) * 0x10U))
+#define CAL_PIX_PROC(m)                        (0xc0U + ((m-1) * 0x4U))
+#define CAL_CTRL                       0x100
+#define CAL_CTRL1                      0x104
+#define CAL_LINE_NUMBER_EVT            0x108
+#define CAL_VPORT_CTRL1                        0x120
+#define CAL_VPORT_CTRL2                        0x124
+#define CAL_BYS_CTRL1                  0x130
+#define CAL_BYS_CTRL2                  0x134
+#define CAL_RD_DMA_CTRL                        0x140
+#define CAL_RD_DMA_PIX_ADDR            0x144
+#define CAL_RD_DMA_PIX_OFST            0x148
+#define CAL_RD_DMA_XSIZE               0x14c
+#define CAL_RD_DMA_YSIZE               0x150
+#define CAL_RD_DMA_INIT_ADDR           0x154
+#define CAL_RD_DMA_INIT_OFST           0x168
+#define CAL_RD_DMA_CTRL2               0x16c
+#define CAL_WR_DMA_CTRL(m)             (0x200U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_ADDR(m)             (0x204U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_OFST(m)             (0x208U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_XSIZE(m)            (0x20cU + ((m-1) * 0x10U))
+#define CAL_CSI2_PPI_CTRL(m)           (0x300U + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_CFG(m)      (0x304U + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_IRQSTATUS(m)        (0x308U + ((m-1) * 0x80U))
+#define CAL_CSI2_SHORT_PACKET(m)       (0x30cU + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_IRQENABLE(m)        (0x310U + ((m-1) * 0x80U))
+#define CAL_CSI2_TIMING(m)             (0x314U + ((m-1) * 0x80U))
+#define CAL_CSI2_VC_IRQENABLE(m)       (0x318U + ((m-1) * 0x80U))
+#define CAL_CSI2_VC_IRQSTATUS(m)       (0x328U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX0(m)               (0x330U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX1(m)               (0x334U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX2(m)               (0x338U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX3(m)               (0x33cU + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX4(m)               (0x340U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX5(m)               (0x344U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX6(m)               (0x348U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX7(m)               (0x34cU + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS0(m)            (0x350U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS1(m)            (0x354U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS2(m)            (0x358U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS3(m)            (0x35cU + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS4(m)            (0x360U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS5(m)            (0x364U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS6(m)            (0x368U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS7(m)            (0x36cU + ((m-1) * 0x80U))
+
+/* CAL CSI2 PHY register offsets */
+#define CAL_CSI2_PHY_REG0              0x000
+#define CAL_CSI2_PHY_REG1              0x004
+#define CAL_CSI2_PHY_REG2              0x008
+
+/* CAL Control Module Core Camerrx Control register offsets */
+#define CM_CTRL_CORE_CAMERRX_CONTROL   0x000
+
+/*********************************************************************
+* Generic values used in various fields below
+*********************************************************************/
+
+#define CAL_GEN_DISABLE                        0
+#define CAL_GEN_ENABLE                 1
+#define CAL_GEN_FALSE                  0
+#define CAL_GEN_TRUE                   1
+
+/*********************************************************************
+* Field Definition Macros
+*********************************************************************/
+
+#define CAL_HL_REVISION_MINOR_MASK             GENMASK(5, 0)
+#define CAL_HL_REVISION_CUSTOM_MASK            GENMASK(7, 6)
+#define CAL_HL_REVISION_MAJOR_MASK             GENMASK(10, 8)
+#define CAL_HL_REVISION_RTL_MASK               GENMASK(15, 11)
+#define CAL_HL_REVISION_FUNC_MASK              GENMASK(27, 16)
+#define CAL_HL_REVISION_SCHEME_MASK            GENMASK(31, 30)
+#define CAL_HL_REVISION_SCHEME_H08                     1
+#define CAL_HL_REVISION_SCHEME_LEGACY                  0
+
+#define CAL_HL_HWINFO_WFIFO_MASK               GENMASK(3, 0)
+#define CAL_HL_HWINFO_RFIFO_MASK               GENMASK(7, 4)
+#define CAL_HL_HWINFO_PCTX_MASK                        GENMASK(12, 8)
+#define CAL_HL_HWINFO_WCTX_MASK                        GENMASK(18, 13)
+#define CAL_HL_HWINFO_VFIFO_MASK               GENMASK(22, 19)
+#define CAL_HL_HWINFO_NCPORT_MASK              GENMASK(27, 23)
+#define CAL_HL_HWINFO_NPPI_CTXS0_MASK          GENMASK(29, 28)
+#define CAL_HL_HWINFO_NPPI_CTXS1_MASK          GENMASK(31, 30)
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_ZERO               0
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_FOUR               1
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT              2
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED           3
+
+#define CAL_HL_SYSCONFIG_SOFTRESET_MASK                BIT_MASK(0)
+#define CAL_HL_SYSCONFIG_SOFTRESET_DONE                        0x0
+#define CAL_HL_SYSCONFIG_SOFTRESET_PENDING             0x1
+#define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION            0x0
+#define CAL_HL_SYSCONFIG_SOFTRESET_RESET               0x1
+#define CAL_HL_SYSCONFIG_IDLE_MASK             GENMASK(3, 2)
+#define CAL_HL_SYSCONFIG_IDLEMODE_FORCE                        0
+#define CAL_HL_SYSCONFIG_IDLEMODE_NO                   1
+#define CAL_HL_SYSCONFIG_IDLEMODE_SMART1               2
+#define CAL_HL_SYSCONFIG_IDLEMODE_SMART2               3
+
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK                BIT_MASK(0)
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0               0
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0                        0
+
+#define CAL_HL_IRQ_MASK(m)                     BIT_MASK(m-1)
+#define CAL_HL_IRQ_NOACTION                            0x0
+#define CAL_HL_IRQ_ENABLE                              0x1
+#define CAL_HL_IRQ_CLEAR                               0x1
+#define CAL_HL_IRQ_DISABLED                            0x0
+#define CAL_HL_IRQ_ENABLED                             0x1
+#define CAL_HL_IRQ_PENDING                             0x1
+
+#define CAL_PIX_PROC_EN_MASK                   BIT_MASK(0)
+#define CAL_PIX_PROC_EXTRACT_MASK              GENMASK(4, 1)
+#define CAL_PIX_PROC_EXTRACT_B6                                0x0
+#define CAL_PIX_PROC_EXTRACT_B7                                0x1
+#define CAL_PIX_PROC_EXTRACT_B8                                0x2
+#define CAL_PIX_PROC_EXTRACT_B10                       0x3
+#define CAL_PIX_PROC_EXTRACT_B10_MIPI                  0x4
+#define CAL_PIX_PROC_EXTRACT_B12                       0x5
+#define CAL_PIX_PROC_EXTRACT_B12_MIPI                  0x6
+#define CAL_PIX_PROC_EXTRACT_B14                       0x7
+#define CAL_PIX_PROC_EXTRACT_B14_MIPI                  0x8
+#define CAL_PIX_PROC_EXTRACT_B16_BE                    0x9
+#define CAL_PIX_PROC_EXTRACT_B16_LE                    0xa
+#define CAL_PIX_PROC_DPCMD_MASK                        GENMASK(9, 5)
+#define CAL_PIX_PROC_DPCMD_BYPASS                      0x0
+#define CAL_PIX_PROC_DPCMD_DPCM_10_8_1                 0x2
+#define CAL_PIX_PROC_DPCMD_DPCM_12_8_1                 0x8
+#define CAL_PIX_PROC_DPCMD_DPCM_10_7_1                 0x4
+#define CAL_PIX_PROC_DPCMD_DPCM_10_7_2                 0x5
+#define CAL_PIX_PROC_DPCMD_DPCM_10_6_1                 0x6
+#define CAL_PIX_PROC_DPCMD_DPCM_10_6_2                 0x7
+#define CAL_PIX_PROC_DPCMD_DPCM_12_7_1                 0xa
+#define CAL_PIX_PROC_DPCMD_DPCM_12_6_1                 0xc
+#define CAL_PIX_PROC_DPCMD_DPCM_14_10                  0xe
+#define CAL_PIX_PROC_DPCMD_DPCM_14_8_1                 0x10
+#define CAL_PIX_PROC_DPCMD_DPCM_16_12_1                        0x12
+#define CAL_PIX_PROC_DPCMD_DPCM_16_10_1                        0x14
+#define CAL_PIX_PROC_DPCMD_DPCM_16_8_1                 0x16
+#define CAL_PIX_PROC_DPCME_MASK                        GENMASK(15, 11)
+#define CAL_PIX_PROC_DPCME_BYPASS                      0x0
+#define CAL_PIX_PROC_DPCME_DPCM_10_8_1                 0x2
+#define CAL_PIX_PROC_DPCME_DPCM_12_8_1                 0x8
+#define CAL_PIX_PROC_DPCME_DPCM_14_10                  0xe
+#define CAL_PIX_PROC_DPCME_DPCM_14_8_1                 0x10
+#define CAL_PIX_PROC_DPCME_DPCM_16_12_1                        0x12
+#define CAL_PIX_PROC_DPCME_DPCM_16_10_1                        0x14
+#define CAL_PIX_PROC_DPCME_DPCM_16_8_1                 0x16
+#define CAL_PIX_PROC_PACK_MASK                 GENMASK(18, 16)
+#define CAL_PIX_PROC_PACK_B8                           0x0
+#define CAL_PIX_PROC_PACK_B10_MIPI                     0x2
+#define CAL_PIX_PROC_PACK_B12                          0x3
+#define CAL_PIX_PROC_PACK_B12_MIPI                     0x4
+#define CAL_PIX_PROC_PACK_B16                          0x5
+#define CAL_PIX_PROC_PACK_ARGB                         0x6
+#define CAL_PIX_PROC_CPORT_MASK                        GENMASK(23, 19)
+
+#define CAL_CTRL_POSTED_WRITES_MASK            BIT_MASK(0)
+#define CAL_CTRL_POSTED_WRITES_NONPOSTED               0
+#define CAL_CTRL_POSTED_WRITES                         1
+#define CAL_CTRL_TAGCNT_MASK                   GENMASK(4, 1)
+#define CAL_CTRL_BURSTSIZE_MASK                        GENMASK(6, 5)
+#define CAL_CTRL_BURSTSIZE_BURST16                     0x0
+#define CAL_CTRL_BURSTSIZE_BURST32                     0x1
+#define CAL_CTRL_BURSTSIZE_BURST64                     0x2
+#define CAL_CTRL_BURSTSIZE_BURST128                    0x3
+#define CAL_CTRL_LL_FORCE_STATE_MASK           GENMASK(12, 7)
+#define CAL_CTRL_MFLAGL_MASK                   GENMASK(20, 13)
+#define CAL_CTRL_PWRSCPCLK_MASK                        BIT_MASK(21)
+#define CAL_CTRL_PWRSCPCLK_AUTO                                0
+#define CAL_CTRL_PWRSCPCLK_FORCE                       1
+#define CAL_CTRL_RD_DMA_STALL_MASK             BIT_MASK(22)
+#define CAL_CTRL_MFLAGH_MASK                   GENMASK(31, 24)
+
+#define CAL_CTRL1_PPI_GROUPING_MASK            GENMASK(1, 0)
+#define CAL_CTRL1_PPI_GROUPING_DISABLED                        0
+#define CAL_CTRL1_PPI_GROUPING_RESERVED                        1
+#define CAL_CTRL1_PPI_GROUPING_0                       2
+#define CAL_CTRL1_PPI_GROUPING_1                       3
+#define CAL_CTRL1_INTERLEAVE01_MASK            GENMASK(3, 2)
+#define CAL_CTRL1_INTERLEAVE01_DISABLED                        0
+#define CAL_CTRL1_INTERLEAVE01_PIX1                    1
+#define CAL_CTRL1_INTERLEAVE01_PIX4                    2
+#define CAL_CTRL1_INTERLEAVE01_RESERVED                        3
+#define CAL_CTRL1_INTERLEAVE23_MASK            GENMASK(5, 4)
+#define CAL_CTRL1_INTERLEAVE23_DISABLED                        0
+#define CAL_CTRL1_INTERLEAVE23_PIX1                    1
+#define CAL_CTRL1_INTERLEAVE23_PIX4                    2
+#define CAL_CTRL1_INTERLEAVE23_RESERVED                        3
+
+#define CAL_LINE_NUMBER_EVT_CPORT_MASK         GENMASK(4, 0)
+#define CAL_LINE_NUMBER_EVT_MASK               GENMASK(29, 16)
+
+#define CAL_VPORT_CTRL1_PCLK_MASK              GENMASK(16, 0)
+#define CAL_VPORT_CTRL1_XBLK_MASK              GENMASK(24, 17)
+#define CAL_VPORT_CTRL1_YBLK_MASK              GENMASK(30, 25)
+#define CAL_VPORT_CTRL1_WIDTH_MASK             BIT_MASK(31)
+#define CAL_VPORT_CTRL1_WIDTH_ONE                      0
+#define CAL_VPORT_CTRL1_WIDTH_TWO                      1
+
+#define CAL_VPORT_CTRL2_CPORT_MASK             GENMASK(4, 0)
+#define CAL_VPORT_CTRL2_FREERUNNING_MASK       BIT_MASK(15)
+#define CAL_VPORT_CTRL2_FREERUNNING_GATED              0
+#define CAL_VPORT_CTRL2_FREERUNNING_FREE               1
+#define CAL_VPORT_CTRL2_FS_RESETS_MASK         BIT_MASK(16)
+#define CAL_VPORT_CTRL2_FS_RESETS_NO                   0
+#define CAL_VPORT_CTRL2_FS_RESETS_YES                  1
+#define CAL_VPORT_CTRL2_FSM_RESET_MASK         BIT_MASK(17)
+#define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT             0
+#define CAL_VPORT_CTRL2_FSM_RESET                      1
+#define CAL_VPORT_CTRL2_RDY_THR_MASK           GENMASK(31, 18)
+
+#define CAL_BYS_CTRL1_PCLK_MASK                        GENMASK(16, 0)
+#define CAL_BYS_CTRL1_XBLK_MASK                        GENMASK(24, 17)
+#define CAL_BYS_CTRL1_YBLK_MASK                        GENMASK(30, 25)
+#define CAL_BYS_CTRL1_BYSINEN_MASK             BIT_MASK(31)
+
+#define CAL_BYS_CTRL2_CPORTIN_MASK             GENMASK(4, 0)
+#define CAL_BYS_CTRL2_CPORTOUT_MASK            GENMASK(9, 5)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK      BIT_MASK(10)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_NO                        0
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_YES               1
+#define CAL_BYS_CTRL2_FREERUNNING_MASK         BIT_MASK(11)
+#define CAL_BYS_CTRL2_FREERUNNING_NO                   0
+#define CAL_BYS_CTRL2_FREERUNNING_YES                  1
+
+#define CAL_RD_DMA_CTRL_GO_MASK                        BIT_MASK(0)
+#define CAL_RD_DMA_CTRL_GO_DIS                         0
+#define CAL_RD_DMA_CTRL_GO_EN                          1
+#define CAL_RD_DMA_CTRL_GO_IDLE                                0
+#define CAL_RD_DMA_CTRL_GO_BUSY                                1
+#define CAL_RD_DMA_CTRL_INIT_MASK              BIT_MASK(1)
+#define CAL_RD_DMA_CTRL_BW_LIMITER_MASK                GENMASK(10, 2)
+#define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK       GENMASK(14, 11)
+#define CAL_RD_DMA_CTRL_PCLK_MASK              GENMASK(31, 15)
+
+#define CAL_RD_DMA_PIX_ADDR_MASK               GENMASK(31, 3)
+
+#define CAL_RD_DMA_PIX_OFST_MASK               GENMASK(31, 4)
+
+#define CAL_RD_DMA_XSIZE_MASK                  GENMASK(31, 19)
+
+#define CAL_RD_DMA_YSIZE_MASK                  GENMASK(29, 16)
+
+#define CAL_RD_DMA_INIT_ADDR_MASK              GENMASK(31, 3)
+
+#define CAL_RD_DMA_INIT_OFST_MASK              GENMASK(31, 3)
+
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_MASK                GENMASK(2, 0)
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_DIS                 0
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_ONE                 1
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_FOUR                        2
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN             3
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR           4
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED            5
+#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK       BIT_MASK(3)
+#define CAL_RD_DMA_CTRL2_PATTERN_MASK          GENMASK(5, 4)
+#define CAL_RD_DMA_CTRL2_PATTERN_LINEAR                        0
+#define CAL_RD_DMA_CTRL2_PATTERN_YUV420                        1
+#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2              2
+#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4              3
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK   BIT_MASK(6)
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING    0
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT  1
+#define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK                GENMASK(29, 16)
+
+#define CAL_WR_DMA_CTRL_MODE_MASK              GENMASK(2, 0)
+#define CAL_WR_DMA_CTRL_MODE_DIS                       0
+#define CAL_WR_DMA_CTRL_MODE_SHD                       1
+#define CAL_WR_DMA_CTRL_MODE_CNT                       2
+#define CAL_WR_DMA_CTRL_MODE_CNT_INIT                  3
+#define CAL_WR_DMA_CTRL_MODE_CONST                     4
+#define CAL_WR_DMA_CTRL_MODE_RESERVED                  5
+#define CAL_WR_DMA_CTRL_PATTERN_MASK           GENMASK(4, 3)
+#define CAL_WR_DMA_CTRL_PATTERN_LINEAR                 0
+#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2               2
+#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4               3
+#define CAL_WR_DMA_CTRL_PATTERN_RESERVED               1
+#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK                BIT_MASK(5)
+#define CAL_WR_DMA_CTRL_DTAG_MASK              GENMASK(8, 6)
+#define CAL_WR_DMA_CTRL_DTAG_ATT_HDR                   0
+#define CAL_WR_DMA_CTRL_DTAG_ATT_DAT                   1
+#define CAL_WR_DMA_CTRL_DTAG                           2
+#define CAL_WR_DMA_CTRL_DTAG_PIX_HDR                   3
+#define CAL_WR_DMA_CTRL_DTAG_PIX_DAT                   4
+#define CAL_WR_DMA_CTRL_DTAG_D5                                5
+#define CAL_WR_DMA_CTRL_DTAG_D6                                6
+#define CAL_WR_DMA_CTRL_DTAG_D7                                7
+#define CAL_WR_DMA_CTRL_CPORT_MASK             GENMASK(13, 9)
+#define CAL_WR_DMA_CTRL_STALL_RD_MASK          BIT_MASK(14)
+#define CAL_WR_DMA_CTRL_YSIZE_MASK             GENMASK(31, 18)
+
+#define CAL_WR_DMA_ADDR_MASK                   GENMASK(31, 4)
+
+#define CAL_WR_DMA_OFST_MASK                   GENMASK(18, 4)
+#define CAL_WR_DMA_OFST_CIRC_MODE_MASK         GENMASK(23, 22)
+#define CAL_WR_DMA_OFST_CIRC_MODE_ONE                  1
+#define CAL_WR_DMA_OFST_CIRC_MODE_FOUR                 2
+#define CAL_WR_DMA_OFST_CIRC_MODE_SIXTYFOUR            3
+#define CAL_WR_DMA_OFST_CIRC_MODE_DISABLED             0
+#define CAL_WR_DMA_OFST_CIRC_SIZE_MASK         GENMASK(31, 24)
+
+#define CAL_WR_DMA_XSIZE_XSKIP_MASK            GENMASK(15, 3)
+#define CAL_WR_DMA_XSIZE_MASK                  GENMASK(31, 19)
+
+#define CAL_CSI2_PPI_CTRL_IF_EN_MASK           BIT_MASK(0)
+#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK          BIT_MASK(2)
+#define CAL_CSI2_PPI_CTRL_FRAME_MASK           BIT_MASK(3)
+#define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE              0
+#define CAL_CSI2_PPI_CTRL_FRAME                                1
+
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK     GENMASK(2, 0)
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_5                      5
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_4                      4
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_3                      3
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_2                      2
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_1                      1
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED               0
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK          BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS                   0
+#define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS                   1
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK     GENMASK(6, 4)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK          BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK     GENMASK(10, 8)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK          BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK     GENMASK(14, 12)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK          BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK     GENMASK(18, 16)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK          BIT_MASK(19)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK           BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK         GENMASK(26, 25)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF            0
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON             1
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ULP            2
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK            GENMASK(28, 27)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF               0
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON                        1
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP               2
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK         BIT_MASK(29)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED       1
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING         0
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK         BIT_MASK(30)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL                      0
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL          1
+
+#define CAL_CSI2_SHORT_PACKET_MASK     GENMASK(23, 0)
+
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK          BIT_MASK(0)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK          BIT_MASK(1)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK          BIT_MASK(2)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK          BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK          BIT_MASK(4)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK      BIT_MASK(5)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK      BIT_MASK(6)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK      BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK      BIT_MASK(8)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK      BIT_MASK(9)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK            BIT_MASK(10)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK            BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK            BIT_MASK(12)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK            BIT_MASK(13)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK            BIT_MASK(14)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK                BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK                BIT_MASK(16)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK                BIT_MASK(17)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK                BIT_MASK(18)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK                BIT_MASK(19)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK         BIT_MASK(20)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK         BIT_MASK(21)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK         BIT_MASK(22)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK         BIT_MASK(23)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK         BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK  BIT_MASK(25)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK   BIT_MASK(26)
+#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK           BIT_MASK(27)
+#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK       BIT_MASK(28)
+#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK  BIT_MASK(30)
+
+#define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK    GENMASK(12, 0)
+#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK         BIT_MASK(13)
+#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK                BIT_MASK(14)
+#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK         BIT_MASK(15)
+
+#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK                  BIT_MASK(0)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK                  BIT_MASK(1)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK                  BIT_MASK(2)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK                  BIT_MASK(3)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK                  BIT_MASK(4)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK     BIT_MASK(5)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK                  BIT_MASK(8)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK                  BIT_MASK(9)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK                  BIT_MASK(10)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK                  BIT_MASK(11)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK                  BIT_MASK(12)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK     BIT_MASK(13)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK                  BIT_MASK(16)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK                  BIT_MASK(17)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK                  BIT_MASK(18)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK                  BIT_MASK(19)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK                  BIT_MASK(20)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK     BIT_MASK(21)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK                  BIT_MASK(24)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK                  BIT_MASK(25)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK                  BIT_MASK(26)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK                  BIT_MASK(27)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK                  BIT_MASK(28)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK     BIT_MASK(29)
+
+#define CAL_CSI2_CTX_DT_MASK           GENMASK(5, 0)
+#define CAL_CSI2_CTX_VC_MASK           GENMASK(7, 6)
+#define CAL_CSI2_CTX_CPORT_MASK                GENMASK(12, 8)
+#define CAL_CSI2_CTX_ATT_MASK          BIT_MASK(13)
+#define CAL_CSI2_CTX_ATT_PIX                   0
+#define CAL_CSI2_CTX_ATT                       1
+#define CAL_CSI2_CTX_PACK_MODE_MASK    BIT_MASK(14)
+#define CAL_CSI2_CTX_PACK_MODE_LINE            0
+#define CAL_CSI2_CTX_PACK_MODE_FRAME           1
+#define CAL_CSI2_CTX_LINES_MASK                GENMASK(29, 16)
+
+#define CAL_CSI2_STATUS_FRAME_MASK     GENMASK(15, 0)
+
+#define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK      GENMASK(7, 0)
+#define CAL_CSI2_PHY_REG0_THS_TERM_MASK                GENMASK(15, 8)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK   BIT_MASK(24)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE                1
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE         0
+
+#define CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK                     GENMASK(7, 0)
+#define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK              GENMASK(9, 8)
+#define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK            GENMASK(17, 10)
+#define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK                       GENMASK(24, 18)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK      BIT_MASK(25)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR             1
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS           0
+#define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK               GENMASK(29, 28)
+
+#define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK               GENMASK(23, 0)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK          GENMASK(25, 24)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK          GENMASK(27, 26)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK          GENMASK(29, 28)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK          GENMASK(31, 30)
+
+#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK                    BIT_MASK(0)
+#define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK                      GENMASK(2, 1)
+#define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK                   GENMASK(4, 3)
+#define CM_CAMERRX_CTRL_CSI1_MODE_MASK                         BIT_MASK(5)
+#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK                    BIT_MASK(10)
+#define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK                      GENMASK(12, 11)
+#define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK                   GENMASK(16, 13)
+#define CM_CAMERRX_CTRL_CSI0_MODE_MASK                         BIT_MASK(17)
+
+#endif
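
All of the *_MASK values above are plain GENMASK()/BIT_MASK() constants, so reading or updating a register field reduces to ordinary mask-and-shift arithmetic. A hedged sketch of the kind of helpers a driver might pair with this header (cal_get_field() and cal_set_field() are invented names, not taken from cal.c):

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative helpers only; the real driver may use its own variants. */
static inline u32 cal_get_field(u32 reg, u32 mask)
{
	return (reg & mask) >> __ffs(mask);		/* extract the field */
}

static inline u32 cal_set_field(u32 reg, u32 mask, u32 val)
{
	return (reg & ~mask) | ((val << __ffs(mask)) & mask);	/* insert it */
}

/* e.g. major = cal_get_field(rev, CAL_HL_REVISION_MAJOR_MASK); */
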
index 418113c998013d5fabc083364b662dc17d259e79..c4b5fab836663d2ea07f9c240327d52f80529e83 100644 (file)
@@ -1074,7 +1074,7 @@ static int __init vim2m_init(void)
        if (ret)
                platform_device_unregister(&vim2m_pdev);
 
-       return 0;
+       return ret;
 }
 
 module_init(vim2m_init);
index 9baed6a1033433e9ebd3f23eb35d7d65b7d2f2ca..93fbaee69675a3988033c2af1af9bd3613597d7f 100644 (file)
@@ -418,6 +418,8 @@ static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsi
 
                tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
        }
+       if (tpg_g_interleaved(tpg))
+               tpg->bytesperline[1] = tpg->bytesperline[0];
 }
 
 
index 42dff9d020afaf66c70e528b58d2f005a53203f3..533bc796391ed3053a5f21c92e94eba92cdc46a4 100644 (file)
@@ -256,7 +256,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
 
        /* Create links. */
        list_for_each_entry(entity, &vsp1->entities, list_dev) {
-               if (entity->type == VSP1_ENTITY_LIF) {
+               if (entity->type == VSP1_ENTITY_WPF) {
                        ret = vsp1_wpf_create_links(vsp1, entity);
                        if (ret < 0)
                                goto done;
@@ -264,7 +264,10 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
                        ret = vsp1_rpf_create_links(vsp1, entity);
                        if (ret < 0)
                                goto done;
-               } else {
+               }
+
+               if (entity->type != VSP1_ENTITY_LIF &&
+                   entity->type != VSP1_ENTITY_RPF) {
                        ret = vsp1_create_links(vsp1, entity);
                        if (ret < 0)
                                goto done;
index 637d0d6f79fba5fb511b6fba37fb24875d913dbd..b4dca57d1ae3f6d78a3417251089939a7da15f77 100644 (file)
@@ -515,7 +515,7 @@ static bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
        bool stopped;
 
        spin_lock_irqsave(&pipe->irqlock, flags);
-       stopped = pipe->state == VSP1_PIPELINE_STOPPED,
+       stopped = pipe->state == VSP1_PIPELINE_STOPPED;
        spin_unlock_irqrestore(&pipe->irqlock, flags);
 
        return stopped;
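
The one-character change above swaps a stray comma operator for the intended semicolon. Because assignment binds more tightly than ',', the old line still computed the right value; the unlock call simply became the discarded right-hand operand of one long expression. A tiny user-space illustration of that pitfall (all names invented):

#include <stdio.h>

static int do_unlock(void) { return 42; }

int main(void)
{
	int state = 1;
	int ok;

	/* Parsed as (ok = (state == 1)), do_unlock(); both sides run,
	 * the comma's value is discarded, and ok is still 1. */
	ok = state == 1, do_unlock();

	printf("ok = %d\n", ok);	/* prints ok = 1 */
	return 0;
}
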
index 859f0c08ee0543c67af2a4ecbf368906722fbba0..271f725b17e85c77017f08ac9181510369f16872 100644 (file)
@@ -1530,11 +1530,11 @@ static int si476x_radio_probe(struct platform_device *pdev)
        if (si476x_core_has_diversity(radio->core)) {
                si476x_ctrls[SI476X_IDX_DIVERSITY_MODE].def =
                        si476x_phase_diversity_mode_to_idx(radio->core->diversity_mode);
-               si476x_radio_add_new_custom(radio, SI476X_IDX_DIVERSITY_MODE);
+               rval = si476x_radio_add_new_custom(radio, SI476X_IDX_DIVERSITY_MODE);
                if (rval < 0)
                        goto exit;
 
-               si476x_radio_add_new_custom(radio, SI476X_IDX_INTERCHIP_LINK);
+               rval = si476x_radio_add_new_custom(radio, SI476X_IDX_INTERCHIP_LINK);
                if (rval < 0)
                        goto exit;
        }
index ebc73b03424960fe65f3db7721ffedb79a0e1d41..3f9e6df7d837ac27d060a1d7a5413de92273f57f 100644 (file)
@@ -68,7 +68,7 @@ MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");
 /* RDS buffer blocks */
 static u32 default_rds_buf = 300;
 module_param(default_rds_buf, uint, 0444);
-MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
+MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");
 
 /* Radio Nr */
 static u32 radio_nr = -1;
index 18adf580f502443bafc7c0e310d2556e5908f7dc..c96da3aaf00b56b0489644b835a466942727bfe9 100644 (file)
@@ -80,17 +80,24 @@ static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
 }
 
 /* enter extended function mode */
-static inline void nvt_efm_enable(struct nvt_dev *nvt)
+static inline int nvt_efm_enable(struct nvt_dev *nvt)
 {
+       if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
+               return -EBUSY;
+
        /* Enabling Extended Function Mode explicitly requires writing 2x */
        outb(EFER_EFM_ENABLE, nvt->cr_efir);
        outb(EFER_EFM_ENABLE, nvt->cr_efir);
+
+       return 0;
 }
 
 /* exit extended function mode */
 static inline void nvt_efm_disable(struct nvt_dev *nvt)
 {
        outb(EFER_EFM_DISABLE, nvt->cr_efir);
+
+       release_region(nvt->cr_efir, 2);
 }
 
 /*
@@ -100,8 +107,25 @@ static inline void nvt_efm_disable(struct nvt_dev *nvt)
  */
 static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
 {
-       outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
-       outb(ldev, nvt->cr_efdr);
+       nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
+}
+
+/* select and enable a logical device, entering and leaving EFM mode */
+static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
+{
+       nvt_efm_enable(nvt);
+       nvt_select_logical_dev(nvt, ldev);
+       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+       nvt_efm_disable(nvt);
+}
+
+/* select and disable a logical device, entering and leaving EFM mode */
+static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
+{
+       nvt_efm_enable(nvt);
+       nvt_select_logical_dev(nvt, ldev);
+       nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
+       nvt_efm_disable(nvt);
 }
 
 /* write val to cir config register */
@@ -137,6 +161,22 @@ static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
        return val;
 }
 
+/* don't override io address if one is set already */
+static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
+{
+       unsigned long old_addr;
+
+       old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
+       old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);
+
+       if (old_addr)
+               *ioaddr = old_addr;
+       else {
+               nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
+               nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
+       }
+}
+
 /* dump current cir register contents */
 static void cir_dump_regs(struct nvt_dev *nvt)
 {
@@ -251,7 +291,7 @@ static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
 
 
 /* detect hardware features */
-static void nvt_hw_detect(struct nvt_dev *nvt)
+static int nvt_hw_detect(struct nvt_dev *nvt)
 {
        const char *chip_name;
        int chip_id;
@@ -266,10 +306,17 @@ static void nvt_hw_detect(struct nvt_dev *nvt)
                nvt_efm_enable(nvt);
                nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
        }
-
        nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
 
+       nvt_efm_disable(nvt);
+
        chip_id = nvt->chip_major << 8 | nvt->chip_minor;
+       if (chip_id == NVT_INVALID) {
+               dev_err(&nvt->pdev->dev,
+                       "No device found on either EFM port\n");
+               return -ENODEV;
+       }
+
        chip_name = nvt_find_chip(nvt, chip_id);
 
        /* warn, but still let the driver load, if we don't know this chip */
@@ -282,7 +329,7 @@ static void nvt_hw_detect(struct nvt_dev *nvt)
                         "found %s or compatible: chip id: 0x%02x 0x%02x",
                         chip_name, nvt->chip_major, nvt->chip_minor);
 
-       nvt_efm_disable(nvt);
+       return 0;
 }
 
 static void nvt_cir_ldev_init(struct nvt_dev *nvt)
@@ -305,12 +352,10 @@ static void nvt_cir_ldev_init(struct nvt_dev *nvt)
        val |= psval;
        nvt_cr_write(nvt, val, psreg);
 
-       /* Select CIR logical device and enable */
+       /* Select CIR logical device */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 
-       nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
-       nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+       nvt_set_ioaddr(nvt, &nvt->cir_addr);
 
        nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
 
@@ -320,7 +365,7 @@ static void nvt_cir_ldev_init(struct nvt_dev *nvt)
 
 static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
 {
-       /* Select ACPI logical device, enable it and CIR Wake */
+       /* Select ACPI logical device and enable it */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 
@@ -330,12 +375,10 @@ static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
        /* enable pme interrupt of cir wakeup event */
        nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
 
-       /* Select CIR Wake logical device and enable */
+       /* Select CIR Wake logical device */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
-       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 
-       nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
-       nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+       nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);
 
        nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
 
@@ -355,11 +398,19 @@ static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
 /* clear out the hardware's cir wake rx fifo */
 static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
 {
-       u8 val;
+       u8 val, config;
+
+       config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
+
+       /* clearing wake fifo works in learning mode only */
+       nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
+                              CIR_WAKE_IRCON);
 
        val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
        nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
                               CIR_WAKE_FIFOCON);
+
+       nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
 }
 
 /* clear out the hardware's cir tx fifo */
@@ -408,6 +459,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
 
        /* and finally, enable interrupts */
        nvt_set_cir_iren(nvt);
+
+       /* enable the CIR logical device */
+       nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
 }
 
 static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
@@ -442,10 +496,15 @@ static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
 
        /* clear any and all stray interrupts */
        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
+
+       /* enable the CIR WAKE logical device */
+       nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
 }
 
 static void nvt_enable_wake(struct nvt_dev *nvt)
 {
+       unsigned long flags;
+
        nvt_efm_enable(nvt);
 
        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
@@ -457,12 +516,16 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
 
        nvt_efm_disable(nvt);
 
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
+
        nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
                               CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
                               CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
                               CIR_WAKE_IRCON);
        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
+
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 }
 
 #if 0 /* Currently unused */
@@ -670,7 +733,6 @@ static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
 /* copy data from hardware rx fifo into driver buffer */
 static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 {
-       unsigned long flags;
        u8 fifocount, val;
        unsigned int b_idx;
        bool overrun = false;
@@ -689,8 +751,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 
        nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
 
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
-
        b_idx = nvt->pkts;
 
        /* This should never happen, but lets check anyway... */
@@ -712,8 +772,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 
        if (overrun)
                nvt_handle_rx_fifo_overrun(nvt);
-
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 }
 
 static void nvt_cir_log_irqs(u8 status, u8 iren)
@@ -736,16 +794,13 @@ static void nvt_cir_log_irqs(u8 status, u8 iren)
 static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
 {
        unsigned long flags;
-       bool tx_inactive;
        u8 tx_state;
 
        spin_lock_irqsave(&nvt->tx.lock, flags);
        tx_state = nvt->tx.tx_state;
        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 
-       tx_inactive = (tx_state == ST_TX_NONE);
-
-       return tx_inactive;
+       return tx_state == ST_TX_NONE;
 }
 
 /* interrupt service routine for incoming and outgoing CIR data */
@@ -757,9 +812,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 
        nvt_dbg_verbose("%s firing", __func__);
 
-       nvt_efm_enable(nvt);
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_efm_disable(nvt);
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
 
        /*
         * Get IR Status register contents. Write 1 to ack/clear
@@ -775,9 +828,14 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
         *   0: CIR_IRSTS_GH  - Min Length Detected
         */
        status = nvt_cir_reg_read(nvt, CIR_IRSTS);
-       if (!status) {
+       iren = nvt_cir_reg_read(nvt, CIR_IREN);
+
+       /* IRQ may be shared with CIR WAKE, therefore check for each
+        * status bit whether the related interrupt source is enabled
+        */
+       if (!(status & iren)) {
+               spin_unlock_irqrestore(&nvt->nvt_lock, flags);
                nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
-               nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
                return IRQ_NONE;
        }
 
@@ -785,13 +843,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
        nvt_cir_reg_write(nvt, status, CIR_IRSTS);
        nvt_cir_reg_write(nvt, 0, CIR_IRSTS);
 
-       /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
-       iren = nvt_cir_reg_read(nvt, CIR_IREN);
-       if (!iren) {
-               nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
-               return IRQ_NONE;
-       }
-
        nvt_cir_log_irqs(status, iren);
 
        if (status & CIR_IRSTS_RTR) {
@@ -805,16 +856,14 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
                if (nvt_cir_tx_inactive(nvt))
                        nvt_get_rx_ir_data(nvt);
 
-               spin_lock_irqsave(&nvt->nvt_lock, flags);
-
                cur_state = nvt->study_state;
 
-               spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
                if (cur_state == ST_STUDY_NONE)
                        nvt_clear_cir_fifo(nvt);
        }
 
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
        if (status & CIR_IRSTS_TE)
                nvt_clear_tx_fifo(nvt);
 
@@ -863,9 +912,18 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
 
        nvt_dbg_wake("%s firing", __func__);
 
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
+
        status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
-       if (!status)
+       iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
+
+       /* IRQ may be shared with CIR, therefore check for each
+        * status bit whether the related interrupt source is enabled
+        */
+       if (!(status & iren)) {
+               spin_unlock_irqrestore(&nvt->nvt_lock, flags);
                return IRQ_NONE;
+       }
 
        if (status & CIR_WAKE_IRSTS_IR_PENDING)
                nvt_clear_cir_wake_fifo(nvt);
@@ -873,13 +931,6 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
        nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);
 
-       /* Interrupt may be shared with CIR, bail if Wake not enabled */
-       iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
-       if (!iren) {
-               nvt_dbg_wake("%s exiting, wake not enabled", __func__);
-               return IRQ_HANDLED;
-       }
-
        if ((status & CIR_WAKE_IRSTS_PE) &&
            (nvt->wake_state == ST_WAKE_START)) {
                while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
@@ -888,39 +939,21 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
                }
 
                nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
-               spin_lock_irqsave(&nvt->nvt_lock, flags);
                nvt->wake_state = ST_WAKE_FINISH;
-               spin_unlock_irqrestore(&nvt->nvt_lock, flags);
        }
 
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
        nvt_dbg_wake("%s done", __func__);
        return IRQ_HANDLED;
 }
 
-static void nvt_enable_cir(struct nvt_dev *nvt)
+static void nvt_disable_cir(struct nvt_dev *nvt)
 {
-       /* set function enable flags */
-       nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
-                         CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
-                         CIR_IRCON);
-
-       nvt_efm_enable(nvt);
-
-       /* enable the CIR logical device */
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
-
-       nvt_efm_disable(nvt);
-
-       /* clear all pending interrupts */
-       nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+       unsigned long flags;
 
-       /* enable interrupts */
-       nvt_set_cir_iren(nvt);
-}
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
 
-static void nvt_disable_cir(struct nvt_dev *nvt)
-{
        /* disable CIR interrupts */
        nvt_cir_reg_write(nvt, 0, CIR_IREN);
 
@@ -934,13 +967,10 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
        nvt_clear_cir_fifo(nvt);
        nvt_clear_tx_fifo(nvt);
 
-       nvt_efm_enable(nvt);
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
        /* disable the CIR logical device */
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
-
-       nvt_efm_disable(nvt);
+       nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
 }
 
 static int nvt_open(struct rc_dev *dev)
@@ -949,20 +979,31 @@ static int nvt_open(struct rc_dev *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&nvt->nvt_lock, flags);
-       nvt_enable_cir(nvt);
+
+       /* set function enable flags */
+       nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
+                         CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
+                         CIR_IRCON);
+
+       /* clear all pending interrupts */
+       nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+       /* enable interrupts */
+       nvt_set_cir_iren(nvt);
+
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
+       /* enable the CIR logical device */
+       nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
+
        return 0;
 }
 
 static void nvt_close(struct rc_dev *dev)
 {
        struct nvt_dev *nvt = dev->priv;
-       unsigned long flags;
 
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
        nvt_disable_cir(nvt);
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 }
 
 /* Allocate memory, probe hardware, and initialize everything */
@@ -1024,7 +1065,9 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 
        init_waitqueue_head(&nvt->tx.queue);
 
-       nvt_hw_detect(nvt);
+       ret = nvt_hw_detect(nvt);
+       if (ret)
+               goto exit_free_dev_rdev;
 
        /* Initialize CIR & CIR Wake Logical Devices */
        nvt_efm_enable(nvt);
@@ -1032,7 +1075,10 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        nvt_cir_wake_ldev_init(nvt);
        nvt_efm_disable(nvt);
 
-       /* Initialize CIR & CIR Wake Config Registers */
+       /*
+        * Initialize CIR & CIR Wake Config Registers
+        * and enable logical devices
+        */
        nvt_cir_regs_init(nvt);
        nvt_cir_wake_regs_init(nvt);
 
@@ -1079,12 +1125,12 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
                goto exit_unregister_device;
 
        if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
-                           CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+                           CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
                goto exit_unregister_device;
 
        if (devm_request_irq(&pdev->dev, nvt->cir_wake_irq,
                             nvt_cir_wake_isr, IRQF_SHARED,
-                            NVT_DRIVER_NAME, (void *)nvt))
+                            NVT_DRIVER_NAME "-wake", (void *)nvt))
                goto exit_unregister_device;
 
        device_init_wakeup(&pdev->dev, true);
@@ -1109,15 +1155,11 @@ exit_free_dev_rdev:
 static void nvt_remove(struct pnp_dev *pdev)
 {
        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
-       unsigned long flags;
 
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
-       /* disable CIR */
-       nvt_cir_reg_write(nvt, 0, CIR_IREN);
        nvt_disable_cir(nvt);
+
        /* enable CIR Wake (for IR power-on) */
        nvt_enable_wake(nvt);
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
        rc_unregister_device(nvt->rdev);
 }
@@ -1129,26 +1171,23 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
 
        nvt_dbg("%s called", __func__);
 
-       /* zero out misc state tracking */
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
-       nvt->study_state = ST_STUDY_NONE;
-       nvt->wake_state = ST_WAKE_NONE;
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
        spin_lock_irqsave(&nvt->tx.lock, flags);
        nvt->tx.tx_state = ST_TX_NONE;
        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+       /* zero out misc state tracking */
+       nvt->study_state = ST_STUDY_NONE;
+       nvt->wake_state = ST_WAKE_NONE;
+
        /* disable all CIR interrupts */
        nvt_cir_reg_write(nvt, 0, CIR_IREN);
 
-       nvt_efm_enable(nvt);
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
        /* disable cir logical dev */
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
-
-       nvt_efm_disable(nvt);
+       nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
 
        /* make sure wake is enabled */
        nvt_enable_wake(nvt);
@@ -1162,16 +1201,6 @@ static int nvt_resume(struct pnp_dev *pdev)
 
        nvt_dbg("%s called", __func__);
 
-       /* open interrupt */
-       nvt_set_cir_iren(nvt);
-
-       /* Enable CIR logical device */
-       nvt_efm_enable(nvt);
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
-
-       nvt_efm_disable(nvt);
-
        nvt_cir_regs_init(nvt);
        nvt_cir_wake_regs_init(nvt);
 
@@ -1181,6 +1210,7 @@ static int nvt_resume(struct pnp_dev *pdev)
 static void nvt_shutdown(struct pnp_dev *pdev)
 {
        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+
        nvt_enable_wake(nvt);
 }
 
index 0ad15d34e9c93ce8adedd5f114f84543776f3055..4a5650dffa29a67fad87022659c71b3e2c601910 100644 (file)
@@ -68,7 +68,8 @@ enum nvt_chip_ver {
        NVT_W83667HG    = 0xa510,
        NVT_6775F       = 0xb470,
        NVT_6776F       = 0xc330,
-       NVT_6779D       = 0xc560
+       NVT_6779D       = 0xc560,
+       NVT_INVALID     = 0xffff,
 };
 
 struct nvt_chip {
@@ -157,8 +158,8 @@ struct nvt_dev {
 /* total length of CIR and CIR WAKE */
 #define CIR_IOREG_LENGTH       0x0f
 
-/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL (0x7d0 = 2000) */
-#define CIR_RX_LIMIT_COUNT     0x7d0
+/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL */
+#define CIR_RX_LIMIT_COUNT  (IR_DEFAULT_TIMEOUT / US_TO_NS(SAMPLE_PERIOD))
 
 /* CIR Regs */
 #define CIR_IRCON      0x00
@@ -292,10 +293,7 @@ struct nvt_dev {
 #define CIR_WAKE_IREN_RTR              0x40
 #define CIR_WAKE_IREN_PE               0x20
 #define CIR_WAKE_IREN_RFO              0x10
-#define CIR_WAKE_IREN_TE               0x08
-#define CIR_WAKE_IREN_TTR              0x04
-#define CIR_WAKE_IREN_TFU              0x02
-#define CIR_WAKE_IREN_GH               0x01
+#define CIR_WAKE_IREN_GH               0x08
 
 /* CIR WAKE FIFOCON settings */
 #define CIR_WAKE_FIFOCON_RXFIFOCLR     0x08
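
The RX limit count above is no longer a magic number: it is derived from rc-core's default IR timeout and the driver's sample period. As a worked example, assuming a 50 us sample period and a 125 ms default timeout (both values are assumptions for the arithmetic, not taken from this diff), the macro expands as follows:

/*
 * Illustration only; the real SAMPLE_PERIOD, US_TO_NS() and
 * IR_DEFAULT_TIMEOUT definitions live in the driver and rc-core headers.
 */
#define SAMPLE_PERIOD		50			/* us, assumed */
#define US_TO_NS(x)		((x) * 1000)
#define IR_DEFAULT_TIMEOUT	(125 * 1000 * 1000)	/* 125 ms in ns, assumed */
#define CIR_RX_LIMIT_COUNT	(IR_DEFAULT_TIMEOUT / US_TO_NS(SAMPLE_PERIOD))
/* -> 125000000 / 50000 = 2500 samples, versus the old fixed 0x7d0 = 2000 */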
index 7359f3d03b647244baeb8c99ad7076e9a11ba74d..585d5e52118d538f7fc764d2793bd98384c72b44 100644 (file)
@@ -16,6 +16,9 @@
 #ifndef _RC_CORE_PRIV
 #define _RC_CORE_PRIV
 
+/* Define the max number of pulse/space transitions to buffer */
+#define        MAX_IR_EVENT_SIZE       512
+
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <media/rc-core.h>
@@ -35,7 +38,8 @@ struct ir_raw_event_ctrl {
        struct list_head                list;           /* to keep track of raw clients */
        struct task_struct              *thread;
        spinlock_t                      lock;
-       struct kfifo_rec_ptr_1          kfifo;          /* fifo for the pulse/space durations */
+       /* fifo for the pulse/space durations */
+       DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE);
        ktime_t                         last_event;     /* when last event occurred */
        enum raw_event_type             last_type;      /* last event type */
        struct rc_dev                   *dev;           /* pointer to the parent rc_dev */
index c69807fe2feff08c47441dc2bf3c780c4440d505..144304c94606169773566ef4f3132f0dc188d4ce 100644 (file)
@@ -20,9 +20,6 @@
 #include <linux/freezer.h>
 #include "rc-core-priv.h"
 
-/* Define the max number of pulse/space transitions to buffer */
-#define MAX_IR_EVENT_SIZE      512
-
 /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
 static LIST_HEAD(ir_raw_client_list);
 
@@ -36,14 +33,12 @@ static int ir_raw_event_thread(void *data)
        struct ir_raw_event ev;
        struct ir_raw_handler *handler;
        struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
-       int retval;
 
        while (!kthread_should_stop()) {
 
                spin_lock_irq(&raw->lock);
-               retval = kfifo_len(&raw->kfifo);
 
-               if (retval < sizeof(ev)) {
+               if (!kfifo_len(&raw->kfifo)) {
                        set_current_state(TASK_INTERRUPTIBLE);
 
                        if (kthread_should_stop())
@@ -54,7 +49,8 @@ static int ir_raw_event_thread(void *data)
                        continue;
                }
 
-               retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
+               if (!kfifo_out(&raw->kfifo, &ev, 1))
+                       dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
                spin_unlock_irq(&raw->lock);
 
                mutex_lock(&ir_raw_handler_lock);
@@ -87,8 +83,10 @@ int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
        IR_dprintk(2, "sample: (%05dus %s)\n",
                   TO_US(ev->duration), TO_STR(ev->pulse));
 
-       if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
-               return -ENOMEM;
+       if (!kfifo_put(&dev->raw->kfifo, *ev)) {
+               dev_err(&dev->dev, "IR event FIFO is full!\n");
+               return -ENOSPC;
+       }
 
        return 0;
 }
@@ -273,11 +271,7 @@ int ir_raw_event_register(struct rc_dev *dev)
 
        dev->raw->dev = dev;
        dev->change_protocol = change_protocol;
-       rc = kfifo_alloc(&dev->raw->kfifo,
-                        sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
-                        GFP_KERNEL);
-       if (rc < 0)
-               goto out;
+       INIT_KFIFO(dev->raw->kfifo);
 
        spin_lock_init(&dev->raw->lock);
        dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
@@ -319,7 +313,6 @@ void ir_raw_event_unregister(struct rc_dev *dev)
                        handler->raw_unregister(dev);
        mutex_unlock(&ir_raw_handler_lock);
 
-       kfifo_free(&dev->raw->kfifo);
        kfree(dev->raw);
        dev->raw = NULL;
 }
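
The two files above convert the raw-event buffer from a byte-oriented kfifo_rec_ptr_1 (allocated with kfifo_alloc() and sized in bytes) to a statically declared, typed kfifo whose put/get operations count elements. Below is a minimal sketch of that pattern, using a made-up sample_event type and a 16-element fifo; only the kfifo calls mirror the driver.

#include <linux/kfifo.h>
#include <linux/types.h>

struct sample_event {
	u32 duration;
	bool pulse;
};

/* storage is part of the declaration, no kfifo_alloc()/kfifo_free() needed */
static DECLARE_KFIFO(sample_fifo, struct sample_event, 16);

static void sample_init(void)
{
	INIT_KFIFO(sample_fifo);
}

static int sample_store(struct sample_event ev)
{
	/* kfifo_put() takes the element by value and returns 0 when full */
	if (!kfifo_put(&sample_fifo, ev))
		return -ENOSPC;
	return 0;
}

static bool sample_fetch(struct sample_event *ev)
{
	/* the last argument is an element count now, not a byte count */
	return kfifo_out(&sample_fifo, ev, 1) == 1;
}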
index 504bfbc4027adc4953ebdde65658782491182b34..9f3e0fd4cad9f6c63faf32ed914bba50af29ab6a 100644 (file)
@@ -461,13 +461,12 @@ static int m88rs6000t_sleep(struct dvb_frontend *fe)
        dev_dbg(&dev->client->dev, "%s:\n", __func__);
 
        ret = regmap_write(dev->regmap, 0x07, 0x6d);
-       if (ret)
-               goto err;
-       usleep_range(5000, 10000);
-err:
-       if (ret)
+       if (ret) {
                dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-       return ret;
+               return ret;
+       }
+       usleep_range(5000, 10000);
+       return 0;
 }
 
 static int m88rs6000t_get_frequency(struct dvb_frontend *fe, u32 *frequency)
index a7a8452e99d23b2598e4bcb11bbe61a63d735c31..6ab35e315fe7a2dd11ce4078357b0dffff98d1b8 100644 (file)
@@ -1295,7 +1295,7 @@ static int generic_set_freq(struct dvb_frontend *fe,
                            v4l2_std_id std, u32 delsys)
 {
        struct r820t_priv               *priv = fe->tuner_priv;
-       int                             rc = -EINVAL;
+       int                             rc;
        u32                             lo_freq;
 
        tuner_dbg("should set frequency to %d kHz, bw %d MHz\n",
index 0e1ca2b00e61e3e78e3f30f1f357a2902aebb7d0..3450dfb7c427ad1841cfa31cde2647eee31fde72 100644 (file)
@@ -364,8 +364,8 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
 static const struct dvb_tuner_ops si2157_ops = {
        .info = {
                .name           = "Silicon Labs Si2146/2147/2148/2157/2158",
-               .frequency_min  = 55000000,
-               .frequency_max  = 862000000,
+               .frequency_min  = 42000000,
+               .frequency_max  = 870000000,
        },
 
        .init = si2157_init,
@@ -458,6 +458,9 @@ static int si2157_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "\n");
 
+       /* stop statistics polling */
+       cancel_delayed_work_sync(&dev->stat_work);
+
        memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
        fe->tuner_priv = NULL;
        kfree(dev);
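
The remove path above now cancels the delayed statistics work before the device state is freed, so a work item can no longer run against freed memory. Below is a minimal sketch of that cancel-before-free pattern with hypothetical names (my_dev, my_stat_work_fn); only the workqueue calls are the real API.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct my_dev {
	struct delayed_work stat_work;
};

static void my_stat_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, stat_work.work);

	/* ... poll statistics ... then re-arm */
	schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(1000));
}

static struct my_dev *my_probe(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	INIT_DELAYED_WORK(&dev->stat_work, my_stat_work_fn);
	schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(1000));
	return dev;
}

static void my_remove(struct my_dev *dev)
{
	/* waits for a running work item and cancels pending re-arms */
	cancel_delayed_work_sync(&dev->stat_work);
	kfree(dev);
}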
index 4e941f00b6008f286b839d8e2f72c41978cc98e5..082ff5608455c273ea9c53e1824bbb76fad15a0e 100644 (file)
@@ -1403,11 +1403,12 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
         * in order to avoid troubles during device release.
         */
        kfree(priv->ctrl.fname);
+       priv->ctrl.fname = NULL;
        memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
        if (p->fname) {
                priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
                if (priv->ctrl.fname == NULL)
-                       rc = -ENOMEM;
+                       return -ENOMEM;
        }
 
        /*
index aee2d76e8dfc996dd3fa4e1f09ca0c55be47e2ed..8def19d9ab921698af30fef7bf7ce5529e097c0b 100644 (file)
@@ -52,7 +52,7 @@ struct as10x_bus_adapter_t {
        struct as10x_cmd_t *cmd, *rsp;
 
        /* bus adapter private ops callback */
-       struct as102_priv_ops_t *ops;
+       const struct as102_priv_ops_t *ops;
 };
 
 struct as102_dev_t {
index 3f669066ccf6837dd8b4022c7f03ab166e3a2f42..0e8030c071b8e74bfdd46e229f258f9b50aa64ca 100644 (file)
@@ -189,7 +189,7 @@ static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
        return actual_len;
 }
 
-static struct as102_priv_ops_t as102_priv_ops = {
+static const struct as102_priv_ops_t as102_priv_ops = {
        .upload_fw_pkt  = as102_send_ep1,
        .xfer_cmd       = as102_usb_xfer_cmd,
        .as102_read_ep2 = as102_read_ep2,
index 9e29e70a78d7b20f8b250cbde914c2a589a82052..df2bc3f732b697b359ef4574bd7db1e9f297bd1f 100644 (file)
@@ -276,7 +276,7 @@ static int au0828_create_media_graph(struct au0828_dev *dev)
                return -EINVAL;
 
        if (tuner) {
-               ret = media_create_pad_link(tuner, TUNER_PAD_IF_OUTPUT,
+               ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
                                            decoder, 0,
                                            MEDIA_LNK_FL_ENABLED);
                if (ret)
index 94363a3ba400a3ccdcaa3d6ec88ebc082d1ecdf2..0e174e86061404d6e01f36d8ff2db5e0f856702b 100644 (file)
@@ -181,7 +181,7 @@ static int stop_urb_transfer(struct au0828_dev *dev)
 static int start_urb_transfer(struct au0828_dev *dev)
 {
        struct urb *purb;
-       int i, ret = -ENOMEM;
+       int i, ret;
 
        dprintk(2, "%s()\n", __func__);
 
@@ -194,7 +194,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
 
                dev->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
                if (!dev->urbs[i])
-                       goto err;
+                       return -ENOMEM;
 
                purb = dev->urbs[i];
 
@@ -207,9 +207,10 @@ static int start_urb_transfer(struct au0828_dev *dev)
                if (!purb->transfer_buffer) {
                        usb_free_urb(purb);
                        dev->urbs[i] = NULL;
+                       ret = -ENOMEM;
                        pr_err("%s: failed big buffer allocation, err = %d\n",
                               __func__, ret);
-                       goto err;
+                       return ret;
                }
 
                purb->status = -EINPROGRESS;
@@ -235,10 +236,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
        }
 
        dev->urb_streaming = true;
-       ret = 0;
-
-err:
-       return ret;
+       return 0;
 }
 
 static void au0828_start_transport(struct au0828_dev *dev)
index 0bd96906339227ec8b3df4e78640fc500be4fb87..d4bdba60b0f71436065e4884d5622cc34dffbfe6 100644 (file)
@@ -10,7 +10,7 @@
 /* Version information */
 #define DRIVER_VERSION "0.1"
 #define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV USB Driver"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
 
 /* debug */
 #ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
index 48643b94e69449503618e2d95fc16a8da01c319b..c9320d6c613145888fe41f43034bfb32b88aabb5 100644 (file)
@@ -1382,6 +1382,8 @@ static int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
        buffer_size = urb->actual_length;
 
        buffer = kmalloc(buffer_size, GFP_ATOMIC);
+       if (!buffer)
+               return -ENOMEM;
 
        memcpy(buffer, dma_q->ps_head, 3);
        memcpy(buffer+3, p_buffer, buffer_size-3);
index de4ae5eb4830bc4ce6c5c1513bf7f3b29bfa09a6..a6a9508418f8eebef0a4ae41c1e312610afb598c 100644 (file)
@@ -499,6 +499,11 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
        }
 
        dev->adev.users--;
+       if (substream->runtime->dma_area) {
+               dev_dbg(dev->dev, "freeing\n");
+               vfree(substream->runtime->dma_area);
+               substream->runtime->dma_area = NULL;
+       }
        mutex_unlock(&dev->lock);
 
        if (dev->adev.users == 0 && dev->adev.shutdown == 1) {
index 620b83d03f75a1c6952f4982d2a85c53fe0e54aa..54e43fe13e6d35da096e2b0faf08be166cc404b8 100644 (file)
@@ -1259,7 +1259,7 @@ static int cx231xx_create_media_graph(struct cx231xx *dev)
                return 0;
 
        if (tuner) {
-               ret = media_create_pad_link(tuner, TUNER_PAD_IF_OUTPUT, decoder, 0,
+               ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT, decoder, 0,
                                            MEDIA_LNK_FL_ENABLED);
                if (ret < 0)
                        return ret;
index 6e02a15d39ce90a1da6c95f6431fa5d04cdb9ec9..b3c09fe54d9bf7f681fb9a964cc8e65b617b1741 100644 (file)
@@ -684,7 +684,7 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
        if (ret < 0)
                goto err;
 
-       if (tmp == 1 || tmp == 3) {
+       if (tmp == 1 || tmp == 3 || tmp == 5) {
                /* configure gpioh1, reset & power slave demod */
                ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01);
                if (ret < 0)
@@ -823,7 +823,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
        if (ret < 0)
                goto err;
 
-       if (tmp == 1 || tmp == 3)
+       if (tmp == 1 || tmp == 3 || tmp == 5)
                state->dual_mode = true;
 
        dev_dbg(&d->udev->dev, "%s: ts mode=%d dual mode=%d\n", __func__,
index 416a97f05ec8539fbc2c11dad23e1261a819650c..df22001f9e41273d137ab0c92979ecbb9c9b29b3 100644 (file)
@@ -112,9 +112,10 @@ static const u32 clock_lut_it9135[] = {
  * 0  TS
  * 1  DCA + PIP
  * 3  PIP
+ * 5  DCA + PIP
  * n  DCA
  *
- * Values 0 and 3 are seen to this day. 0 for single TS and 3 for dual TS.
+ * Values 0, 3 and 5 are seen to this day. 0 for single TS and 3/5 for dual TS.
  */
 
 #define EEPROM_BASE_AF9035        0x42fd
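
The comment block above documents the EEPROM TS-mode values the driver now recognises; values 1, 3 and 5 indicate that a second (slave) demodulator is present. A small illustration of that mapping follows, with af9035_is_dual_mode() as a made-up helper name; the real driver open-codes the check, as seen in the af9035.c hunks above.

static bool af9035_is_dual_mode(u8 ts_mode)
{
	switch (ts_mode) {
	case 1:		/* DCA + PIP */
	case 3:		/* PIP */
	case 5:		/* DCA + PIP */
		return true;	/* slave demod present, dual TS */
	default:
		return false;	/* 0 = single TS, other values = DCA */
	}
}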
index 023d91f7e6542f0905af07964648988c252a5ad7..35f27e2e4e284f5a6554fe963c60931034d13864 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DVB USB framework
  *
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
  * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
  *
  *    This program is free software; you can redistribute it and/or modify
index 45f07090d431a25c3c2b2c645d90bf9c5b113c14..a1622bda2a5e00be3529309b1afe18bfa4ee3872 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DVB USB framework
  *
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
  * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
  *
  *    This program is free software; you can redistribute it and/or modify
index f0565bf3673e8687ba16599aafc59152ad85f1da..5ec159f2239974ca14bbf8038ce865b49ac58a4b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DVB USB framework
  *
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
  * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -1129,7 +1129,7 @@ int dvb_usbv2_reset_resume(struct usb_interface *intf)
 EXPORT_SYMBOL(dvb_usbv2_reset_resume);
 
 MODULE_VERSION("2.0");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_DESCRIPTION("DVB USB common");
 MODULE_LICENSE("GPL");
index 22bdce15ecf31fcd351309ca7fa5794cb228869d..5bafeb6486beb3c7da3459e63a74ca0de6994957 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DVB USB framework
  *
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
  * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
  *
  *    This program is free software; you can redistribute it and/or modify
index 1dd962535f972dda834a913cd4e5ae6bd26b9bc7..02dbc6c45423a157b0ae857dbb9963dc483e712a 100644 (file)
@@ -847,10 +847,17 @@ static const struct usb_device_id dvbsky_id_table[] = {
                USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI,
                &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI",
                RC_MAP_TT_1500) },
+       { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
+               USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2,
+               &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI v1.1",
+               RC_MAP_TT_1500) },
        { DVB_USB_DEVICE(USB_VID_TERRATEC,
                USB_PID_TERRATEC_H7_3,
                &dvbsky_t680c_props, "Terratec H7 Rev.4",
                RC_MAP_TT_1500) },
+       { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4,
+               &dvbsky_s960_props, "Terratec Cinergy S2 Rev.4",
+               RC_MAP_DVBSKY) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
index 444579be0b779618d63c61ad23cb7d531faee64e..7d16252dbb71f2c053ac52466592f555487051af 100644 (file)
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
 struct mxl111sf_tuner_state {
        struct mxl111sf_state *mxl_state;
 
-       struct mxl111sf_tuner_config *cfg;
+       const struct mxl111sf_tuner_config *cfg;
 
        enum mxl_if_freq if_freq;
 
@@ -489,8 +489,8 @@ static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
 };
 
 struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
-                                          struct mxl111sf_state *mxl_state,
-                                          struct mxl111sf_tuner_config *cfg)
+                               struct mxl111sf_state *mxl_state,
+                               const struct mxl111sf_tuner_config *cfg)
 {
        struct mxl111sf_tuner_state *state = NULL;
 
index e6caab21a1973e2a918de7d095fb2c548a052e22..509b5507121849fea1c0526dd1cbc4494ec49930 100644 (file)
@@ -63,13 +63,13 @@ struct mxl111sf_tuner_config {
 #if IS_ENABLED(CONFIG_DVB_USB_MXL111SF)
 extern
 struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
-                                          struct mxl111sf_state *mxl_state,
-                                          struct mxl111sf_tuner_config *cfg);
+                               struct mxl111sf_state *mxl_state,
+                               const struct mxl111sf_tuner_config *cfg);
 #else
 static inline
 struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
-                                          struct mxl111sf_state *mxl_state,
-                                          struct mxl111sf_tuner_config *cfg)
+                               struct mxl111sf_state *mxl_state,
+                               const struct mxl111sf_tuner_config *cfg)
 {
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
index b669deccc34c64065e801ee29a68d85b05c8dedc..5d676b533a3ab245956764592047219d035c0069 100644 (file)
@@ -856,7 +856,7 @@ static int mxl111sf_ant_hunt(struct dvb_frontend *fe)
        return 0;
 }
 
-static struct mxl111sf_tuner_config mxl_tuner_config = {
+static const struct mxl111sf_tuner_config mxl_tuner_config = {
        .if_freq         = MXL_IF_6_0, /* applies to external IF output, only */
        .invert_spectrum = 0,
        .read_reg        = mxl111sf_read_reg,
@@ -888,7 +888,7 @@ static int mxl111sf_attach_tuner(struct dvb_usb_adapter *adap)
        state->tuner.function = MEDIA_ENT_F_TUNER;
        state->tuner.name = "mxl111sf tuner";
        state->tuner_pads[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
-       state->tuner_pads[TUNER_PAD_IF_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+       state->tuner_pads[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
 
        ret = media_entity_pads_init(&state->tuner,
                                     TUNER_NUM_PADS, state->tuner_pads);
index eb5787a3191e7afc31e69e6e9757297b745279c0..c4c6e92e8643c0f782617d534ec60b08ddfc01ce 100644 (file)
@@ -259,6 +259,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                ret = -EOPNOTSUPP;
        }
 
+       /* Retry failed I2C messages */
+       if (ret == -EPIPE)
+               ret = -EAGAIN;
+
 err_mutex_unlock:
        mutex_unlock(&d->i2c_mutex);
 
@@ -619,6 +623,10 @@ static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
        }
        dev_dbg(&d->intf->dev, "chip_id=%u\n", dev->chip_id);
 
+       /* Retry failed I2C messages */
+       d->i2c_adap.retries = 1;
+       d->i2c_adap.timeout = msecs_to_jiffies(10);
+
        return WARM;
 err:
        dev_dbg(&d->intf->dev, "failed=%d\n", ret);
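
Mapping -EPIPE to -EAGAIN above is what makes the new adapter settings useful: the I2C core only re-issues a transfer when the bus driver returns -EAGAIN, and it does so at most adap->retries times within adap->timeout. The retry loop in __i2c_transfer() has roughly the following shape (paraphrased from memory as an illustration, not quoted from i2c-core):

/* Rough shape of the __i2c_transfer() retry loop; paraphrase, not a quote. */
orig_jiffies = jiffies;
for (ret = 0, try = 0; try <= adap->retries; try++) {
	ret = adap->algo->master_xfer(adap, msgs, num);
	if (ret != -EAGAIN)
		break;
	if (time_after(jiffies, orig_jiffies + adap->timeout))
		break;
}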
index ca8f3c2b10824c29c28f4be0e54ae931f08a8059..55136cde38f57c8841974b5f734054ee178adbdf 100644 (file)
@@ -1,6 +1,6 @@
 /* usb-urb.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file keeps functions for initializing and handling the
index 83684ed023cd9bba08a20ae9cfe682f636da5a36..7ba975bea96a6d0ba03f5276f53322bfd29715a1 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB framework compliant Linux driver for the AVerMedia AverTV DVB-T
  * USB2.0 (A800) DVB-T receiver.
  *
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to
  *   - AVerMedia who kindly provided information and
@@ -185,7 +185,7 @@ static struct usb_driver a800_driver = {
 
 module_usb_driver(a800_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index ab715118172878ff52c8ba3e1002ec2357971a8c..907ac01ae2979a56657e6ddc32fafecfbb9c9042 100644 (file)
@@ -13,7 +13,7 @@
  *
  * TODO: Use the cx25840-driver for the analogue part
  *
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
  * Copyright (C) 2006 Michael Krufky (mkrufky@linuxtv.org)
  * Copyright (C) 2006, 2007 Chris Pascoe (c.pascoe@itee.uq.edu.au)
  *
@@ -2314,7 +2314,7 @@ static struct usb_driver cxusb_driver = {
 
 module_usb_driver(cxusb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
 MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
 MODULE_DESCRIPTION("Driver for Conexant USB2.0 hybrid reference design");
index 0d248ce02a9b2eb02b9da6fecbb6175319110574..c16f999b9d7c9de8c2cc75526256ba30fa69a33c 100644 (file)
@@ -881,7 +881,7 @@ static struct usb_driver dib0700_driver = {
 module_usb_driver(dib0700_driver);
 
 MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 7ed49646a699dd750fe7eded35632de7fd7cf4a4..ea0391e32d23b16ac848e33d60492a9e9e3dbb22 100644 (file)
@@ -1736,8 +1736,13 @@ static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
        struct dib0700_adapter_state *st = adap->priv;
        struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
 
-       if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
-               return -ENODEV;
+       if (adap->id == 0) {
+               if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+                       return -ENODEV;
+       } else {
+               if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+                       return -ENODEV;
+       }
 
        st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
        adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override;
@@ -1773,6 +1778,20 @@ static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
        return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
 }
 
+static int stk809x_frontend1_attach(struct dvb_usb_adapter *adap)
+{
+       struct dib0700_adapter_state *state = adap->priv;
+
+       if (!dvb_attach(dib8000_attach, &state->dib8000_ops))
+               return -ENODEV;
+
+       state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x82, 0);
+
+       adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]);
+
+       return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
+}
+
 static int nim8096md_tuner_attach(struct dvb_usb_adapter *adap)
 {
        struct dib0700_adapter_state *st = adap->priv;
@@ -3794,6 +3813,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
 /* 80 */{ USB_DEVICE(USB_VID_ELGATO,   USB_PID_ELGATO_EYETV_DTT_2) },
        { USB_DEVICE(USB_VID_PCTV,      USB_PID_PCTV_2002E) },
        { USB_DEVICE(USB_VID_PCTV,      USB_PID_PCTV_2002E_SE) },
+       { USB_DEVICE(USB_VID_PCTV,      USB_PID_DIBCOM_STK8096PVR) },
        { 0 }           /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -4959,6 +4979,59 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                                            RC_BIT_NEC,
                        .change_protocol  = dib0700_change_protocol,
                },
+       }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+               .num_adapters = 2,
+               .adapter = {
+                       {
+                               .num_frontends = 1,
+                               .fe = {{
+                                       .caps  = DVB_USB_ADAP_HAS_PID_FILTER |
+                                               DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+                                       .pid_filter_count = 32,
+                                       .pid_filter = stk80xx_pid_filter,
+                                       .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+                                       .frontend_attach  = stk809x_frontend_attach,
+                                       .tuner_attach     = dib809x_tuner_attach,
+
+                                       DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+                               } },
+                               .size_of_priv =
+                                       sizeof(struct dib0700_adapter_state),
+                       }, {
+                               .num_frontends = 1,
+                               .fe = { {
+                                       .caps  = DVB_USB_ADAP_HAS_PID_FILTER |
+                                               DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+                                       .pid_filter_count = 32,
+                                       .pid_filter = stk80xx_pid_filter,
+                                       .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+                                       .frontend_attach  = stk809x_frontend1_attach,
+                                       .tuner_attach     = dib809x_tuner_attach,
+
+                                       DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+                               } },
+                               .size_of_priv =
+                                       sizeof(struct dib0700_adapter_state),
+                       },
+               },
+               .num_device_descs = 1,
+               .devices = {
+                       {   "DiBcom STK8096-PVR reference design",
+                               { &dib0700_usb_id_table[83], NULL },
+                               { NULL },
+                       },
+               },
+
+               .rc.core = {
+                       .rc_interval      = DEFAULT_RC_INTERVAL,
+                       .rc_codes         = RC_MAP_DIB0700_RC5_TABLE,
+                       .module_name  = "dib0700",
+                       .rc_query         = dib0700_rc_query_old_firmware,
+                       .allowed_protos   = RC_BIT_RC5 |
+                               RC_BIT_RC6_MCE |
+                               RC_BIT_NEC,
+                       .change_protocol  = dib0700_change_protocol,
+               },
        },
 };
 
index ef3a8f75f82ebe21b4ee5b7963aed9bbf8449c32..35de6095926df2289fa089a5b2a4410a9f0d15d8 100644 (file)
@@ -1,6 +1,6 @@
 /* Common methods for dibusb-based-receivers.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
index a4ac37e0e98b692ccc4a72ea5f93c556bcaf1447..a0057641cc86838d80d694ca9511e8547a37e480 100644 (file)
@@ -1,10 +1,10 @@
 /* DVB USB compliant linux driver for mobile DVB-T USB devices based on
  * reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-B)
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * based on GPL code from DiBcom, which has
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -465,7 +465,7 @@ static struct usb_driver dibusb_driver = {
 
 module_usb_driver(dibusb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for DiBcom USB DVB-T devices (DiB3000M-B based)");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 9d1a59d09c523c3fb8d6ae4be4f6641a0b6c1507..08fb8a3f6e0cc23e87ff150c747361fd5259b1f6 100644 (file)
@@ -1,10 +1,10 @@
 /* DVB USB compliant linux driver for mobile DVB-T USB devices based on
  * reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-C/P)
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * based on GPL code from DiBcom, which has
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -143,7 +143,7 @@ static struct usb_driver dibusb_mc_driver = {
 
 module_usb_driver(dibusb_mc_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 32ab1392313fbdaed6ec67591cbd38e964bde878..3f82163d8ab89061410bdff4b68bea2e1d153c27 100644 (file)
@@ -1,6 +1,6 @@
 /* Header file for all dibusb-based-receivers.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
index 772bde3c5020a63b026567cf6e62ba44f2c8714b..63134335c99406ca7df3d284cd3ba95d4154cae3 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB compliant linux driver for Nebula Electronics uDigiTV DVB-T USB2.0
  * receiver
  *
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * partly based on the SDK published by Nebula Electronics
  *
@@ -348,7 +348,7 @@ static struct usb_driver digitv_driver = {
 
 module_usb_driver(digitv_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for Nebula Electronics uDigiTV DVB-T USB2.0");
 MODULE_VERSION("1.0-alpha");
 MODULE_LICENSE("GPL");
index 8637ad1be6be430895c403ef2382f38d58494f0b..7e72a1bef76a22d6070a20ea71297e6a5da280ec 100644 (file)
@@ -1,7 +1,7 @@
 /* Frontend part of the Linux driver for the WideView/ Yakumo/ Hama/
  * Typhoon/ Yuan DVB-T USB2.0 receiver.
  *
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
index c357fb3b0a883c382a6c480a0c9567f665f8d10e..ca3b69aa96882f47692e5bbac10b12943dbba418 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB library compliant Linux driver for the WideView/ Yakumo/ Hama/
  * Typhoon/ Yuan/ Miglia DVB-T USB2.0 receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to Steve Chang from WideView for providing support for the WT-220U.
  *
@@ -362,7 +362,7 @@ static struct usb_driver dtt200u_usb_driver = {
 
 module_usb_driver(dtt200u_usb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the WideView/Yakumo/Hama/Typhoon/Club3D/Miglia DVB-T USB2.0 devices");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 005b0a7df3585d1653e61d8b8a63a9278a0ab7cf..efccc399b1cba7cdb92f59b48b7033fa1619b7d1 100644 (file)
@@ -1,7 +1,7 @@
 /* Common header file of Linux driver for the WideView/ Yakumo/ Hama/
  * Typhoon/ Yuan DVB-T USB2.0 receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
index 6b7b2a89242e797d9bbe0c959ddef57515a25a8f..7e619d638809b12fce6c5fe9c7d744e0b56ef87a 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-common.h is part of the DVB USB library.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * a header file containing prototypes and types for internal use of the dvb-usb-lib
index 9ddfcab268be9c797e7ce6a276dc4168fe0651df..71de19ba0e01e211fc4d071ddc61b35755953513 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-dvb.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for initializing and handling the
index 733a7ff7b207819bcd05f2a816388fa21e9f69b5..dd048a7c461c8ffe1884b52ca80ff2b35233adaf 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-firmware.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for downloading the firmware to Cypress FX 1 and 2 based devices.
index 88e4a62abc44db45e72ea93bc46334805e0ef2f7..4f0b0adce7f5dfe18d47af28968b9d2d307fb3ff 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-i2c.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for (de-)initializing an I2C adapter.
index 1adf325012f7340f9648ae46b5df72532beeb94c..3896ba9a4179670687eadb63bdd199e251dc904e 100644 (file)
@@ -3,7 +3,7 @@
  *
  * dvb-usb-init.c
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -299,6 +299,6 @@ void dvb_usb_device_exit(struct usb_interface *intf)
 EXPORT_SYMBOL(dvb_usb_device_exit);
 
 MODULE_VERSION("1.0");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("A library module containing commonly used USB and DVB function USB DVB devices");
 MODULE_LICENSE("GPL");
index 7b5dae3077f6a2550584b2eaada3da0a1d7632ca..c259f9e43542970d87a32ca2e79806d87d193a8e 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-remote.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for initializing the input-device and for handling remote-control-queries.
index 5c8f651344fc1fd2a713014a82d14cea8cc3718c..95f9097498cb86259f0209013ab499894ab39839 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-urb.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file keeps functions for initializing and handling the
index ce4c4e3b58bb7516f9bdff338d1b6b8ed2d0332e..639c4678c65b96c82293f7546919768401f43dbd 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb.h is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * the headerfile, all dvb-usb-drivers have to include.
index 14ef25dc6cd33076a288573ba413eb4e7ded6454..dd46d6c78c4ea27deea934dbb6515b0560a69017 100644 (file)
@@ -1688,6 +1688,7 @@ enum dw2102_table_entry {
        TECHNOTREND_S2_4600,
        TEVII_S482_1,
        TEVII_S482_2,
+       TERRATEC_CINERGY_S2_BOX,
 };
 
 static struct usb_device_id dw2102_table[] = {
@@ -1702,19 +1703,20 @@ static struct usb_device_id dw2102_table[] = {
        [TEVII_S660] = {USB_DEVICE(0x9022, USB_PID_TEVII_S660)},
        [PROF_7500] = {USB_DEVICE(0x3034, 0x7500)},
        [GENIATECH_SU3000] = {USB_DEVICE(0x1f4d, 0x3000)},
-       [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00a8)},
+       [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R1)},
        [TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)},
        [TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)},
        [X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)},
        [TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)},
        [TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
-       [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)},
+       [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)},
        [GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
        [GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
        [TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
                USB_PID_TECHNOTREND_CONNECT_S2_4600)},
        [TEVII_S482_1] = {USB_DEVICE(0x9022, 0xd483)},
        [TEVII_S482_2] = {USB_DEVICE(0x9022, 0xd484)},
+       [TERRATEC_CINERGY_S2_BOX] = {USB_DEVICE(USB_VID_TERRATEC, 0x0105)},
        { }
 };
 
@@ -2232,7 +2234,7 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
                } },
                }
        },
-       .num_device_descs = 3,
+       .num_device_descs = 4,
        .devices = {
                { "TechnoTrend TT-connect S2-4600",
                        { &dw2102_table[TECHNOTREND_S2_4600], NULL },
@@ -2246,6 +2248,10 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
                        { &dw2102_table[TEVII_S482_2], NULL },
                        { NULL },
                },
+               { "Terratec Cinergy S2 USB BOX",
+                       { &dw2102_table[TERRATEC_CINERGY_S2_BOX], NULL },
+                       { NULL },
+               },
        }
 };
 
index 6c55384e2fca8638d7846241dfeffcda528fa96a..fc7569e2728d2201e14897cddd41c9a9504a9716 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB framework compliant Linux driver for the Hauppauge WinTV-NOVA-T usb2
  * DVB-T receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -227,7 +227,7 @@ static struct usb_driver nova_t_driver = {
 
 module_usb_driver(nova_t_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Hauppauge WinTV-NOVA-T usb2");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 6c3c477229551642fc762d753337a115c3d64b10..51487d2f7764cfcec87cb365bc15817123e68ef0 100644 (file)
@@ -512,7 +512,7 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
                        &a->dev->i2c_adap, STV090x_DEMODULATOR_0);
 
        if (a->fe_adap[0].fe) {
-               struct stv6110x_devctl *ctl;
+               const struct stv6110x_devctl *ctl;
 
                ctl = dvb_attach(stv6110x_attach,
                                a->fe_adap[0].fe,
index f10717311e05bb9873c3fa24e40b369b1d9c4237..ecc207fbaf3cfffd54595492bd0e6f7427afa9d2 100644 (file)
@@ -820,7 +820,7 @@ static struct usb_driver ttusb2_driver = {
 
 module_usb_driver(ttusb2_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 9b042292e788db67d5576515e10d54b6e6b6415e..58ad5b4f856c5dd5e1ba0d2a824026dc10cc5e97 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB framework compliant Linux driver for the HanfTek UMT-010 USB2.0
  * DVB-T receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -145,7 +145,7 @@ static struct usb_driver umt_driver = {
 
 module_usb_driver(umt_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index d62ee0f5a1658d64cf165ba1a9fe6dde2a34774a..89173603be6754e1e8e30c68a06affcfa89482fe 100644 (file)
@@ -1,6 +1,6 @@
 /* usb-urb.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file keeps functions for initializing and handling the
index d361a72ca0fa227d396d64ebe9e187c2aaa6d393..27398c08c69dc4c6cfec28703ebc53d64949fe75 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de>
  *                    Metzler Brothers Systementwicklung GbR
  *
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
index ee1e19e364452e142f4e35c393714c53a4e6d7d3..40de33de90a7aef04ae6cf95153c8177596a16ba 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de>
  *                    Metzler Brothers Systementwicklung GbR
  *
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
@@ -439,7 +439,7 @@ static struct usb_driver vp702x_usb_driver = {
 
 module_usb_driver(vp702x_usb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for Twinhan StarBox DVB-S USB2.0 and clones");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index e708afc6a57fbce136a0b894ba660645552f3c64..7765602ea658c4ecf6ef951949dec5a912c8ae26 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB frontend part of the Linux driver for TwinhanDTV Alpha/MagicBoxII USB2.0
  * DVB-T receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
index d750724132ee372902340fcf8f3bec2e4ef3401a..13340af0d39c34bee810a8b225da511e950dcfc5 100644 (file)
@@ -2,7 +2,7 @@
  *  - TwinhanDTV Alpha/MagicBoxII USB2.0 DVB-T receiver
  *  - DigitalNow TinyUSB2 DVB-t receiver
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
@@ -296,7 +296,7 @@ static struct usb_driver vp7045_usb_driver = {
 
 module_usb_driver(vp7045_usb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for Twinhan MagicBox/Alpha and DNTV tinyUSB2 DVB-T USB2.0");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index cf5ec46f8bb1bbc40138af764be3f0a79f763aa1..66499932ca767f97156bb9bce08e82a0dfc9d8ef 100644 (file)
@@ -1,7 +1,7 @@
 /* Common header-file of the Linux driver for the TwinhanDTV Alpha/MagicBoxII
  * USB2.0 DVB-T receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
index b58acd3fcd9918368b2eadac79d61a5f64d80b38..72f3f4d50253a5041573140b6b5afa4b758dd3b8 100644 (file)
@@ -64,6 +64,8 @@ static int em28xx_initialize_mt9m111(struct em28xx *dev)
                i2c_master_send(&dev->i2c_client[dev->def_i2c_bus],
                                &regs[i][0], 3);
 
+       /* FIXME: This does not create a sensor entity in the media graph */
+
        return 0;
 }
 
@@ -91,6 +93,8 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev)
                i2c_master_send(&dev->i2c_client[dev->def_i2c_bus],
                                &regs[i][0], 3);
 
+       /* FIXME: This does not create a sensor entity in the media graph */
+
        return 0;
 }
 
index a1b6ef5894a689a39c6597aa668db2d1d4115b33..ba442c9674155ad94197f0c4f9fbde7446f0d55c 100644 (file)
@@ -570,7 +570,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type    = TUNER_ABSENT,
                .is_webcam     = 1,
                .input         = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = silvercrest_reg_seq,
@@ -583,7 +583,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder      = EM28XX_SAA711X,
                .tuner_type   = TUNER_ABSENT,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -605,7 +605,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type    = TUNER_ABSENT,
                .is_webcam     = 1,
                .input         = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                } },
@@ -616,7 +616,7 @@ struct em28xx_board em28xx_boards[] = {
                .tda9887_conf = TDA9887_PRESENT,
                .decoder      = EM28XX_SAA711X,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -635,7 +635,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -655,7 +655,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -675,7 +675,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -715,7 +715,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -735,7 +735,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -755,7 +755,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -775,7 +775,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -800,7 +800,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE4,
                        .amux     = EM28XX_AMUX_AUX,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE5,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -819,7 +819,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .is_webcam    = 1,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                } },
@@ -829,7 +829,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .is_webcam    = 1,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = silvercrest_reg_seq,
@@ -848,7 +848,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
@@ -863,7 +863,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,   /* Capture only device */
                .decoder      = EM28XX_SAA711X,
                .input        = { {
-                       .type  = EM28XX_VMUX_COMPOSITE1,
+                       .type  = EM28XX_VMUX_COMPOSITE,
                        .vmux  = SAA7115_COMPOSITE0,
                        .amux  = EM28XX_AMUX_LINE_IN,
                }, {
@@ -879,7 +879,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .is_webcam    = 1,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                } },
@@ -889,7 +889,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder      = EM28XX_SAA711X,
                .tuner_type   = TUNER_ABSENT,   /* Capture only device */
                .input        = { {
-                       .type  = EM28XX_VMUX_COMPOSITE1,
+                       .type  = EM28XX_VMUX_COMPOSITE,
                        .vmux  = SAA7115_COMPOSITE0,
                        .amux  = EM28XX_AMUX_LINE_IN,
                }, {
@@ -909,7 +909,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -930,7 +930,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -952,7 +952,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -974,7 +974,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -992,7 +992,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1006,7 +1006,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type    = TUNER_ABSENT,  /* Capture only device */
                .decoder       = EM28XX_TVP5150,
                .input         = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1029,7 +1029,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = pinnacle_hybrid_pro_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = pinnacle_hybrid_pro_analog,
@@ -1100,7 +1100,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = terratec_cinergy_USB_XS_FR_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = terratec_cinergy_USB_XS_FR_analog,
@@ -1186,7 +1186,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1213,7 +1213,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1239,7 +1239,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1265,7 +1265,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1291,7 +1291,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1317,7 +1317,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1343,7 +1343,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = default_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = default_analog,
@@ -1368,7 +1368,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1392,7 +1392,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux      = SAA7115_COMPOSITE4,
                        .amux      = EM28XX_AMUX_VIDEO,
                }, {
-                       .type      = EM28XX_VMUX_COMPOSITE1,
+                       .type      = EM28XX_VMUX_COMPOSITE,
                        .vmux      = SAA7115_COMPOSITE0,
                        .amux      = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1413,7 +1413,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1428,7 +1428,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder    = EM28XX_SAA711X,
                .tuner_type = TUNER_ABSENT, /* capture only board */
                .input      = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1443,7 +1443,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,   /* Capture-only board */
                .decoder      = EM28XX_SAA711X,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = vc211a_enable,
@@ -1465,7 +1465,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1485,7 +1485,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1500,7 +1500,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT, /* capture only board */
                .decoder      = EM28XX_SAA711X,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1520,7 +1520,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1541,7 +1541,7 @@ struct em28xx_board em28xx_boards[] = {
                        .aout     = EM28XX_AOUT_MONO |  /* I2S */
                                    EM28XX_AOUT_MASTER, /* Line out pin */
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1555,6 +1555,7 @@ struct em28xx_board em28xx_boards[] = {
                .buttons = std_snapshot_button,
                .tda9887_conf = TDA9887_PRESENT,
                .tuner_type   = TUNER_YMEC_TVF_5533MF,
+               .tuner_addr   = 0x60,
                .decoder      = EM28XX_SAA711X,
                .input        = { {
                        .type     = EM28XX_VMUX_TELEVISION,
@@ -1563,7 +1564,7 @@ struct em28xx_board em28xx_boards[] = {
                        .aout     = EM28XX_AOUT_MONO |  /* I2S */
                                    EM28XX_AOUT_MASTER, /* Line out pin */
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1581,7 +1582,7 @@ struct em28xx_board em28xx_boards[] = {
                        .type     = EM28XX_VMUX_SVIDEO,
                        .vmux     = SAA7115_SVIDEO3,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                } },
        },
@@ -1610,7 +1611,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = em2880_msi_digivox_ad_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = em2880_msi_digivox_ad_analog,
@@ -1633,7 +1634,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = em2880_msi_digivox_ad_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = em2880_msi_digivox_ad_analog,
@@ -1654,7 +1655,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1677,7 +1678,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = default_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = default_analog,
@@ -1708,7 +1709,7 @@ struct em28xx_board em28xx_boards[] = {
                        .gpio = em2882_kworld_315u_analog,
                        .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
                }, {
-                       .type = EM28XX_VMUX_COMPOSITE1,
+                       .type = EM28XX_VMUX_COMPOSITE,
                        .vmux = SAA7115_COMPOSITE0,
                        .amux = EM28XX_AMUX_LINE_IN,
                        .gpio = em2882_kworld_315u_analog1,
@@ -1735,7 +1736,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux = EM28XX_AMUX_VIDEO,
                        .gpio = default_analog,
                }, {
-                       .type = EM28XX_VMUX_COMPOSITE1,
+                       .type = EM28XX_VMUX_COMPOSITE,
                        .vmux = TVP5150_COMPOSITE1,
                        .amux = EM28XX_AMUX_LINE_IN,
                        .gpio = default_analog,
@@ -1758,7 +1759,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = default_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = default_analog,
@@ -1782,7 +1783,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = pinnacle_hybrid_pro_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = pinnacle_hybrid_pro_analog,
@@ -1808,7 +1809,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1834,7 +1835,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1859,7 +1860,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1904,7 +1905,7 @@ struct em28xx_board em28xx_boards[] = {
                        .gpio     = kworld_330u_analog,
                        .aout     = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = kworld_330u_analog,
@@ -1951,7 +1952,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
 
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1970,7 +1971,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .decoder      = EM28XX_SAA711X,
                .input           = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1990,7 +1991,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, { /* Composite has not been tested yet */
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, { /* S-video has not been tested yet */
@@ -2006,7 +2007,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder         = EM28XX_SAA711X,
                .xclk            = EM28XX_XCLK_FREQUENCY_12MHZ,
                .input           = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -2023,7 +2024,7 @@ struct em28xx_board em28xx_boards[] = {
                .xclk            = EM28XX_XCLK_FREQUENCY_12MHZ,
                .mute_gpio       = terratec_av350_mute_gpio,
                .input           = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AUDIO_SRC_LINE,
                        .gpio     = terratec_av350_unmute_gpio,
@@ -2041,7 +2042,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder      = EM28XX_SAA711X,
                .tuner_type   = TUNER_ABSENT,   /* Capture only device */
                .input        = { {
-                       .type  = EM28XX_VMUX_COMPOSITE1,
+                       .type  = EM28XX_VMUX_COMPOSITE,
                        .vmux  = SAA7115_COMPOSITE0,
                        .amux  = EM28XX_AMUX_LINE_IN,
                }, {
@@ -2067,7 +2068,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = evga_indtube_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = evga_indtube_analog,
@@ -2125,7 +2126,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type          = TUNER_ABSENT,
                .decoder             = EM28XX_SAA711X,
                .input               = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -2238,7 +2239,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .is_webcam    = 1,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = speedlink_vad_laplace_reg_seq,
                } },
@@ -2272,7 +2273,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type    = TUNER_ABSENT,  /* Capture only device */
                .decoder       = EM28XX_TVP5150,
                .input         = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -3012,6 +3013,48 @@ static void flush_request_modules(struct em28xx *dev)
        flush_work(&dev->request_module_wk);
 }
 
+static int em28xx_media_device_init(struct em28xx *dev,
+                                   struct usb_device *udev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_device *mdev;
+
+       mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+       if (!mdev)
+               return -ENOMEM;
+
+       mdev->dev = &udev->dev;
+
+       if (!dev->name)
+               strlcpy(mdev->model, "unknown em28xx", sizeof(mdev->model));
+       else
+               strlcpy(mdev->model, dev->name, sizeof(mdev->model));
+       if (udev->serial)
+               strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+       strcpy(mdev->bus_info, udev->devpath);
+       mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+       mdev->driver_version = LINUX_VERSION_CODE;
+
+       media_device_init(mdev);
+
+       dev->media_dev = mdev;
+#endif
+       return 0;
+}
+
+static void em28xx_unregister_media_device(struct em28xx *dev)
+{
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+       if (dev->media_dev) {
+               media_device_unregister(dev->media_dev);
+               media_device_cleanup(dev->media_dev);
+               kfree(dev->media_dev);
+               dev->media_dev = NULL;
+       }
+#endif
+}
+
 /*
  * em28xx_release_resources()
  * unregisters the v4l2,i2c and usb devices
@@ -3023,6 +3066,8 @@ static void em28xx_release_resources(struct em28xx *dev)
 
        mutex_lock(&dev->lock);
 
+       em28xx_unregister_media_device(dev);
+
        if (dev->def_i2c_bus)
                em28xx_i2c_unregister(dev, 1);
        em28xx_i2c_unregister(dev, 0);
@@ -3167,6 +3212,8 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
         */
        snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
 
+       em28xx_media_device_init(dev, udev);
+
        if (dev->is_audio_only) {
                retval = em28xx_audio_setup(dev);
                if (retval)
@@ -3467,7 +3514,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
        /* save our data pointer in this interface device */
        usb_set_intfdata(interface, dev);
 
-       /* allocate device struct */
+       /* allocate device struct and check if the device is a webcam */
        mutex_init(&dev->lock);
        retval = em28xx_init_dev(dev, udev, interface, nr);
        if (retval) {
@@ -3483,6 +3530,15 @@ static int em28xx_usb_probe(struct usb_interface *interface,
                try_bulk = usb_xfer_mode > 0;
        }
 
+       /* Disable V4L2 if the device doesn't have a decoder */
+       if (has_video &&
+           dev->board.decoder == EM28XX_NODECODER && !dev->board.is_webcam) {
+               printk(DRIVER_NAME
+                      ": Currently, V4L2 is not supported on this model\n");
+               has_video = false;
+               dev->has_video = false;
+       }
+
        /* Select USB transfer types to use */
        if (has_video) {
                if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
@@ -3501,9 +3557,14 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 
        request_modules(dev);
 
-       /* Should be the last thing to do, to avoid newer udev's to
-          open the device before fully initializing it
+       /*
+        * Do it at the end, to reduce dynamic configuration changes during
+        * the device init. Yet, as request_modules() can be async, the
+        * topology will likely change after the load of the em28xx subdrivers.
         */
+#ifdef CONFIG_MEDIA_CONTROLLER
+       retval = media_device_register(dev->media_dev);
+#endif
 
        return 0;
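
The probe changes above split media controller support into two steps: em28xx_media_device_init() allocates and initialises the struct media_device early in em28xx_init_dev(), while media_device_register() only runs at the very end of probe, after the sub-drivers have had a chance to create their entities and links. A condensed sketch of that allocate/init/register/cleanup lifecycle, using hypothetical my_* names and assuming CONFIG_MEDIA_CONTROLLER is enabled:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <media/media-device.h>

struct my_dev {                                 /* hypothetical driver state */
        struct media_device *media_dev;
};

static int my_media_init(struct my_dev *dev, struct usb_device *udev)
{
        struct media_device *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);

        if (!mdev)
                return -ENOMEM;

        mdev->dev = &udev->dev;
        strlcpy(mdev->model, "example", sizeof(mdev->model));
        media_device_init(mdev);        /* initialise, but stay invisible */
        dev->media_dev = mdev;
        return 0;
}

static int my_media_register(struct my_dev *dev)
{
        /* called last, once all entities and pad links exist */
        return media_device_register(dev->media_dev);
}

static void my_media_release(struct my_dev *dev)
{
        media_device_unregister(dev->media_dev);
        media_device_cleanup(dev->media_dev);
        kfree(dev->media_dev);
        dev->media_dev = NULL;
}

Registering last keeps the graph hidden from userspace until it is complete; as the comment above notes, request_modules() may still load sub-drivers asynchronously, so the topology can keep changing after registration.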
 
index bf5c24467c65bec87b43c14b3f2552cb9dce6aab..ea80541d58f065190b738730d93b3e8cd048750f 100644 (file)
@@ -916,6 +916,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
                       dev->name, result);
                goto fail_adapter;
        }
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+       dvb->adapter.mdev = dev->media_dev;
+#endif
 
        /* Ensure all frontends negotiate bus access */
        dvb->fe[0]->ops.ts_bus_ctrl = em28xx_dvb_bus_ctrl;
@@ -994,8 +997,15 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
 
        /* register network adapter */
        dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
+
+       result = dvb_create_media_graph(&dvb->adapter, false);
+       if (result < 0)
+               goto fail_create_graph;
+
        return 0;
 
+fail_create_graph:
+       dvb_net_release(&dvb->net);
 fail_fe_conn:
        dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
 fail_fe_mem:
index 0e86ff423c499d196303da2999215e7454d3867f..16a2d4039330307a295d760bef31919c099966b6 100644 (file)
@@ -196,7 +196,6 @@ static void em28xx_wake_i2c(struct em28xx *dev)
        v4l2_device_call_all(v4l2_dev, 0, core,  reset, 0);
        v4l2_device_call_all(v4l2_dev, 0, video, s_routing,
                             INPUT(dev->ctl_input)->vmux, 0, 0);
-       v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 0);
 }
 
 static int em28xx_colorlevels_set_default(struct em28xx *dev)
@@ -867,6 +866,275 @@ static void res_free(struct em28xx *dev, enum v4l2_buf_type f_type)
        em28xx_videodbg("res: put %d\n", res_type);
 }
 
+static void em28xx_v4l2_media_release(struct em28xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+       int i;
+
+       for (i = 0; i < MAX_EM28XX_INPUT; i++) {
+               if (!INPUT(i)->type)
+                       return;
+               media_device_unregister_entity(&dev->input_ent[i]);
+       }
+#endif
+}
+
+/*
+ * Media Controller helper functions
+ */
+
+static int em28xx_v4l2_create_media_graph(struct em28xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct em28xx_v4l2 *v4l2 = dev->v4l2;
+       struct media_device *mdev = dev->media_dev;
+       struct media_entity *entity;
+       struct media_entity *if_vid = NULL, *if_aud = NULL;
+       struct media_entity *tuner = NULL, *decoder = NULL;
+       int i, ret;
+
+       if (!mdev)
+               return 0;
+
+       /* Webcams are really simple */
+       if (dev->board.is_webcam) {
+               media_device_for_each_entity(entity, mdev) {
+                       if (entity->function != MEDIA_ENT_F_CAM_SENSOR)
+                               continue;
+                       ret = media_create_pad_link(entity, 0,
+                                                   &v4l2->vdev.entity, 0,
+                                                   MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+               }
+               return 0;
+       }
+
+       /* Non-webcams have analog TV decoder and other complexities */
+
+       media_device_for_each_entity(entity, mdev) {
+               switch (entity->function) {
+               case MEDIA_ENT_F_IF_VID_DECODER:
+                       if_vid = entity;
+                       break;
+               case MEDIA_ENT_F_IF_AUD_DECODER:
+                       if_aud = entity;
+                       break;
+               case MEDIA_ENT_F_TUNER:
+                       tuner = entity;
+                       break;
+               case MEDIA_ENT_F_ATV_DECODER:
+                       decoder = entity;
+                       break;
+               }
+       }
+
+       /* Analog setup, using tuner as a link */
+
+       /* Something bad happened! */
+       if (!decoder)
+               return -EINVAL;
+
+       if (tuner) {
+               if (if_vid) {
+                       ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
+                                                   if_vid,
+                                                   IF_VID_DEC_PAD_IF_INPUT,
+                                                   MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+                       ret = media_create_pad_link(if_vid, IF_VID_DEC_PAD_OUT,
+                                               decoder, DEMOD_PAD_IF_INPUT,
+                                               MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+               } else {
+                       ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
+                                               decoder, DEMOD_PAD_IF_INPUT,
+                                               MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+               }
+
+               if (if_aud) {
+                       ret = media_create_pad_link(tuner, TUNER_PAD_AUD_OUT,
+                                                   if_aud,
+                                                   IF_AUD_DEC_PAD_IF_INPUT,
+                                                   MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+               } else {
+                       if_aud = tuner;
+               }
+
+       }
+       ret = media_create_pad_link(decoder, DEMOD_PAD_VID_OUT,
+                                   &v4l2->vdev.entity, 0,
+                                   MEDIA_LNK_FL_ENABLED);
+       if (ret)
+               return ret;
+
+       if (em28xx_vbi_supported(dev)) {
+               ret = media_create_pad_link(decoder, DEMOD_PAD_VBI_OUT,
+                                           &v4l2->vbi_dev.entity, 0,
+                                           MEDIA_LNK_FL_ENABLED);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < MAX_EM28XX_INPUT; i++) {
+               struct media_entity *ent = &dev->input_ent[i];
+
+               if (!INPUT(i)->type)
+                       break;
+
+               switch (INPUT(i)->type) {
+               case EM28XX_VMUX_COMPOSITE:
+               case EM28XX_VMUX_SVIDEO:
+                       ret = media_create_pad_link(ent, 0, decoder,
+                                                   DEMOD_PAD_IF_INPUT, 0);
+                       if (ret)
+                               return ret;
+                       break;
+               default: /* EM28XX_VMUX_TELEVISION or EM28XX_RADIO */
+                       if (!tuner)
+                               break;
+
+                       ret = media_create_pad_link(ent, 0, tuner,
+                                                   TUNER_PAD_RF_INPUT,
+                                                   MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+                       break;
+               }
+       }
+#endif
+       return 0;
+}
+
+static int em28xx_enable_analog_tuner(struct em28xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_device *mdev = dev->media_dev;
+       struct em28xx_v4l2 *v4l2 = dev->v4l2;
+       struct media_entity *source;
+       struct media_link *link, *found_link = NULL;
+       int ret, active_links = 0;
+
+       if (!mdev || !v4l2->decoder)
+               return 0;
+
+       /*
+        * This will find the tuner that is connected into the decoder.
+        * Technically, this is not 100% correct, as the device may be
+        * using an analog input instead of the tuner. However, as we can't
+        * do DVB streaming while the DMA engine is being used for V4L2,
+        * this should be enough for the actual needs.
+        */
+       list_for_each_entry(link, &v4l2->decoder->links, list) {
+               if (link->sink->entity == v4l2->decoder) {
+                       found_link = link;
+                       if (link->flags & MEDIA_LNK_FL_ENABLED)
+                               active_links++;
+                       break;
+               }
+       }
+
+       if (active_links == 1 || !found_link)
+               return 0;
+
+       source = found_link->source->entity;
+       list_for_each_entry(link, &source->links, list) {
+               struct media_entity *sink;
+               int flags = 0;
+
+               sink = link->sink->entity;
+
+               if (sink == v4l2->decoder)
+                       flags = MEDIA_LNK_FL_ENABLED;
+
+               ret = media_entity_setup_link(link, flags);
+               if (ret) {
+                       pr_err("Couldn't change link %s->%s to %s. Error %d\n",
+                              source->name, sink->name,
+                              flags ? "enabled" : "disabled",
+                              ret);
+                       return ret;
+               } else
+                       em28xx_videodbg("link %s->%s was %s\n",
+                                       source->name, sink->name,
+                                       flags ? "ENABLED" : "disabled");
+       }
+#endif
+       return 0;
+}
+
+static const char * const iname[] = {
+       [EM28XX_VMUX_COMPOSITE]  = "Composite",
+       [EM28XX_VMUX_SVIDEO]     = "S-Video",
+       [EM28XX_VMUX_TELEVISION] = "Television",
+       [EM28XX_RADIO]           = "Radio",
+};
+
+static void em28xx_v4l2_create_entities(struct em28xx *dev)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       struct em28xx_v4l2 *v4l2 = dev->v4l2;
+       int ret, i;
+
+       /* Initialize Video, VBI and Radio pads */
+       v4l2->video_pad.flags = MEDIA_PAD_FL_SINK;
+       ret = media_entity_pads_init(&v4l2->vdev.entity, 1, &v4l2->video_pad);
+       if (ret < 0)
+               pr_err("failed to initialize video media entity!\n");
+
+       if (em28xx_vbi_supported(dev)) {
+               v4l2->vbi_pad.flags = MEDIA_PAD_FL_SINK;
+               ret = media_entity_pads_init(&v4l2->vbi_dev.entity, 1,
+                                            &v4l2->vbi_pad);
+               if (ret < 0)
+                       pr_err("failed to initialize vbi media entity!\n");
+       }
+
+       /* Webcams don't have input connectors */
+       if (dev->board.is_webcam)
+               return;
+
+       /* Create entities for each input connector */
+       for (i = 0; i < MAX_EM28XX_INPUT; i++) {
+               struct media_entity *ent = &dev->input_ent[i];
+
+               if (!INPUT(i)->type)
+                       break;
+
+               ent->name = iname[INPUT(i)->type];
+               ent->flags = MEDIA_ENT_FL_CONNECTOR;
+               dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+               switch (INPUT(i)->type) {
+               case EM28XX_VMUX_COMPOSITE:
+                       ent->function = MEDIA_ENT_F_CONN_COMPOSITE;
+                       break;
+               case EM28XX_VMUX_SVIDEO:
+                       ent->function = MEDIA_ENT_F_CONN_SVIDEO;
+                       break;
+               default: /* EM28XX_VMUX_TELEVISION or EM28XX_RADIO */
+                       ent->function = MEDIA_ENT_F_CONN_RF;
+                       break;
+               }
+
+               ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
+               if (ret < 0)
+                       pr_err("failed to initialize input pad[%d]!\n", i);
+
+               ret = media_device_register_entity(dev->media_dev, ent);
+               if (ret < 0)
+                       pr_err("failed to register input entity %d!\n", i);
+       }
+#endif
+}
+
+
 /* ------------------------------------------------------------------
        Videobuf2 operations
    ------------------------------------------------------------------*/
@@ -884,6 +1152,9 @@ static int queue_setup(struct vb2_queue *vq,
                return sizes[0] < size ? -EINVAL : 0;
        *nplanes = 1;
        sizes[0] = size;
+
+       em28xx_enable_analog_tuner(dev);
+
        return 0;
 }
 
@@ -962,6 +1233,9 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
                        f.type = V4L2_TUNER_ANALOG_TV;
                v4l2_device_call_all(&v4l2->v4l2_dev,
                                     0, tuner, s_frequency, &f);
+
+               /* Enable video stream at TV decoder */
+               v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 1);
        }
 
        v4l2->streaming_users++;
@@ -981,6 +1255,9 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
        res_free(dev, vq->type);
 
        if (v4l2->streaming_users-- == 1) {
+               /* Disable video stream at TV decoder */
+               v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 0);
+
                /* Last active user, so shutdown all the URBS */
                em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
        }
@@ -1013,6 +1290,9 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
        res_free(dev, vq->type);
 
        if (v4l2->streaming_users-- == 1) {
+               /* Disable video stream at TV decoder */
+               v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 0);
+
                /* Last active user, so shutdown all the URBS */
                em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
        }
@@ -1224,6 +1504,12 @@ static void scale_to_size(struct em28xx *dev,
 
        *width = (((unsigned long)maxw) << 12) / (hscale + 4096L);
        *height = (((unsigned long)maxh) << 12) / (vscale + 4096L);
+
+       /* Don't let width or height to be zero */
+       if (*width < 1)
+               *width = 1;
+       if (*height < 1)
+               *height = 1;
 }
 
 /* ------------------------------------------------------------------
@@ -1299,6 +1585,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
                v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh,
                                      1, 0);
        }
+       /* Avoid division by zero at size_to_scale */
+       if (width < 1)
+               width = 1;
+       if (height < 1)
+               height = 1;
 
        size_to_scale(dev, width, height, &hscale, &vscale);
        scale_to_size(dev, hscale, vscale, &width, &height);
@@ -1434,18 +1725,6 @@ static int vidioc_s_parm(struct file *file, void *priv,
                                          0, video, s_parm, p);
 }
 
-static const char *iname[] = {
-       [EM28XX_VMUX_COMPOSITE1] = "Composite1",
-       [EM28XX_VMUX_COMPOSITE2] = "Composite2",
-       [EM28XX_VMUX_COMPOSITE3] = "Composite3",
-       [EM28XX_VMUX_COMPOSITE4] = "Composite4",
-       [EM28XX_VMUX_SVIDEO]     = "S-Video",
-       [EM28XX_VMUX_TELEVISION] = "Television",
-       [EM28XX_VMUX_CABLE]      = "Cable TV",
-       [EM28XX_VMUX_DVB]        = "DVB",
-       [EM28XX_VMUX_DEBUG]      = "for debug only",
-};
-
 static int vidioc_enum_input(struct file *file, void *priv,
                             struct v4l2_input *i)
 {
@@ -1463,8 +1742,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
 
        strcpy(i->name, iname[INPUT(n)->type]);
 
-       if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) ||
-           (EM28XX_VMUX_CABLE == INPUT(n)->type))
+       if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type))
                i->type = V4L2_INPUT_TYPE_TUNER;
 
        i->std = dev->v4l2->vdev.tvnorms;
@@ -1961,6 +2239,8 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
 
        em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
 
+       em28xx_v4l2_media_release(dev);
+
        if (video_is_registered(&v4l2->radio_dev)) {
                em28xx_info("V4L2 device %s deregistered\n",
                            video_device_node_name(&v4l2->radio_dev));
@@ -2284,6 +2564,9 @@ static int em28xx_v4l2_init(struct em28xx *dev)
        v4l2->dev = dev;
        dev->v4l2 = v4l2;
 
+#ifdef CONFIG_MEDIA_CONTROLLER
+       v4l2->v4l2_dev.mdev = dev->media_dev;
+#endif
        ret = v4l2_device_register(&dev->udev->dev, &v4l2->v4l2_dev);
        if (ret < 0) {
                em28xx_errdev("Call to v4l2_device_register() failed!\n");
@@ -2556,6 +2839,16 @@ static int em28xx_v4l2_init(struct em28xx *dev)
                            video_device_node_name(&v4l2->radio_dev));
        }
 
+       /* Init entities at the Media Controller */
+       em28xx_v4l2_create_entities(dev);
+
+       ret = em28xx_v4l2_create_media_graph(dev);
+       if (ret) {
+               em28xx_errdev("failed to create media graph\n");
+               em28xx_v4l2_media_release(dev);
+               goto unregister_dev;
+       }
+
        em28xx_info("V4L2 video device registered as %s\n",
                    video_device_node_name(&v4l2->vdev));
 
@@ -2577,6 +2870,22 @@ static int em28xx_v4l2_init(struct em28xx *dev)
        return 0;
 
 unregister_dev:
+       if (video_is_registered(&v4l2->radio_dev)) {
+               em28xx_info("V4L2 device %s deregistered\n",
+                           video_device_node_name(&v4l2->radio_dev));
+               video_unregister_device(&v4l2->radio_dev);
+       }
+       if (video_is_registered(&v4l2->vbi_dev)) {
+               em28xx_info("V4L2 device %s deregistered\n",
+                           video_device_node_name(&v4l2->vbi_dev));
+               video_unregister_device(&v4l2->vbi_dev);
+       }
+       if (video_is_registered(&v4l2->vdev)) {
+               em28xx_info("V4L2 device %s deregistered\n",
+                           video_device_node_name(&v4l2->vdev));
+               video_unregister_device(&v4l2->vdev);
+       }
+
        v4l2_ctrl_handler_free(&v4l2->ctrl_handler);
        v4l2_device_unregister(&v4l2->v4l2_dev);
 err:
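
The connector entities and pad links built by em28xx_v4l2_create_entities() and em28xx_v4l2_create_media_graph() above are exposed through the media controller device node once media_device_register() succeeds. A minimal, hypothetical userspace sketch that walks the resulting graph (the /dev/media0 path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/dev/media0";
        struct media_entity_desc ent;
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                perror(path);
                return 1;
        }

        /* MEDIA_ENT_ID_FLAG_NEXT asks for the entity following ent.id */
        memset(&ent, 0, sizeof(ent));
        ent.id = MEDIA_ENT_ID_FLAG_NEXT;
        while (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &ent) == 0) {
                printf("entity %u: %s (%u pads, %u links)\n",
                       ent.id, ent.name,
                       (unsigned int)ent.pads, (unsigned int)ent.links);
                ent.id |= MEDIA_ENT_ID_FLAG_NEXT;
        }

        close(fd);
        return 0;
}

The same topology can also be inspected with media-ctl --print-topology from v4l-utils.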
index 8ff066c977d9d3e88b3e32b05cfbd7c6974afaf4..2674449617757a92ec2cd97b9b407f60a32a0a63 100644 (file)
@@ -26,7 +26,7 @@
 #ifndef _EM28XX_H
 #define _EM28XX_H
 
-#define EM28XX_VERSION "0.2.1"
+#define EM28XX_VERSION "0.2.2"
 #define DRIVER_DESC    "Empia em28xx device driver"
 
 #include <linux/workqueue.h>
@@ -291,15 +291,9 @@ struct em28xx_dmaqueue {
 
 #define MAX_EM28XX_INPUT 4
 enum enum28xx_itype {
-       EM28XX_VMUX_COMPOSITE1 = 1,
-       EM28XX_VMUX_COMPOSITE2,
-       EM28XX_VMUX_COMPOSITE3,
-       EM28XX_VMUX_COMPOSITE4,
+       EM28XX_VMUX_COMPOSITE = 1,
        EM28XX_VMUX_SVIDEO,
        EM28XX_VMUX_TELEVISION,
-       EM28XX_VMUX_CABLE,
-       EM28XX_VMUX_DVB,
-       EM28XX_VMUX_DEBUG,
        EM28XX_RADIO,
 };
 
@@ -558,6 +552,11 @@ struct em28xx_v4l2 {
        bool top_field;
        int vbi_read;
        unsigned int field_count;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_pad video_pad, vbi_pad;
+       struct media_entity *decoder;
+#endif
 };
 
 struct em28xx_audio {
@@ -718,6 +717,12 @@ struct em28xx {
        /* Snapshot button input device */
        char snapshot_button_path[30];  /* path of the input dev */
        struct input_dev *sbutton_input_dev;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_device *media_dev;
+       struct media_entity input_ent[MAX_EM28XX_INPUT];
+       struct media_pad input_pad[MAX_EM28XX_INPUT];
+#endif
 };
 
 #define kref_to_dev(d) container_of(d, struct em28xx, ref)
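
With the itype enum collapsed above, all composite inputs are reported as EM28XX_VMUX_COMPOSITE and the cable/DVB/debug entries disappear, but applications keep discovering inputs the usual way, through VIDIOC_ENUMINPUT; only the reported names change. A small, hypothetical V4L2 sketch (the /dev/video0 path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_input input;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0) {
                perror("/dev/video0");
                return 1;
        }

        memset(&input, 0, sizeof(input));
        for (input.index = 0;
             ioctl(fd, VIDIOC_ENUMINPUT, &input) == 0; input.index++)
                printf("input %u: %s (%s)\n", input.index,
                       (const char *)input.name,
                       input.type == V4L2_INPUT_TYPE_TUNER ?
                       "tuner" : "camera/analog input");

        close(fd);
        return 0;
}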
index 745185eb060b5b4c104b220add58a89cac0d23b9..bebee8ca9981f85b05b46162f693aa87c4e28da1 100644 (file)
@@ -250,7 +250,7 @@ struct go7007 {
        struct i2c_adapter i2c_adapter;
 
        /* HPI driver */
-       struct go7007_hpi_ops *hpi_ops;
+       const struct go7007_hpi_ops *hpi_ops;
        void *hpi_context;
        int interrupt_available;
        wait_queue_head_t interrupt_waitq;
index 3dbf14c85c5c847ef8544ec78b96936b76677fd4..14d3f8c1ce4a4135193dab8b263dac34d2d9fe03 100644 (file)
@@ -932,7 +932,7 @@ static void go7007_usb_release(struct go7007 *go)
        kfree(go->hpi_context);
 }
 
-static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
+static const struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
        .interface_reset        = go7007_usb_interface_reset,
        .write_interrupt        = go7007_usb_ezusb_write_interrupt,
        .read_interrupt         = go7007_usb_read_interrupt,
@@ -942,7 +942,7 @@ static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
        .release                = go7007_usb_release,
 };
 
-static struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
+static const struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
        .interface_reset        = go7007_usb_interface_reset,
        .write_interrupt        = go7007_usb_onboard_write_interrupt,
        .read_interrupt         = go7007_usb_read_interrupt,
index 3fc64197b4e6a6475c89b61d580bf3a4317e4b35..08f0ca7aa012e9bb4b335dd27fb3fc401717f8d6 100644 (file)
@@ -273,7 +273,9 @@ static int hdpvr_probe(struct usb_interface *interface,
        struct hdpvr_device *dev;
        struct usb_host_interface *iface_desc;
        struct usb_endpoint_descriptor *endpoint;
+#if IS_ENABLED(CONFIG_I2C)
        struct i2c_client *client;
+#endif
        size_t buffer_size;
        int i;
        int retval = -ENOMEM;
index 7dee22deebf3d8b5c85ca95a39e7cb30935b1885..ba7f02270c8391f3d8bc939bcd0af4acd23e837f 100644 (file)
@@ -462,10 +462,8 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
                        }
 
                        if (wait_event_interruptible(dev->wait_data,
-                                             buf->status == BUFSTAT_READY)) {
-                               ret = -ERESTARTSYS;
-                               goto err;
-                       }
+                                             buf->status == BUFSTAT_READY))
+                               return -ERESTARTSYS;
                }
 
                if (buf->status != BUFSTAT_READY)
index c104315fdc17217f4fad27b6c86e017fa126b7b2..2d33033682af3635a1237d14e0bef045185e7ef0 100644 (file)
@@ -839,8 +839,6 @@ static int msi2500_set_usb_adc(struct msi2500_dev *dev)
                goto err;
 
        ret = msi2500_ctrl_msg(dev, CMD_WREG, reg3);
-       if (ret)
-               goto err;
 err:
        return ret;
 }
index 086cf1c7bd7d19e0015950bd6af0d02c311bf090..18aed5dd325e024e1db0860ba954ae75ef50fa7e 100644 (file)
@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
        { USB_DEVICE(0x0471, 0x0312) },
        { USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
        { USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
+       { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
        { USB_DEVICE(0x069A, 0x0001) }, /* Askey */
        { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
        { USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
@@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
                        name = "Philips SPC 900NC webcam";
                        type_id = 740;
                        break;
+               case 0x032C:
+                       PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
+                       name = "Philips SPC 880NC webcam";
+                       type_id = 740;
+                       break;
                default:
                        return -ENODEV;
                        break;
index 46191d5262eb07f3efb9fdf87c75c0665abe6687..6ecb0b48423f3d411cb0874c9c159727241eb3bd 100644 (file)
@@ -98,7 +98,6 @@ void stk1160_buffer_done(struct stk1160 *dev)
 
        buf->vb.sequence = dev->sequence++;
        buf->vb.field = V4L2_FIELD_INTERLACED;
-       buf->vb.vb2_buf.planes[0].bytesused = buf->bytesused;
        buf->vb.vb2_buf.timestamp = ktime_get_ns();
 
        vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->bytesused);
index 4ebb33943f9a03a5922c408cfef628847a1053d7..f6cfad46547ef5951923e95d4005aa3f317c1e56 100644 (file)
@@ -312,20 +312,24 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
        usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
        usbtv->chunks_done++;
 
-       /* Last chunk in a frame, signalling an end */
-       if (odd && chunk_no == usbtv->n_chunks-1) {
-               int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
-               enum vb2_buffer_state state = usbtv->chunks_done ==
-                                               usbtv->n_chunks ?
-                                               VB2_BUF_STATE_DONE :
-                                               VB2_BUF_STATE_ERROR;
-
-               buf->vb.field = V4L2_FIELD_INTERLACED;
-               buf->vb.sequence = usbtv->sequence++;
-               buf->vb.vb2_buf.timestamp = ktime_get_ns();
-               vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
-               vb2_buffer_done(&buf->vb.vb2_buf, state);
-               list_del(&buf->list);
+       /* Last chunk in a field */
+       if (chunk_no == usbtv->n_chunks-1) {
+               /* Last chunk in a frame, signalling an end */
+               if (odd && !usbtv->last_odd) {
+                       int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+                       enum vb2_buffer_state state = usbtv->chunks_done ==
+                               usbtv->n_chunks ?
+                               VB2_BUF_STATE_DONE :
+                               VB2_BUF_STATE_ERROR;
+
+                       buf->vb.field = V4L2_FIELD_INTERLACED;
+                       buf->vb.sequence = usbtv->sequence++;
+                       buf->vb.vb2_buf.timestamp = ktime_get_ns();
+                       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+                       vb2_buffer_done(&buf->vb.vb2_buf, state);
+                       list_del(&buf->list);
+               }
+               usbtv->last_odd = odd;
        }
 
        spin_unlock_irqrestore(&usbtv->buflock, flags);
@@ -389,6 +393,10 @@ static struct urb *usbtv_setup_iso_transfer(struct usbtv *usbtv)
        ip->transfer_flags = URB_ISO_ASAP;
        ip->transfer_buffer = kzalloc(size * USBTV_ISOC_PACKETS,
                                                GFP_KERNEL);
+       if (!ip->transfer_buffer) {
+               usb_free_urb(ip);
+               return NULL;
+       }
        ip->complete = usbtv_iso_cb;
        ip->number_of_packets = USBTV_ISOC_PACKETS;
        ip->transfer_buffer_length = size * USBTV_ISOC_PACKETS;
@@ -639,6 +647,7 @@ static int usbtv_start_streaming(struct vb2_queue *vq, unsigned int count)
        if (usbtv->udev == NULL)
                return -ENODEV;
 
+       usbtv->last_odd = 1;
        usbtv->sequence = 0;
        return usbtv_start(usbtv);
 }
index 19cb8bf7c4e9b3c8befe5d82597ec2fdb8b258c7..161b38d5cfa0463e939af70fdfddfeb48510980e 100644 (file)
@@ -95,6 +95,7 @@ struct usbtv {
        int width, height;
        int n_chunks;
        int iso_size;
+       int last_odd;
        unsigned int sequence;
        struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
 
index de9ff3bb8edd8e7a94826bf58895d009dc420038..12f5ebbd0436e770022e7501a80f5dda7849f9a1 100644 (file)
@@ -162,8 +162,7 @@ MODULE_ALIAS(DRIVER_ALIAS);
 
 static inline struct usb_usbvision *cd_to_usbvision(struct device *cd)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        return video_get_drvdata(vdev);
 }
 
@@ -177,8 +176,7 @@ static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
 static ssize_t show_model(struct device *cd,
                          struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        return sprintf(buf, "%s\n",
                       usbvision_device_data[usbvision->dev_model].model_string);
@@ -188,8 +186,7 @@ static DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
 static ssize_t show_hue(struct device *cd,
                        struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        struct v4l2_control ctrl;
        ctrl.id = V4L2_CID_HUE;
@@ -203,8 +200,7 @@ static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL);
 static ssize_t show_contrast(struct device *cd,
                             struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        struct v4l2_control ctrl;
        ctrl.id = V4L2_CID_CONTRAST;
@@ -218,8 +214,7 @@ static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL);
 static ssize_t show_brightness(struct device *cd,
                               struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        struct v4l2_control ctrl;
        ctrl.id = V4L2_CID_BRIGHTNESS;
@@ -233,8 +228,7 @@ static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL);
 static ssize_t show_saturation(struct device *cd,
                               struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        struct v4l2_control ctrl;
        ctrl.id = V4L2_CID_SATURATION;
@@ -248,8 +242,7 @@ static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL);
 static ssize_t show_streaming(struct device *cd,
                              struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        return sprintf(buf, "%s\n",
                       YES_NO(usbvision->streaming == stream_on ? 1 : 0));
@@ -259,8 +252,7 @@ static DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL);
 static ssize_t show_compression(struct device *cd,
                                struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        return sprintf(buf, "%s\n",
                       YES_NO(usbvision->isoc_mode == ISOC_MODE_COMPRESS));
@@ -270,8 +262,7 @@ static DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL);
 static ssize_t show_device_bridge(struct device *cd,
                                  struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        return sprintf(buf, "%d\n", usbvision->bridge_type);
 }
@@ -1156,6 +1147,7 @@ static int usbvision_radio_close(struct file *file)
        usbvision_audio_off(usbvision);
        usbvision->radio = 0;
        usbvision->user--;
+       mutex_unlock(&usbvision->v4l2_lock);
 
        if (usbvision->remove_pending) {
                printk(KERN_INFO "%s: Final disconnect\n", __func__);
@@ -1164,7 +1156,6 @@ static int usbvision_radio_close(struct file *file)
                return 0;
        }
 
-       mutex_unlock(&usbvision->v4l2_lock);
        PDEBUG(DBG_IO, "success");
        return v4l2_fh_release(file);
 }
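The usbvision hunks above swap the open-coded container_of() casts for the to_video_device() helper. As a reference sketch (the helper lives in include/media/v4l2-dev.h; the definition below is quoted from memory, so treat it as an assumption rather than part of this commit), the two forms are equivalent:

        /* Illustrative only -- not part of the diff above. */
        #define to_video_device(cd) container_of(cd, struct video_device, dev)
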
index 9beece00869bf0e27a99fc641b8f926c0d3bad8f..29b3436d0910fbd0c1a591b0aea408dc34aca74a 100644 (file)
@@ -37,7 +37,6 @@ config VIDEO_PCI_SKELETON
 # Used by drivers that need tuner.ko
 config VIDEO_TUNER
        tristate
-       depends on MEDIA_TUNER
 
 # Used by drivers that need v4l2-mem2mem.ko
 config V4L2_MEM2MEM_DEV
index 76496fd282aa532da3e84960ea7dddeedc013ccf..731487be5baa993147206ec8f85242cd20dbbc91 100644 (file)
@@ -696,16 +696,32 @@ static int tuner_probe(struct i2c_client *client,
        /* Should be just before return */
 register_client:
 #if defined(CONFIG_MEDIA_CONTROLLER)
-       t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
-       t->pad[TUNER_PAD_IF_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
-       t->sd.entity.function = MEDIA_ENT_F_TUNER;
        t->sd.entity.name = t->name;
+       /*
+        * Handle the special case where the tuner has actually
+        * two stages: the PLL to tune into a frequency and the
+        * IF-PLL demodulator (tda988x).
+        */
+       if (t->type == TUNER_TDA9887) {
+               t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+               t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+               ret = media_entity_pads_init(&t->sd.entity,
+                                            IF_VID_DEC_PAD_NUM_PADS,
+                                            &t->pad[0]);
+               t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER;
+       } else {
+               t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
+               t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+               t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+               ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS,
+                                            &t->pad[0]);
+               t->sd.entity.function = MEDIA_ENT_F_TUNER;
+       }
 
-       ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS, &t->pad[0]);
        if (ret < 0) {
                tuner_err("failed to initialize media entity!\n");
                kfree(t);
-               return -ENODEV;
+               return ret;
        }
 #endif
        /* Sets a default mode */
index 8fd84a67478a77423cc6bc051e0654276561502c..019644ff627d775f7f4b67e29b46baab9dadabbf 100644 (file)
@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
                get_user(kp->index, &up->index) ||
                get_user(kp->type, &up->type) ||
                get_user(kp->flags, &up->flags) ||
-               get_user(kp->memory, &up->memory))
+               get_user(kp->memory, &up->memory) ||
+               get_user(kp->length, &up->length))
                        return -EFAULT;
 
        if (V4L2_TYPE_IS_OUTPUT(kp->type))
@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
                        return -EFAULT;
 
        if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-               if (get_user(kp->length, &up->length))
-                       return -EFAULT;
-
                num_planes = kp->length;
                if (num_planes == 0) {
                        kp->m.planes = NULL;
@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
        } else {
                switch (kp->memory) {
                case V4L2_MEMORY_MMAP:
-                       if (get_user(kp->length, &up->length) ||
-                               get_user(kp->m.offset, &up->m.offset))
+                       if (get_user(kp->m.offset, &up->m.offset))
                                return -EFAULT;
                        break;
                case V4L2_MEMORY_USERPTR:
                        {
                        compat_long_t tmp;
 
-                       if (get_user(kp->length, &up->length) ||
-                           get_user(tmp, &up->m.userptr))
+                       if (get_user(tmp, &up->m.userptr))
                                return -EFAULT;
 
                        kp->m.userptr = (unsigned long)compat_ptr(tmp);
@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
                copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
                put_user(kp->sequence, &up->sequence) ||
                put_user(kp->reserved2, &up->reserved2) ||
-               put_user(kp->reserved, &up->reserved))
+               put_user(kp->reserved, &up->reserved) ||
+               put_user(kp->length, &up->length))
                        return -EFAULT;
 
        if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
        } else {
                switch (kp->memory) {
                case V4L2_MEMORY_MMAP:
-                       if (put_user(kp->length, &up->length) ||
-                               put_user(kp->m.offset, &up->m.offset))
+                       if (put_user(kp->m.offset, &up->m.offset))
                                return -EFAULT;
                        break;
                case V4L2_MEMORY_USERPTR:
-                       if (put_user(kp->length, &up->length) ||
-                               put_user(kp->m.userptr, &up->m.userptr))
+                       if (put_user(kp->m.userptr, &up->m.userptr))
                                return -EFAULT;
                        break;
                case V4L2_MEMORY_OVERLAY:
index ec258b73001a26a96dcdb56d8c76e64b5762f1d9..889de0a321529ace36560ddabd682b74f080de8d 100644 (file)
@@ -165,7 +165,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
            bt->width > cap->max_width ||
            bt->pixelclock < cap->min_pixelclock ||
            bt->pixelclock > cap->max_pixelclock ||
-           (cap->standards && bt->standards &&
+           (!(caps & V4L2_DV_BT_CAP_CUSTOM) &&
+            cap->standards && bt->standards &&
             !(bt->standards & cap->standards)) ||
            (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
            (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
index b27cbb1f5afe734dd3b17b4e64ac40d1b2871276..93b33681776ca427703dee8bbf6616cca95d7938 100644 (file)
@@ -146,7 +146,7 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,
  * variable without a low fixed limit. Please use
  * v4l2_of_alloc_parse_endpoint() in new drivers instead.
  *
- * Return: 0.
+ * Return: 0 on success or a negative error code on failure.
  */
 int v4l2_of_parse_endpoint(const struct device_node *node,
                           struct v4l2_of_endpoint *endpoint)
index c5d49d7a0d76d09c1ac5598a21ce7726d03190c1..dab94080ec3ae80941b275d2a226dfe777066b86 100644 (file)
@@ -1063,8 +1063,11 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
  */
 static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
 {
-       int ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
-                       vb, pb, vb->planes);
+       int ret = 0;
+
+       if (pb)
+               ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+                                vb, pb, vb->planes);
        return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
 }
 
@@ -1077,14 +1080,16 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
        struct vb2_queue *q = vb->vb2_queue;
        void *mem_priv;
        unsigned int plane;
-       int ret;
+       int ret = 0;
        enum dma_data_direction dma_dir =
                q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        bool reacquired = vb->planes[0].mem_priv == NULL;
 
        memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
        /* Copy relevant information provided by the userspace */
-       ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+       if (pb)
+               ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+                                vb, pb, planes);
        if (ret)
                return ret;
 
@@ -1192,14 +1197,16 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
        struct vb2_queue *q = vb->vb2_queue;
        void *mem_priv;
        unsigned int plane;
-       int ret;
+       int ret = 0;
        enum dma_data_direction dma_dir =
                q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        bool reacquired = vb->planes[0].mem_priv == NULL;
 
        memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
        /* Copy relevant information provided by the userspace */
-       ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+       if (pb)
+               ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+                                vb, pb, planes);
        if (ret)
                return ret;
 
@@ -1220,6 +1227,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
                if (planes[plane].length < vb->planes[plane].min_length) {
                        dprintk(1, "invalid dmabuf length for plane %d\n",
                                plane);
+                       dma_buf_put(dbuf);
                        ret = -EINVAL;
                        goto err;
                }
@@ -1520,7 +1528,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
        q->waiting_for_buffers = false;
        vb->state = VB2_BUF_STATE_QUEUED;
 
-       call_void_bufop(q, copy_timestamp, vb, pb);
+       if (pb)
+               call_void_bufop(q, copy_timestamp, vb, pb);
 
        trace_vb2_qbuf(q, vb);
 
@@ -1532,7 +1541,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
                __enqueue_in_driver(vb);
 
        /* Fill buffer information for the userspace */
-       call_void_bufop(q, fill_user_buffer, vb, pb);
+       if (pb)
+               call_void_bufop(q, fill_user_buffer, vb, pb);
 
        /*
         * If streamon has been called, and we haven't yet called
@@ -1731,7 +1741,8 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
  * The return values from this function are intended to be directly returned
  * from vidioc_dqbuf handler in driver.
  */
-int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+                  bool nonblocking)
 {
        struct vb2_buffer *vb = NULL;
        int ret;
@@ -1754,8 +1765,12 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
 
        call_void_vb_qop(vb, buf_finish, vb);
 
+       if (pindex)
+               *pindex = vb->index;
+
        /* Fill buffer information for the userspace */
-       call_void_bufop(q, fill_user_buffer, vb, pb);
+       if (pb)
+               call_void_bufop(q, fill_user_buffer, vb, pb);
 
        /* Remove from videobuf queue */
        list_del(&vb->queued_entry);
@@ -1828,7 +1843,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
         * that's done in dqbuf, but that's not going to happen when we
         * cancel the whole queue. Note: this code belongs here, not in
         * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
-        * call to __fill_v4l2_buffer() after buf_finish(). That order can't
+        * call to __fill_user_buffer() after buf_finish(). That order can't
         * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
         */
        for (i = 0; i < q->num_buffers; ++i) {
@@ -2357,7 +2372,6 @@ struct vb2_fileio_data {
        unsigned int count;
        unsigned int type;
        unsigned int memory;
-       struct vb2_buffer *b;
        struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
        unsigned int cur_index;
        unsigned int initial_index;
@@ -2410,12 +2424,6 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
        if (fileio == NULL)
                return -ENOMEM;
 
-       fileio->b = kzalloc(q->buf_struct_size, GFP_KERNEL);
-       if (fileio->b == NULL) {
-               kfree(fileio);
-               return -ENOMEM;
-       }
-
        fileio->read_once = q->fileio_read_once;
        fileio->write_immediately = q->fileio_write_immediately;
 
@@ -2460,13 +2468,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
                 * Queue all buffers.
                 */
                for (i = 0; i < q->num_buffers; i++) {
-                       struct vb2_buffer *b = fileio->b;
-
-                       memset(b, 0, q->buf_struct_size);
-                       b->type = q->type;
-                       b->memory = q->memory;
-                       b->index = i;
-                       ret = vb2_core_qbuf(q, i, b);
+                       ret = vb2_core_qbuf(q, i, NULL);
                        if (ret)
                                goto err_reqbufs;
                        fileio->bufs[i].queued = 1;
@@ -2511,7 +2513,6 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
                q->fileio = NULL;
                fileio->count = 0;
                vb2_core_reqbufs(q, fileio->memory, &fileio->count);
-               kfree(fileio->b);
                kfree(fileio);
                dprintk(3, "file io emulator closed\n");
        }
@@ -2539,7 +2540,8 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
         * else is able to provide this information with the write() operation.
         */
        bool copy_timestamp = !read && q->copy_timestamp;
-       int ret, index;
+       unsigned index;
+       int ret;
 
        dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
                read ? "read" : "write", (long)*ppos, count,
@@ -2564,22 +2566,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
         */
        index = fileio->cur_index;
        if (index >= q->num_buffers) {
-               struct vb2_buffer *b = fileio->b;
+               struct vb2_buffer *b;
 
                /*
                 * Call vb2_dqbuf to get buffer back.
                 */
-               memset(b, 0, q->buf_struct_size);
-               b->type = q->type;
-               b->memory = q->memory;
-               ret = vb2_core_dqbuf(q, b, nonblock);
+               ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
                dprintk(5, "vb2_dqbuf result: %d\n", ret);
                if (ret)
                        return ret;
                fileio->dq_count += 1;
 
-               fileio->cur_index = index = b->index;
+               fileio->cur_index = index;
                buf = &fileio->bufs[index];
+               b = q->bufs[index];
 
                /*
                 * Get number of bytes filled by the driver
@@ -2630,7 +2630,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
         * Queue next buffer if required.
         */
        if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
-               struct vb2_buffer *b = fileio->b;
+               struct vb2_buffer *b = q->bufs[index];
 
                /*
                 * Check if this is the last buffer to read.
@@ -2643,15 +2643,11 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
                /*
                 * Call vb2_qbuf and give buffer to the driver.
                 */
-               memset(b, 0, q->buf_struct_size);
-               b->type = q->type;
-               b->memory = q->memory;
-               b->index = index;
                b->planes[0].bytesused = buf->pos;
 
                if (copy_timestamp)
                        b->timestamp = ktime_get_ns();
-               ret = vb2_core_qbuf(q, index, b);
+               ret = vb2_core_qbuf(q, index, NULL);
                dprintk(5, "vb2_dbuf result: %d\n", ret);
                if (ret)
                        return ret;
@@ -2713,10 +2709,9 @@ static int vb2_thread(void *data)
 {
        struct vb2_queue *q = data;
        struct vb2_threadio_data *threadio = q->threadio;
-       struct vb2_fileio_data *fileio = q->fileio;
        bool copy_timestamp = false;
-       int prequeue = 0;
-       int index = 0;
+       unsigned prequeue = 0;
+       unsigned index = 0;
        int ret = 0;
 
        if (q->is_output) {
@@ -2728,37 +2723,34 @@ static int vb2_thread(void *data)
 
        for (;;) {
                struct vb2_buffer *vb;
-               struct vb2_buffer *b = fileio->b;
 
                /*
                 * Call vb2_dqbuf to get buffer back.
                 */
-               memset(b, 0, q->buf_struct_size);
-               b->type = q->type;
-               b->memory = q->memory;
                if (prequeue) {
-                       b->index = index++;
+                       vb = q->bufs[index++];
                        prequeue--;
                } else {
                        call_void_qop(q, wait_finish, q);
                        if (!threadio->stop)
-                               ret = vb2_core_dqbuf(q, b, 0);
+                               ret = vb2_core_dqbuf(q, &index, NULL, 0);
                        call_void_qop(q, wait_prepare, q);
                        dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+                       if (!ret)
+                               vb = q->bufs[index];
                }
                if (ret || threadio->stop)
                        break;
                try_to_freeze();
 
-               vb = q->bufs[b->index];
-               if (b->state == VB2_BUF_STATE_DONE)
+               if (vb->state != VB2_BUF_STATE_ERROR)
                        if (threadio->fnc(vb, threadio->priv))
                                break;
                call_void_qop(q, wait_finish, q);
                if (copy_timestamp)
-                       b->timestamp = ktime_get_ns();;
+                       vb->timestamp = ktime_get_ns();;
                if (!threadio->stop)
-                       ret = vb2_core_qbuf(q, b->index, b);
+                       ret = vb2_core_qbuf(q, vb->index, NULL);
                call_void_qop(q, wait_prepare, q);
                if (ret || threadio->stop)
                        break;
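The videobuf2-core.c hunks above change vb2_core_qbuf()/vb2_core_dqbuf() so the userspace buffer pointer may be NULL and the dequeued index is returned through a separate pointer, which is what lets the file-I/O emulator and vb2_thread() drop their scratch vb2_buffer. A minimal sketch of a caller using the new signatures (the helper name and the trimmed error handling are illustrative, not from the commit):

        /* Dequeue one buffer by index, touch it, and requeue it, roughly
         * mirroring what __vb2_perform_fileio() does after this change. */
        static int example_bounce_one(struct vb2_queue *q, bool nonblock)
        {
                unsigned int index;
                int ret;

                ret = vb2_core_dqbuf(q, &index, NULL, nonblock); /* pb == NULL */
                if (ret)
                        return ret;

                /* ... consume or fill q->bufs[index] here ... */

                return vb2_core_qbuf(q, index, NULL);            /* requeue */
        }
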
index c9a28605511a71166af35a4ef11fb95ff0eb8fbd..91f552124050d2b3b91d2190af16fbdf2f9d7243 100644 (file)
@@ -625,7 +625,7 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
                return -EINVAL;
        }
 
-       ret = vb2_core_dqbuf(q, b, nonblocking);
+       ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
 
        return ret;
 }
index acd1460cf7871c4ef4cffb0f0e196f41083aae36..2a691da8c1c7c0441c08d97a8d1e711099a32299 100644 (file)
@@ -260,7 +260,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
 
        /* get the Controller level irq */
        fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
-       if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
+       if (fsl_ifc_ctrl_dev->irq == 0) {
                dev_err(&dev->dev, "failed to get irq resource "
                                                        "for IFC\n");
                ret = -ENODEV;
index e6e4bacb09ee5d2636576c3363c97a1a5c5f72c8..12099b09a9a71ee5f49e61608b24734e42ca58db 100644 (file)
@@ -2048,6 +2048,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
 
 static int config_hot_period(u16 val)
 {
@@ -2074,11 +2075,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k)
 
        return config_hot_period(cycles32k);
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
 
 int db8500_prcmu_stop_temp_sense(void)
 {
        return config_hot_period(0xFFFF);
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
 
 static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
 {
index 4c1903f781fc1793bb7ddf7205d138e60ea2133a..a21403238a4a683a0062dabfea3a0fd3af9847ab 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/delay.h>
 #include <asm/opal.h>
 #include <asm/msi_bitmap.h>
-#include <asm/pci-bridge.h> /* for struct pci_controller */
 #include <asm/pnv-pci.h>
 #include <asm/io.h>
 
index 677d0362f334e842abb2c8c2439260978249b871..80f9afcb13823282a9859e08a96c36f55901efa6 100644 (file)
@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
 {
        struct mei_cl *cl = file->private_data;
 
-       return mei_cl_notify_request(cl, file, request);
+       if (request != MEI_HBM_NOTIFICATION_START &&
+           request != MEI_HBM_NOTIFICATION_STOP)
+               return -EINVAL;
+
+       return mei_cl_notify_request(cl, file, (u8)request);
 }
 
 /**
index 8282f47bcf5d374e03d343453d62ee0bc4851785..845dd27d9f417a07cb6a55000f44504e55b3451c 100644 (file)
@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
 {
        uint32_t buf;
        size_t bytes_read;
+       int err;
 
-       if (mtd_read(master, offset, sizeof(buf), &bytes_read,
-                    (uint8_t *)&buf) < 0) {
-               pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-                       offset);
+       err  = mtd_read(master, offset, sizeof(buf), &bytes_read,
+                       (uint8_t *)&buf);
+       if (err && !mtd_is_bitflip(err)) {
+               pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+                       offset, err);
                goto out_default;
        }
 
@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        int trx_part = -1;
        int last_trx_part = -1;
        int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+       int err;
 
        /*
         * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        /* Parse block by block looking for magics */
        for (offset = 0; offset <= master->size - blocksize;
             offset += blocksize) {
-               /* Nothing more in higher memory */
-               if (offset >= 0x2000000)
+               /* Nothing more in higher memory on BCM47XX (MIPS) */
+               if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
                        break;
 
                if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Read beginning of the block */
-               if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
-                            &bytes_read, (uint8_t *)buf) < 0) {
-                       pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-                              offset);
+               err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+                              &bytes_read, (uint8_t *)buf);
+               if (err && !mtd_is_bitflip(err)) {
+                       pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+                              offset, err);
                        continue;
                }
 
@@ -254,10 +258,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Read middle of the block */
-               if (mtd_read(master, offset + 0x8000, 0x4,
-                            &bytes_read, (uint8_t *)buf) < 0) {
-                       pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-                              offset);
+               err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+                              (uint8_t *)buf);
+               if (err && !mtd_is_bitflip(err)) {
+                       pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+                              offset, err);
                        continue;
                }
 
@@ -277,10 +282,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                offset = master->size - possible_nvram_sizes[i];
-               if (mtd_read(master, offset, 0x4, &bytes_read,
-                            (uint8_t *)buf) < 0) {
-                       pr_err("mtd_read error while reading at offset 0x%X!\n",
-                              offset);
+               err = mtd_read(master, offset, 0x4, &bytes_read,
+                              (uint8_t *)buf);
+               if (err && !mtd_is_bitflip(err)) {
+                       pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+                              offset, err);
                        continue;
                }
 
index 20f01b3ec23d7dc29b5b4d4326cd4fc0d766d6be..b253654140d065405bf258ca339c80895b88e77c 100644 (file)
@@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
 config MTD_NAND_GPIO
        tristate "GPIO assisted NAND Flash driver"
        depends on GPIOLIB || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          This enables a NAND flash driver where control signals are
          connected to GPIO pins, and commands and data are communicated
@@ -310,6 +311,7 @@ config MTD_NAND_CAFE
 config MTD_NAND_CS553X
        tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
        depends on X86_32
+       depends on !UML && HAS_IOMEM
        help
          The CS553x companion chips for the AMD Geode processor
          include NAND flash controllers with built-in hardware ECC
@@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC
 config MTD_NAND_VF610_NFC
        tristate "Support for Freescale NFC for VF610/MPC5125"
        depends on (SOC_VF610 || COMPILE_TEST)
+       depends on HAS_IOMEM
        help
          Enables support for NAND Flash Controller on some Freescale
          processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
index bddcf83d6859aecd689797c99d5745569f39b713..affe7a7e9ad7dcefa1e25366b5a21cf07454f3ad 100644 (file)
@@ -825,7 +825,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
                        *(buf + byte_pos) ^= (1 << bit_pos);
 
                        pos = sector_num * host->pmecc_sector_size + byte_pos;
-                       dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+                       dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
                                pos, bit_pos, err_byte, *(buf + byte_pos));
                } else {
                        /* Bit flip in OOB area */
@@ -835,7 +835,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
                        ecc[tmp] ^= (1 << bit_pos);
 
                        pos = tmp + nand_chip->ecc.layout->eccpos[0];
-                       dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+                       dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
                                pos, bit_pos, err_byte, ecc[tmp]);
                }
 
@@ -1486,8 +1486,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
                ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
 }
 
-static const struct of_device_id atmel_nand_dt_ids[];
-
 static int atmel_of_init_port(struct atmel_nand_host *host,
                              struct device_node *np)
 {
@@ -1498,7 +1496,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
        enum of_gpio_flags flags = 0;
 
        host->caps = (struct atmel_nand_caps *)
-               of_match_device(atmel_nand_dt_ids, host->dev)->data;
+               of_device_get_match_data(host->dev);
 
        if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
                if (val >= 32) {
@@ -1550,7 +1548,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
                if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
                                (val != 24)) {
                        dev_err(host->dev,
-                               "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+                               "Required ECC strength not supported: %u\n",
                                val);
                        return -EINVAL;
                }
@@ -1560,7 +1558,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
        if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
                if ((val != 512) && (val != 1024)) {
                        dev_err(host->dev,
-                               "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+                               "Required ECC sector size not supported: %u\n",
                                val);
                        return -EINVAL;
                }
index 235ddcb58f39932fbd41aa949170000b9dee8891..8122c699ccf20895e400b8ae9246b39db00b1f00 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Freescale GPMI NAND Flash Driver
  *
- * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
  * Copyright (C) 2008 Embedded Alley Solutions, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -136,7 +136,7 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
  *
  * We may have available oob space in this case.
  */
-static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
 {
        struct bch_geometry *geo = &this->bch_geometry;
        struct nand_chip *chip = &this->nand;
@@ -145,7 +145,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
        unsigned int block_mark_bit_offset;
 
        if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
-               return false;
+               return -EINVAL;
 
        switch (chip->ecc_step_ds) {
        case SZ_512:
@@ -158,19 +158,19 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
                dev_err(this->dev,
                        "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
                        chip->ecc_strength_ds, chip->ecc_step_ds);
-               return false;
+               return -EINVAL;
        }
        geo->ecc_chunk_size = chip->ecc_step_ds;
        geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
        if (!gpmi_check_ecc(this))
-               return false;
+               return -EINVAL;
 
        /* Keep the C >= O */
        if (geo->ecc_chunk_size < mtd->oobsize) {
                dev_err(this->dev,
                        "unsupported nand chip. ecc size: %d, oob size : %d\n",
                        chip->ecc_step_ds, mtd->oobsize);
-               return false;
+               return -EINVAL;
        }
 
        /* The default value, see comment in the legacy_set_geometry(). */
@@ -242,7 +242,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
                                + ALIGN(geo->ecc_chunk_count, 4);
 
        if (!this->swap_block_mark)
-               return true;
+               return 0;
 
        /* For bit swap. */
        block_mark_bit_offset = mtd->writesize * 8 -
@@ -251,7 +251,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
 
        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
-       return true;
+       return 0;
 }
 
 static int legacy_set_geometry(struct gpmi_nand_data *this)
@@ -285,7 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
        geo->ecc_strength = get_ecc_strength(this);
        if (!gpmi_check_ecc(this)) {
                dev_err(this->dev,
-                       "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n",
+                       "ecc strength: %d cannot be supported by the controller (%d)\n"
+                       "try to use minimum ecc strength that NAND chip required\n",
                        geo->ecc_strength,
                        this->devdata->bch_max_ecc_strength);
                return -EINVAL;
@@ -366,10 +367,11 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
 
 int common_nfc_set_geometry(struct gpmi_nand_data *this)
 {
-       if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
-               && set_geometry_by_ecc_info(this))
-               return 0;
-       return legacy_set_geometry(this);
+       if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
+                               || legacy_set_geometry(this))
+               return set_geometry_by_ecc_info(this);
+
+       return 0;
 }
 
 struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
@@ -2033,9 +2035,54 @@ static int gpmi_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int gpmi_pm_suspend(struct device *dev)
+{
+       struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+       release_dma_channels(this);
+       return 0;
+}
+
+static int gpmi_pm_resume(struct device *dev)
+{
+       struct gpmi_nand_data *this = dev_get_drvdata(dev);
+       int ret;
+
+       ret = acquire_dma_channels(this);
+       if (ret < 0)
+               return ret;
+
+       /* re-init the GPMI registers */
+       this->flags &= ~GPMI_TIMING_INIT_OK;
+       ret = gpmi_init(this);
+       if (ret) {
+               dev_err(this->dev, "Error setting GPMI : %d\n", ret);
+               return ret;
+       }
+
+       /* re-init the BCH registers */
+       ret = bch_set_geometry(this);
+       if (ret) {
+               dev_err(this->dev, "Error setting BCH : %d\n", ret);
+               return ret;
+       }
+
+       /* re-init others */
+       gpmi_extra_init(this);
+
+       return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops gpmi_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+};
+
 static struct platform_driver gpmi_nand_driver = {
        .driver = {
                .name = "gpmi-nand",
+               .pm = &gpmi_pm_ops,
                .of_match_table = gpmi_nand_id_table,
        },
        .probe   = gpmi_nand_probe,
index b19d2a9a5eb97cb36ab64ca4108c5ff9fceb5ca2..673ceb2a0b44b677231e030aa8a0f23a5d412e3a 100644 (file)
@@ -427,9 +427,6 @@ static int jz_nand_probe(struct platform_device *pdev)
        chip->ecc.strength      = 4;
        chip->ecc.options       = NAND_ECC_GENERIC_ERASED_CHECK;
 
-       if (pdata)
-               chip->ecc.layout = pdata->ecc_layout;
-
        chip->chip_delay = 50;
        chip->cmd_ctrl = jz_nand_cmd_ctrl;
        chip->select_chip = jz_nand_select_chip;
index 9bc435d72a861b2a75d650f943a7522dd7e317ac..d8c3e7afcc0bfa74c5d0a87e580ca53a6b28e916 100644 (file)
@@ -750,7 +750,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        }
 
        nand_chip->ecc.mode = NAND_ECC_HW;
-       nand_chip->ecc.size = mtd->writesize;
+       nand_chip->ecc.size = 512;
        nand_chip->ecc.layout = &lpc32xx_nand_oob;
        host->mlcsubpages = mtd->writesize / 512;
 
index 6b93e899d4e95faf9372689bb290ec0d22b946e6..5d7843ffff6ac0c85c8a5cc9a11901eb603fdd1b 100644 (file)
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
 
 static int mpc5121_nfc_probe(struct platform_device *op)
 {
-       struct device_node *rootnode, *dn = op->dev.of_node;
+       struct device_node *dn = op->dev.of_node;
        struct clk *clk;
        struct device *dev = &op->dev;
        struct mpc5121_nfc_prv *prv;
@@ -712,18 +712,15 @@ static int mpc5121_nfc_probe(struct platform_device *op)
        chip->ecc.mode = NAND_ECC_SOFT;
 
        /* Support external chip-select logic on ADS5121 board */
-       rootnode = of_find_node_by_path("/");
-       if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+       if (of_machine_is_compatible("fsl,mpc5121ads")) {
                retval = ads5121_chipselect_init(mtd);
                if (retval) {
                        dev_err(dev, "Chipselect init error!\n");
-                       of_node_put(rootnode);
                        return retval;
                }
 
                chip->select_chip = ads5121_select_chip;
        }
-       of_node_put(rootnode);
 
        /* Enable NFC clock */
        clk = devm_clk_get(dev, "ipg");
index 4b6a7085b442deaf3a25216415f5a347ae232281..2fbb523df066840d0919ad9e918d02dbd19ac21c 100644 (file)
@@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
 
        return ret;
 }
-
-EXPORT_SYMBOL(nand_scan_bbt);
index a8804a3da0760968db677406d30446c5ca3162ff..ccc05f5b2695f935aa374ce4f3bb7b50e07aa879 100644 (file)
@@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] = {
                  SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
        {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
                { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
-                 SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
-                 4 },
+                 SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+                 NAND_ECC_INFO(40, SZ_1K), 4 },
 
        LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
        LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
index 220ddfcf29f52535cd077f99ab5821a572962412..dbc5b571c2bbcca6a570e5855d2907009abd9b0f 100644 (file)
@@ -113,7 +113,7 @@ static int nuc900_check_rb(struct nuc900_nand *nand)
 {
        unsigned int val;
        spin_lock(&nand->lock);
-       val = __raw_readl(REG_SMISR);
+       val = __raw_readl(nand->reg + REG_SMISR);
        val &= READYBUSY;
        spin_unlock(&nand->lock);
 
index a0e26dea1424bb3ef0053836512a0a6a3c085265..e4e50da30444fc2dc2a4c6d936680f74525dac53 100644 (file)
@@ -73,7 +73,6 @@ static int plat_nand_probe(struct platform_device *pdev)
        data->chip.bbt_options |= pdata->chip.bbt_options;
 
        data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
-       data->chip.ecc.layout = pdata->chip.ecclayout;
        data->chip.ecc.mode = NAND_ECC_SOFT;
 
        platform_set_drvdata(pdev, data);
index 86fc245dc71a2395227a6b7283fa47ff1cc2fae1..e42496adda8dbf2a4a149f5197d42c1345ea3ac4 100644 (file)
 #define READ_ID_BYTES          7
 
 /* macros for registers read/write */
-#define nand_writel(info, off, val)    \
-       writel_relaxed((val), (info)->mmio_base + (off))
-
-#define nand_readl(info, off)          \
-       readl_relaxed((info)->mmio_base + (off))
+#define nand_writel(info, off, val)                                    \
+       do {                                                            \
+               dev_vdbg(&info->pdev->dev,                              \
+                        "%s():%d nand_writel(0x%x, 0x%04x)\n",         \
+                        __func__, __LINE__, (val), (off));             \
+               writel_relaxed((val), (info)->mmio_base + (off));       \
+       } while (0)
+
+#define nand_readl(info, off)                                          \
+       ({                                                              \
+               unsigned int _v;                                        \
+               _v = readl_relaxed((info)->mmio_base + (off));          \
+               dev_vdbg(&info->pdev->dev,                              \
+                        "%s():%d nand_readl(0x%04x) = 0x%x\n",         \
+                        __func__, __LINE__, (off), _v);                \
+               _v;                                                     \
+       })
 
 /* error code and state */
 enum {
index 01ac74fa3b95244d5c2bf455b664f0834c2940ae..9c9397b54b2ca9a826e27fc7625108100ae2fb87 100644 (file)
@@ -861,9 +861,6 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
        chip->ecc.mode      = NAND_ECC_SOFT;
 #endif
 
-       if (set->ecc_layout != NULL)
-               chip->ecc.layout = set->ecc_layout;
-
        if (set->disable_ecc)
                chip->ecc.mode  = NAND_ECC_NONE;
 
index 51e10a35fe08c29d6a27ef94563b4a8036f7e41a..b5ea6b312df08b3467fcc85220a0f1e01cd263c2 100644 (file)
@@ -60,6 +60,7 @@
 #define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
 #define NFC_REG_USER_DATA(x)   (0x0050 + ((x) * 4))
 #define NFC_REG_SPARE_AREA     0x00A0
+#define NFC_REG_PAT_ID         0x00A4
 #define NFC_RAM0_BASE          0x0400
 #define NFC_RAM1_BASE          0x0800
 
@@ -538,6 +539,174 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
        sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
 }
 
+/* These seed values have been extracted from Allwinner's BSP */
+static const u16 sunxi_nfc_randomizer_page_seeds[] = {
+       0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+       0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+       0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+       0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+       0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+       0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+       0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+       0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+       0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+       0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+       0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+       0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+       0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+       0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+       0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+       0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+/*
+ * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
+ * have been generated using
+ * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
+ * the randomizer engine does internally before de/scrambling OOB data.
+ *
+ * Those tables are statically defined to avoid calculating randomizer state
+ * at runtime.
+ */
+static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
+       0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
+       0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
+       0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
+       0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
+       0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
+       0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
+       0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
+       0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
+       0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
+       0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
+       0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
+       0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
+       0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
+       0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
+       0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
+       0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
+};
+
+static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
+       0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
+       0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
+       0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
+       0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
+       0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
+       0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
+       0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
+       0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
+       0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
+       0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
+       0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
+       0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
+       0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
+       0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
+       0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
+       0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
+};
+
+static u16 sunxi_nfc_randomizer_step(u16 state, int count)
+{
+       state &= 0x7fff;
+
+       /*
+        * This loop is just a simple implementation of a Fibonacci LFSR using
+        * the x16 + x15 + 1 polynomial.
+        */
+       while (count--)
+               state = ((state >> 1) |
+                        (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+       return state;
+}
+
+static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
+{
+       const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
+       int mod = mtd_div_by_ws(mtd->erasesize, mtd);
+
+       if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
+               mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
+
+       if (ecc) {
+               if (mtd->ecc_step_size == 512)
+                       seeds = sunxi_nfc_randomizer_ecc512_seeds;
+               else
+                       seeds = sunxi_nfc_randomizer_ecc1024_seeds;
+       }
+
+       return seeds[page % mod];
+}
+
+static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
+                                       int page, bool ecc)
+{
+       struct nand_chip *nand = mtd->priv;
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+       u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+       u16 state;
+
+       if (!(nand->options & NAND_NEED_SCRAMBLING))
+               return;
+
+       ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+       state = sunxi_nfc_randomizer_state(mtd, page, ecc);
+       ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
+       writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
+{
+       struct nand_chip *nand = mtd->priv;
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+       if (!(nand->options & NAND_NEED_SCRAMBLING))
+               return;
+
+       writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+              nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
+{
+       struct nand_chip *nand = mtd->priv;
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+       if (!(nand->options & NAND_NEED_SCRAMBLING))
+               return;
+
+       writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+              nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
+{
+       u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
+
+       bbm[0] ^= state;
+       bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
+}
+
+static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
+                                          const uint8_t *buf, int len,
+                                          bool ecc, int page)
+{
+       sunxi_nfc_randomizer_config(mtd, page, ecc);
+       sunxi_nfc_randomizer_enable(mtd);
+       sunxi_nfc_write_buf(mtd, buf, len);
+       sunxi_nfc_randomizer_disable(mtd);
+}
+
+static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
+                                         int len, bool ecc, int page)
+{
+       sunxi_nfc_randomizer_config(mtd, page, ecc);
+       sunxi_nfc_randomizer_enable(mtd);
+       sunxi_nfc_read_buf(mtd, buf, len);
+       sunxi_nfc_randomizer_disable(mtd);
+}
+
 static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
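The randomizer hunk above documents a Fibonacci LFSR and per-page seed tables used for NAND data scrambling. A standalone userspace sketch of that stepping function (illustrative only; the driver code itself is in the hunk, and the printed value is not asserted against the seed tables):

        #include <stdint.h>
        #include <stdio.h>

        /* Same recurrence as sunxi_nfc_randomizer_step() in the hunk above. */
        static uint16_t lfsr_step(uint16_t state, int count)
        {
                state &= 0x7fff;
                while (count--)
                        state = ((state >> 1) |
                                 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
                return state;
        }

        int main(void)
        {
                uint16_t seed = 0x2b75; /* first page seed from the table above */

                /* Advancing by (step_size * 8) + 15 bits is how the ecc512/
                 * ecc1024 seed tables were generated, per the comment above. */
                printf("page seed 0x%04x -> 512-byte ECC seed 0x%04x\n",
                       seed, lfsr_step(seed, 512 * 8 + 15));
                return 0;
        }
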
@@ -574,18 +743,20 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
                                       u8 *data, int data_off,
                                       u8 *oob, int oob_off,
                                       int *cur_off,
-                                      unsigned int *max_bitflips)
+                                      unsigned int *max_bitflips,
+                                      bool bbm, int page)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
+       int raw_mode = 0;
        u32 status;
        int ret;
 
        if (*cur_off != data_off)
                nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
 
-       sunxi_nfc_read_buf(mtd, NULL, ecc->size);
+       sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
 
        if (data_off + ecc->size != oob_off)
                nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
@@ -594,25 +765,54 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
        if (ret)
                return ret;
 
+       sunxi_nfc_randomizer_enable(mtd);
        writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
               nfc->regs + NFC_REG_CMD);
 
        ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+       sunxi_nfc_randomizer_disable(mtd);
        if (ret)
                return ret;
 
+       *cur_off = oob_off + ecc->bytes + 4;
+
        status = readl(nfc->regs + NFC_REG_ECC_ST);
+       if (status & NFC_ECC_PAT_FOUND(0)) {
+               u8 pattern = 0xff;
+
+               if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1)))
+                       pattern = 0x0;
+
+               memset(data, pattern, ecc->size);
+               memset(oob, pattern, ecc->bytes + 4);
+
+               return 1;
+       }
+
        ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0)));
 
        memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
 
        nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
-       sunxi_nfc_read_buf(mtd, oob, ecc->bytes + 4);
+       sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page);
 
        if (status & NFC_ECC_ERR(0)) {
+               /*
+                * Re-read the data with the randomizer disabled to identify
+                * bitflips in erased pages.
+                */
+               if (nand->options & NAND_NEED_SCRAMBLING) {
+                       nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+                       nand->read_buf(mtd, data, ecc->size);
+                       nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+                       nand->read_buf(mtd, oob, ecc->bytes + 4);
+               }
+
                ret = nand_check_erased_ecc_chunk(data, ecc->size,
                                                  oob, ecc->bytes + 4,
                                                  NULL, 0, ecc->strength);
+               if (ret >= 0)
+                       raw_mode = 1;
        } else {
                /*
                 * The engine protects 4 bytes of OOB data per chunk.
@@ -620,6 +820,10 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
                 */
                sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)),
                                           oob);
+
+               /* De-randomize the Bad Block Marker. */
+               if (bbm && nand->options & NAND_NEED_SCRAMBLING)
+                       sunxi_nfc_randomize_bbm(mtd, page, oob);
        }
 
        if (ret < 0) {
@@ -629,13 +833,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
                *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
        }
 
-       *cur_off = oob_off + ecc->bytes + 4;
-
-       return 0;
+       return raw_mode;
 }
 
 static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
-                                           u8 *oob, int *cur_off)
+                                           u8 *oob, int *cur_off,
+                                           bool randomize, int page)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -649,7 +852,11 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
                nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
                              offset + mtd->writesize, -1);
 
-       sunxi_nfc_read_buf(mtd, oob + offset, len);
+       if (!randomize)
+               sunxi_nfc_read_buf(mtd, oob + offset, len);
+       else
+               sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
+                                             false, page);
 
        *cur_off = mtd->oobsize + mtd->writesize;
 }
@@ -662,7 +869,8 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
 static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
                                        const u8 *data, int data_off,
                                        const u8 *oob, int oob_off,
-                                       int *cur_off)
+                                       int *cur_off, bool bbm,
+                                       int page)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
@@ -672,11 +880,20 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
        if (data_off != *cur_off)
                nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
 
-       sunxi_nfc_write_buf(mtd, data, ecc->size);
+       sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
 
        /* Fill OOB data in */
-       writel(sunxi_nfc_buf_to_user_data(oob),
-              nfc->regs + NFC_REG_USER_DATA(0));
+       if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) {
+               u8 user_data[4];
+
+               memcpy(user_data, oob, 4);
+               sunxi_nfc_randomize_bbm(mtd, page, user_data);
+               writel(sunxi_nfc_buf_to_user_data(user_data),
+                      nfc->regs + NFC_REG_USER_DATA(0));
+       } else {
+               writel(sunxi_nfc_buf_to_user_data(oob),
+                      nfc->regs + NFC_REG_USER_DATA(0));
+       }
 
        if (data_off + ecc->size != oob_off)
                nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
@@ -685,11 +902,13 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
        if (ret)
                return ret;
 
+       sunxi_nfc_randomizer_enable(mtd);
        writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
               NFC_ACCESS_DIR | NFC_ECC_OP,
               nfc->regs + NFC_REG_CMD);
 
        ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+       sunxi_nfc_randomizer_disable(mtd);
        if (ret)
                return ret;
 
@@ -699,7 +918,8 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
 }
 
 static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
-                                            u8 *oob, int *cur_off)
+                                            u8 *oob, int *cur_off,
+                                            int page)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -713,7 +933,7 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
                nand->cmdfunc(mtd, NAND_CMD_RNDIN,
                              offset + mtd->writesize, -1);
 
-       sunxi_nfc_write_buf(mtd, oob + offset, len);
+       sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
 
        *cur_off = mtd->oobsize + mtd->writesize;
 }
@@ -725,6 +945,7 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        unsigned int max_bitflips = 0;
        int ret, i, cur_off = 0;
+       bool raw_mode = false;
 
        sunxi_nfc_hw_ecc_enable(mtd);
 
@@ -736,13 +957,17 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
 
                ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
                                                  oob_off + mtd->writesize,
-                                                 &cur_off, &max_bitflips);
-               if (ret)
+                                                 &cur_off, &max_bitflips,
+                                                 !i, page);
+               if (ret < 0)
                        return ret;
+               else if (ret)
+                       raw_mode = true;
        }
 
        if (oob_required)
-               sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off);
+               sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+                                               !raw_mode, page);
 
        sunxi_nfc_hw_ecc_disable(mtd);
 
@@ -767,13 +992,14 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
 
                ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
                                                   oob_off + mtd->writesize,
-                                                  &cur_off);
+                                                  &cur_off, !i, page);
                if (ret)
                        return ret;
        }
 
-       if (oob_required)
-               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off);
+       if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+                                                &cur_off, page);
 
        sunxi_nfc_hw_ecc_disable(mtd);
 
@@ -788,6 +1014,7 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        unsigned int max_bitflips = 0;
        int ret, i, cur_off = 0;
+       bool raw_mode = false;
 
        sunxi_nfc_hw_ecc_enable(mtd);
 
@@ -799,13 +1026,16 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
 
                ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
                                                  oob_off, &cur_off,
-                                                 &max_bitflips);
-               if (ret)
+                                                 &max_bitflips, !i, page);
+               if (ret < 0)
                        return ret;
+               else if (ret)
+                       raw_mode = true;
        }
 
        if (oob_required)
-               sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off);
+               sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+                                               !raw_mode, page);
 
        sunxi_nfc_hw_ecc_disable(mtd);
 
@@ -829,13 +1059,15 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
                const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
 
                ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
-                                                  oob, oob_off, &cur_off);
+                                                  oob, oob_off, &cur_off,
+                                                  false, page);
                if (ret)
                        return ret;
        }
 
-       if (oob_required)
-               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off);
+       if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+                                                &cur_off, page);
 
        sunxi_nfc_hw_ecc_disable(mtd);
 
@@ -1345,6 +1577,9 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
        if (nand->bbt_options & NAND_BBT_USE_FLASH)
                nand->bbt_options |= NAND_BBT_NO_OOB;
 
+       if (nand->options & NAND_NEED_SCRAMBLING)
+               nand->options |= NAND_NO_SUBPAGE_WRITE;
+
        ret = sunxi_nand_chip_init_timings(chip, np);
        if (ret) {
                dev_err(dev, "could not configure chip timings: %d\n", ret);
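
A minimal sketch (illustrative helper names, not the driver's) of the tri-state return convention the ECC read-page paths above rely on: a negative value is a hard error, 0 means the chunk was de-randomized normally, and a positive value means the chunk was taken as-is (raw); a single raw chunk is enough to make the extra OOB bytes be read unscrambled as well.

        /* Hypothetical stand-ins for the controller accessors. */
        static int demo_read_chunk(int chunk)
        {
                return chunk == 0 ? 1 : 0;      /* pretend chunk 0 came back raw */
        }
        static void demo_read_extra_oob(bool derandomize) { (void)derandomize; }

        static int demo_read_page(int nchunks)
        {
                bool raw_mode = false;
                int i, ret;

                for (i = 0; i < nchunks; i++) {
                        ret = demo_read_chunk(i);
                        if (ret < 0)
                                return ret;             /* propagate hard errors  */
                        if (ret)
                                raw_mode = true;        /* at least one raw chunk */
                }

                demo_read_extra_oob(!raw_mode); /* de-randomize only if no raw chunk */
                return 0;
        }
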
index 034420f313d500634e4ad66e46129535fac619d7..293feb19b0b149c7add9d4e2bf7dc0f2a8313275 100644 (file)
@@ -795,8 +795,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
                        goto error;
                }
 
-               /* propagate ecc.layout to mtd_info */
-               mtd->ecclayout = chip->ecc.layout;
                chip->ecc.read_page = vf610_nfc_read_page;
                chip->ecc.write_page = vf610_nfc_write_page;
 
index 08d0085f3e939fb277cc9c21b3b3004eab662206..680188a881301ee195af0320694324da9ca2caeb 100644 (file)
@@ -179,7 +179,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
  * by the onenand_release function.
  *
  */
-int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
 {
        struct onenand_chip *this = mtd->priv;
        struct bbm_info *bbm = this->bbm;
@@ -247,6 +247,3 @@ int onenand_default_bbt(struct mtd_info *mtd)
 
        return onenand_scan_bbt(mtd, bbm->badblock_pattern);
 }
-
-EXPORT_SYMBOL(onenand_scan_bbt);
-EXPORT_SYMBOL(onenand_default_bbt);
index 0dc927540b3d1627fbe77fce5e3bff09414fe4fb..83befab4b5b4a13121b2dc1b4c816baaca6e238a 100644 (file)
@@ -9,6 +9,7 @@ if MTD_SPI_NOR
 
 config MTD_MT81xx_NOR
        tristate "Mediatek MT81xx SPI NOR flash controller"
+       depends on HAS_IOMEM
        help
          This enables access to SPI NOR flash, using MT81xx SPI NOR flash
          controller. This controller does not support generic SPI BUS, it only
index d5f850d035bb93fb3a7c6547901ddfe84c3c6f04..8bed1a4cb79ce585d88623dc7d6784efaf58ccb7 100644 (file)
@@ -371,8 +371,8 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
        return ret;
 }
 
-static int __init mtk_nor_init(struct mt8173_nor *mt8173_nor,
-                              struct device_node *flash_node)
+static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
+                       struct device_node *flash_node)
 {
        int ret;
        struct spi_nor *nor;
index 4cbb8b27a891237a2541c6fcf506ae2e0db08686..ee94056dbb2ea6e239905ecb2e46c9b8d737565e 100644 (file)
@@ -357,6 +357,14 @@ static u8 __get_duplex(struct port *port)
        return retval;
 }
 
+static void __ad_actor_update_port(struct port *port)
+{
+       const struct bonding *bond = bond_get_bond_by_slave(port->slave);
+
+       port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+       port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+}
+
 /* Conversions */
 
 /**
@@ -1963,9 +1971,7 @@ void bond_3ad_bind_slave(struct slave *slave)
                port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
                ad_update_actor_keys(port, false);
                /* actor system is the bond's system */
-               port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
-               port->actor_system_priority =
-                   BOND_AD_INFO(bond).system.sys_priority;
+               __ad_actor_update_port(port);
                /* tx timer(to verify that no more than MAX_TX_IN_SECOND
                 * lacpdu's are sent in one second)
                 */
@@ -2147,6 +2153,34 @@ out:
        spin_unlock_bh(&bond->mode_lock);
 }
 
+/**
+ * bond_3ad_update_ad_actor_settings - reflect change of actor settings to ports
+ * @bond: bonding struct to work on
+ *
+ * If an ad_actor setting gets changed we need to update the individual port
+ * settings so the bond device will use the new values when it gets upped.
+ */
+void bond_3ad_update_ad_actor_settings(struct bonding *bond)
+{
+       struct list_head *iter;
+       struct slave *slave;
+
+       ASSERT_RTNL();
+
+       BOND_AD_INFO(bond).system.sys_priority = bond->params.ad_actor_sys_prio;
+       if (is_zero_ether_addr(bond->params.ad_actor_system))
+               BOND_AD_INFO(bond).system.sys_mac_addr =
+                   *((struct mac_addr *)bond->dev->dev_addr);
+       else
+               BOND_AD_INFO(bond).system.sys_mac_addr =
+                   *((struct mac_addr *)bond->params.ad_actor_system);
+
+       spin_lock_bh(&bond->mode_lock);
+       bond_for_each_slave(bond, slave, iter)
+               __ad_actor_update_port(&(SLAVE_AD_INFO(slave)->port));
+       spin_unlock_bh(&bond->mode_lock);
+}
+
 /**
  * bond_3ad_state_machine_handler - handle state machines timeout
  * @bond: bonding struct to work on
index 56b560558884dc6d87a0081d7d99c96ac4e99a67..705cb0198faa7c065b4724f6019f0f117e99f0d5 100644 (file)
@@ -618,8 +618,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
 static void bond_set_dev_addr(struct net_device *bond_dev,
                              struct net_device *slave_dev)
 {
-       netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
-                  bond_dev, slave_dev, slave_dev->addr_len);
+       netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
+                  bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
        memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
        bond_dev->addr_assign_type = NET_ADDR_STOLEN;
        call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
@@ -928,11 +928,10 @@ void bond_select_active_slave(struct bonding *bond)
                if (!rv)
                        return;
 
-               if (netif_carrier_ok(bond->dev)) {
+               if (netif_carrier_ok(bond->dev))
                        netdev_info(bond->dev, "first active interface up!\n");
-               } else {
+               else
                        netdev_info(bond->dev, "now running without any active interface!\n");
-               }
        }
 }
 
@@ -1178,9 +1177,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                }
        }
 
-       if (bond_should_deliver_exact_match(skb, slave, bond)) {
+       if (bond_should_deliver_exact_match(skb, slave, bond))
                return RX_HANDLER_EXACT;
-       }
 
        skb->dev = bond->dev;
 
@@ -1241,7 +1239,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
 {
        struct slave *slave = NULL;
 
-       slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+       slave = kzalloc(sizeof(*slave), GFP_KERNEL);
        if (!slave)
                return NULL;
 
@@ -3309,6 +3307,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
                stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
                stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
                stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
+               stats->rx_nohandler += sstats->rx_nohandler - pstats->rx_nohandler;
 
                stats->tx_packets += sstats->tx_packets - pstats->tx_packets;
                stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
index 55e93b6b6d2150f2687f36bdeebe5db8c4ab2b01..ed0bdae64f5e436e082249b2f38fb51ab694e3a1 100644 (file)
@@ -1392,6 +1392,8 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
                    newval->value);
 
        bond->params.ad_actor_sys_prio = newval->value;
+       bond_3ad_update_ad_actor_settings(bond);
+
        return 0;
 }
 
@@ -1418,6 +1420,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
 
        netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
        ether_addr_copy(bond->params.ad_actor_system, mac);
+       bond_3ad_update_ad_actor_settings(bond);
+
        return 0;
 
 err:
index 9fe33fc3c2b9a54ddbd0db4e8a03d750553b6d7c..cf34681af4f625d07bc43c77d528d632325bdc0d 100644 (file)
@@ -1532,7 +1532,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
 
        /* no PVID with ranges, otherwise it's a bug */
        if (pvid)
-               err = _mv88e6xxx_port_pvid_set(ds, port, vid);
+               err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
 unlock:
        mutex_unlock(&ps->smi_mutex);
 
@@ -2163,7 +2163,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
         * database, and allow every port to egress frames on all other ports.
         */
        reg = BIT(ps->num_ports) - 1; /* all ports */
-       ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port);
+       reg &= ~BIT(port); /* except itself */
+       ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg);
        if (ret)
                goto abort;
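
The second hunk above also corrects the port-isolation mask: the old code cleared bit positions taken from the port number itself rather than from BIT(port). A small worked example with illustrative values (not taken from the driver):

        int port  = 2;                  /* illustrative: port 2 of a 7-port switch */
        u16 reg   = BIT(7) - 1;         /* 0x7f: allow egress to every port        */
        u16 wrong = reg & ~port;        /* 0x7d: clears bit 1, not port 2's bit    */
        u16 right = reg & ~BIT(port);   /* 0x7b: clears only the port's own bit    */
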
 
index a4799c1fc7d4de55c38a0e1a99c1aa42985d5143..5eb9b20c0eeab09954a909290ac5bd9bf3fdcb06 100644 (file)
@@ -628,6 +628,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
        int ret;
 
        ring = pdata->rx_ring;
+       irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
        ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                               IRQF_SHARED, ring->irq_name, ring);
        if (ret)
@@ -635,6 +636,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
 
        if (pdata->cq_cnt) {
                ring = pdata->tx_ring->cp_ring;
+               irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       IRQF_SHARED, ring->irq_name, ring);
                if (ret) {
@@ -649,15 +651,19 @@ static int xgene_enet_register_irq(struct net_device *ndev)
 static void xgene_enet_free_irq(struct net_device *ndev)
 {
        struct xgene_enet_pdata *pdata;
+       struct xgene_enet_desc_ring *ring;
        struct device *dev;
 
        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);
-       devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
+       ring = pdata->rx_ring;
+       irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+       devm_free_irq(dev, ring->irq, ring);
 
        if (pdata->cq_cnt) {
-               devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
-                             pdata->tx_ring->cp_ring);
+               ring = pdata->tx_ring->cp_ring;
+               irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+               devm_free_irq(dev, ring->irq, ring);
        }
 }
 
index 70d5b62c125a489ceb5fafaf21b96988cecaab5c..248dfc40a7611a0b298336c0aa552665da165ba7 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/efi.h>
+#include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
index ecc4a334c50727c7a93b07576b540fc4b5b53d58..f71ab2647a3bc1f62f6b7450b34dd325c710549d 100644 (file)
@@ -302,7 +302,7 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
        nb8800_tx_done(dev);
 
 again:
-       while (work < budget) {
+       do {
                struct nb8800_rx_buf *rxb;
                unsigned int len;
 
@@ -330,7 +330,7 @@ again:
                rxd->report = 0;
                last = next;
                work++;
-       }
+       } while (work < budget);
 
        if (work) {
                priv->rx_descs[last].desc.config |= DESC_EOC;
index 8550df189ceb709290c8b1caad4a3132dcb6a810..19f7cd02e08527ec234b1e8de426bad1fd37cadb 100644 (file)
@@ -151,8 +151,11 @@ config BNX2X_VXLAN
 
 config BGMAC
        tristate "BCMA bus GBit core support"
-       depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
+       depends on BCMA && BCMA_HOST_SOC
+       depends on HAS_DMA
+       depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
        select PHYLIB
+       select FIXED_PHY
        ---help---
          This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
          They can be found on BCM47xx SoCs and provide gigabit ethernet.
index 06f6cffdfaf54a6dd56209f2ae9ac38c6f508fc4..230f8e6209e57f22e1c5fc4c3cf3a6ade4aca95f 100644 (file)
@@ -26,6 +26,17 @@ static const struct bcma_device_id bgmac_bcma_tbl[] = {
 };
 MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
 
+static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
+{
+       switch (bgmac->core->bus->chipinfo.id) {
+       case BCMA_CHIP_ID_BCM4707:
+       case BCMA_CHIP_ID_BCM53018:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
                             u32 value, int timeout)
 {
@@ -987,11 +998,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
 static void bgmac_miiconfig(struct bgmac *bgmac)
 {
        struct bcma_device *core = bgmac->core;
-       struct bcma_chipinfo *ci = &core->bus->chipinfo;
        u8 imode;
 
-       if (ci->id == BCMA_CHIP_ID_BCM4707 ||
-           ci->id == BCMA_CHIP_ID_BCM53018) {
+       if (bgmac_is_bcm4707_family(bgmac)) {
                bcma_awrite32(core, BCMA_IOCTL,
                              bcma_aread32(core, BCMA_IOCTL) | 0x40 |
                              BGMAC_BCMA_IOCTL_SW_CLKEN);
@@ -1055,9 +1064,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
        }
 
        /* Request Misc PLL for corerev > 2 */
-       if (core->id.rev > 2 &&
-           ci->id != BCMA_CHIP_ID_BCM4707 &&
-           ci->id != BCMA_CHIP_ID_BCM53018) {
+       if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
                bgmac_set(bgmac, BCMA_CLKCTLST,
                          BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
                bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
@@ -1193,8 +1200,7 @@ static void bgmac_enable(struct bgmac *bgmac)
                break;
        }
 
-       if (ci->id != BCMA_CHIP_ID_BCM4707 &&
-           ci->id != BCMA_CHIP_ID_BCM53018) {
+       if (!bgmac_is_bcm4707_family(bgmac)) {
                rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
                rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
                bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
@@ -1472,14 +1478,12 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
 
 static int bgmac_mii_register(struct bgmac *bgmac)
 {
-       struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        char bus_id[MII_BUS_ID_SIZE + 3];
        int err = 0;
 
-       if (ci->id == BCMA_CHIP_ID_BCM4707 ||
-           ci->id == BCMA_CHIP_ID_BCM53018)
+       if (bgmac_is_bcm4707_family(bgmac))
                return bgmac_fixed_phy_register(bgmac);
 
        mii_bus = mdiobus_alloc();
@@ -1539,7 +1543,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac)
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
 static int bgmac_probe(struct bcma_device *core)
 {
-       struct bcma_chipinfo *ci = &core->bus->chipinfo;
        struct net_device *net_dev;
        struct bgmac *bgmac;
        struct ssb_sprom *sprom = &core->bus->sprom;
@@ -1620,8 +1623,7 @@ static int bgmac_probe(struct bcma_device *core)
        bgmac_chip_reset(bgmac);
 
        /* For Northstar, we have to take all GMAC core out of reset */
-       if (ci->id == BCMA_CHIP_ID_BCM4707 ||
-           ci->id == BCMA_CHIP_ID_BCM53018) {
+       if (bgmac_is_bcm4707_family(bgmac)) {
                struct bcma_device *ns_core;
                int ns_gmac;
 
index df835f5e46d85f374cb1e0d9adf1fd8e16cb40f3..5dc89e527e7deefe04c831d9ec556ed76ac40d26 100644 (file)
@@ -1490,10 +1490,11 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 
                        last = tx_buf->nr_frags;
                        j += 2;
-                       for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+                       for (k = 0; k < last; k++, j++) {
+                               int ring_idx = j & bp->tx_ring_mask;
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
 
-                               tx_buf = &txr->tx_buf_ring[j];
+                               tx_buf = &txr->tx_buf_ring[ring_idx];
                                dma_unmap_page(
                                        &pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
@@ -3406,7 +3407,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
        u16 error_code;
 
-       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
        req.ring_type = ring_type;
        req.ring_id = cpu_to_le16(ring->fw_ring_id);
 
@@ -4819,8 +4820,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
                stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
 
-               stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
-
                stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
        }
 
index 0d775964b06016bfcc66352c99c0164c6957d773..457c3bc8cfff49654b45e879495e5cbfa88428d6 100644 (file)
@@ -401,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
         * Ethernet MAC ISRs
         */
        if (priv->internal_phy)
-               priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT;
+               priv->phydev->irq = PHY_IGNORE_INTERRUPT;
 
        return 0;
 }
index 9293675df7ba99c44b2f77c739fbe12da0e86093..3010080cfeee350a2e523ce0c26aeb427953f21a 100644 (file)
@@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
        return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+       /* Check if we will never have enough descriptors,
+        * as gso_segs can be more than current ring size
+        */
+       return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
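
For scale, a worked number for the check above, assuming the driver's usual default of 511 pending TX descriptors (an assumption here, not something shown in this hunk):

        /*
         * Illustrative: with tnapi->tx_pending == 511, 511 / 3 == 170, so a
         * GSO skb with 170 or more segments is now dropped instead of being
         * handed to the tg3_tso_bug() slow path, which could never collect
         * enough descriptors for it.
         */
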
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * vlan encapsulated.
                 */
                if (skb->protocol == htons(ETH_P_8021Q) ||
-                   skb->protocol == htons(ETH_P_8021AD))
-                       return tg3_tso_bug(tp, tnapi, txq, skb);
+                   skb->protocol == htons(ETH_P_8021AD)) {
+                       if (tg3_tso_bug_gso_check(tnapi, skb))
+                               return tg3_tso_bug(tp, tnapi, txq, skb);
+                       goto drop;
+               }
 
                if (!skb_is_gso_v6(skb)) {
                        if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-                           tg3_flag(tp, TSO_BUG))
-                               return tg3_tso_bug(tp, tnapi, txq, skb);
-
+                           tg3_flag(tp, TSO_BUG)) {
+                               if (tg3_tso_bug_gso_check(tnapi, skb))
+                                       return tg3_tso_bug(tp, tnapi, txq, skb);
+                               goto drop;
+                       }
                        ip_csum = iph->check;
                        ip_tot_len = iph->tot_len;
                        iph->check = 0;
@@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-               if (mss) {
+               if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
                        /* If it's a TSO packet, do GSO instead of
                         * allocating and copying to a large linear SKB
                         */
@@ -12016,7 +12029,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        int ret;
        u32 offset, len, b_offset, odd_len;
        u8 *buf;
-       __be32 start, end;
+       __be32 start = 0, end;
 
        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
index 9d9984a87d4227f0b79c01e5ff1a3877da41b7e3..50c94104f19cc5d90b5fb3d4643bae1d4624e127 100644 (file)
@@ -2823,7 +2823,7 @@ static int macb_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct device_node *phy_node;
        const struct macb_config *macb_config = NULL;
-       struct clk *pclk, *hclk, *tx_clk;
+       struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
        unsigned int queue_mask, num_queues;
        struct macb_platform_data *pdata;
        bool native_io;
index b89504405b7207f12663aee0d6d3ecf78d392235..872765527081ab3d48d7701f8a9d8705553640fc 100644 (file)
@@ -1526,7 +1526,6 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
                                struct timespec64 *ts)
 {
        u64 ns;
-       u32 remainder;
        unsigned long flags;
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
@@ -1536,8 +1535,7 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
        ns += lio->ptp_adjust;
        spin_unlock_irqrestore(&lio->ptp_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
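
The generic helper replaces the open-coded split; a quick worked value for reference:

        struct timespec64 demo = ns_to_timespec64(1234567890123ULL);
        /* demo.tv_sec == 1234, demo.tv_nsec == 567890123 -- the same result
         * the removed div_u64_rem() pair computed by hand.
         */
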
index 1671fa3332c2d3011db8c308a17ea5a9bf8b3fb2..7ba6d530b0c0ab6e3d1c3d4ea21b0b950ebe3c3f 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.3.0.12"
+#define DRV_VERSION            "2.3.0.20"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
index 1ffd1050860bb5cde576fd291e0651b351d578ff..1fdf5fe12a9562251e3bcee9fe635872053357a2 100644 (file)
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                          int wait)
 {
        struct devcmd2_controller *dc2c = vdev->devcmd2;
-       struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+       struct devcmd2_result *result;
+       u8 color;
        unsigned int i;
        int delay, err;
        u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
                return 0;
 
+       result = dc2c->result + dc2c->next_result;
+       color = dc2c->color;
+
+       dc2c->next_result++;
+       if (dc2c->next_result == dc2c->result_size) {
+               dc2c->next_result = 0;
+               dc2c->color = dc2c->color ? 0 : 1;
+       }
+
        for (delay = 0; delay < wait; delay++) {
-               if (result->color == dc2c->color) {
-                       dc2c->next_result++;
-                       if (dc2c->next_result == dc2c->result_size) {
-                               dc2c->next_result = 0;
-                               dc2c->color = dc2c->color ? 0 : 1;
-                       }
+               if (result->color == color) {
                        if (result->error) {
                                err = result->error;
                                if (err != ERR_ECMDUNKNOWN ||
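
The reordering above matters because of how the result ring signals completion; a minimal sketch of that convention (illustrative names, not the driver's), assuming the device toggles the color bit it writes on every wrap of the ring:

        struct demo_result {
                u8 color;                       /* flipped by the device each wrap */
                u8 error;
        };

        struct demo_ring {
                struct demo_result *slots;
                unsigned int size, next;
                u8 color;                       /* color expected on this pass     */
        };

        /* Snapshot the slot and the expected color *before* polling, then
         * advance next/color so a later command never re-reads a stale slot --
         * the same ordering the hunk above establishes.
         */
        static struct demo_result *demo_claim_result(struct demo_ring *ring, u8 *color)
        {
                struct demo_result *slot = &ring->slots[ring->next];

                *color = ring->color;
                if (++ring->next == ring->size) {
                        ring->next = 0;
                        ring->color ^= 1;
                }
                return slot;
        }
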
index cf837831304be2f4d7edaebf68880a44f96bd057..515e206589cca9a20e7bae54ac9529849387c088 100644 (file)
 #define BE3_MAX_TX_QS          16
 #define BE3_MAX_EVT_QS         16
 #define BE3_SRIOV_MAX_EVT_QS   8
+#define SH_VF_MAX_NIC_EQS      3       /* Skyhawk VFs can have a max of 4 EQs
+                                        * and at least 1 is granted to either
+                                        * SURF or DPDK
+                                        */
 
 #define MAX_RSS_IFACES         15
 #define MAX_RX_QS              32
@@ -393,6 +397,10 @@ enum vf_state {
 #define BE_UC_PMAC_COUNT                       30
 #define BE_VF_UC_PMAC_COUNT                    2
 
+#define MAX_ERR_RECOVERY_RETRY_COUNT           3
+#define ERR_DETECTION_DELAY                    1000
+#define ERR_RECOVERY_RETRY_DELAY               30000
+
 /* Ethtool set_dump flags */
 #define LANCER_INITIATE_FW_DUMP                        0x1
 #define LANCER_DELETE_FW_DUMP                  0x2
@@ -530,6 +538,7 @@ struct be_adapter {
        u16 work_counter;
 
        struct delayed_work be_err_detection_work;
+       u8 recovery_retries;
        u8 err_flags;
        u32 flags;
        u32 cmd_privileges;
index b63d8ad2e1157a1d6d9f5344711609d6fc7dbe1d..7d51d4733890fb682eced3aac6e2ae906a11777e 100644 (file)
@@ -65,7 +65,22 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
-       }
+       },
+       {
+               OPCODE_LOWLEVEL_HOST_DDR_DMA,
+               CMD_SUBSYSTEM_LOWLEVEL,
+               BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+       },
+       {
+               OPCODE_LOWLEVEL_LOOPBACK_TEST,
+               CMD_SUBSYSTEM_LOWLEVEL,
+               BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+       },
+       {
+               OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+               CMD_SUBSYSTEM_LOWLEVEL,
+               BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+       },
 };
 
 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -236,7 +251,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
        if (base_status != MCC_STATUS_SUCCESS &&
            !be_skip_err_log(opcode, base_status, addl_status)) {
-               if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+               if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
+                   addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
                        dev_warn(&adapter->pdev->dev,
                                 "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
@@ -3168,6 +3184,10 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
        struct be_cmd_req_set_lmode *req;
        int status;
 
+       if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+                           CMD_SUBSYSTEM_LOWLEVEL))
+               return -EPERM;
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
@@ -3213,6 +3233,10 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        struct be_cmd_resp_loopback_test *resp;
        int status;
 
+       if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
+                           CMD_SUBSYSTEM_LOWLEVEL))
+               return -EPERM;
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
@@ -3259,6 +3283,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
        int status;
        int i, j = 0;
 
+       if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
+                           CMD_SUBSYSTEM_LOWLEVEL))
+               return -EPERM;
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
index 241819b36ca72ac6c133875f88d17f4359c0c5d4..f260ef3329a17973ef1d887c450fae1bbb8ae74e 100644 (file)
@@ -68,7 +68,8 @@ enum mcc_addl_status {
        MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
        MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab,
        MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56,
-       MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57
+       MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57,
+       MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES = 0x60
 };
 
 #define CQE_BASE_STATUS_MASK           0xFFFF
index a19ac441336f7fe52fe991a67837ed9010cd2098..2ff691636dac3b8f518f8c054ce5c90dc8b00ffe 100644 (file)
@@ -720,29 +720,32 @@ static int be_set_phys_id(struct net_device *netdev,
                          enum ethtool_phys_id_state state)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       int status = 0;
 
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
-               be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
-                                       &adapter->beacon_state);
-               return 1;       /* cycle on/off once per second */
+               status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
+                                                &adapter->beacon_state);
+               if (status)
+                       return be_cmd_status(status);
+               return 1;       /* cycle on/off once per second */
 
        case ETHTOOL_ID_ON:
-               be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
-                                       BEACON_STATE_ENABLED);
+               status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+                                                0, 0, BEACON_STATE_ENABLED);
                break;
 
        case ETHTOOL_ID_OFF:
-               be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
-                                       BEACON_STATE_DISABLED);
+               status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+                                                0, 0, BEACON_STATE_DISABLED);
                break;
 
        case ETHTOOL_ID_INACTIVE:
-               be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
-                                       adapter->beacon_state);
+               status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+                                                0, 0, adapter->beacon_state);
        }
 
-       return 0;
+       return be_cmd_status(status);
 }
 
 static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
index f99de3657ce3b5f58b6f08f1f97f470bb11d7788..9c1fc9dcea250e391526f6a9010985204244b1f5 100644 (file)
@@ -1463,6 +1463,9 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (lancer_chip(adapter) && vid == 0)
                return 0;
 
+       if (!test_bit(vid, adapter->vids))
+               return 0;
+
        clear_bit(vid, adapter->vids);
        adapter->vlans_added--;
 
@@ -1914,8 +1917,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
        if (!aic->enable)
                return 0;
 
-       if (time_before_eq(now, aic->jiffies) ||
-           jiffies_to_msecs(now - aic->jiffies) < 1)
+       if (jiffies_to_msecs(now - aic->jiffies) < 1)
                eqd = aic->prev_eqd;
        else
                eqd = be_get_new_eqd(eqo);
@@ -3789,18 +3791,15 @@ static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
        struct be_resources res = adapter->pool_res;
        u16 num_vf_qs = 1;
 
-       /* Distribute the queue resources equally among the PF and it's VFs
+       /* Distribute the queue resources among the PF and its VFs
         * Do not distribute queue resources in multi-channel configuration.
         */
        if (num_vfs && !be_is_mc(adapter)) {
-               /* If number of VFs requested is 8 less than max supported,
-                * assign 8 queue pairs to the PF and divide the remaining
-                * resources evenly among the VFs
-                */
-               if (num_vfs < (be_max_vfs(adapter) - 8))
-                       num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
-               else
-                       num_vf_qs = res.max_rss_qs / num_vfs;
+                /* Divide the qpairs evenly among the VFs and the PF, capped
+                 * at VF-EQ-count. Any remainder qpairs belong to the PF.
+                 */
+               num_vf_qs = min(SH_VF_MAX_NIC_EQS,
+                               res.max_rss_qs / (num_vfs + 1));
 
                /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
                 * interfaces per port. Provide RSS on VFs, only if number
@@ -4265,10 +4264,10 @@ static void be_schedule_worker(struct be_adapter *adapter)
        adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
 }
 
-static void be_schedule_err_detection(struct be_adapter *adapter)
+static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
 {
        schedule_delayed_work(&adapter->be_err_detection_work,
-                             msecs_to_jiffies(1000));
+                             msecs_to_jiffies(delay));
        adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
 }
 
@@ -4859,21 +4858,27 @@ static int be_resume(struct be_adapter *adapter)
 
 static int be_err_recover(struct be_adapter *adapter)
 {
-       struct device *dev = &adapter->pdev->dev;
        int status;
 
+       /* Error recovery is supported only on Lancer as of now */
+       if (!lancer_chip(adapter))
+               return -EIO;
+
+       /* Wait for adapter to reach quiescent state before
+        * destroying queues
+        */
+       status = be_fw_wait_ready(adapter);
+       if (status)
+               goto err;
+
+       be_cleanup(adapter);
+
        status = be_resume(adapter);
        if (status)
                goto err;
 
-       dev_info(dev, "Adapter recovery successful\n");
        return 0;
 err:
-       if (be_physfn(adapter))
-               dev_err(dev, "Adapter recovery failed\n");
-       else
-               dev_err(dev, "Re-trying adapter recovery\n");
-
        return status;
 }
 
@@ -4882,21 +4887,43 @@ static void be_err_detection_task(struct work_struct *work)
        struct be_adapter *adapter =
                                container_of(work, struct be_adapter,
                                             be_err_detection_work.work);
-       int status = 0;
+       struct device *dev = &adapter->pdev->dev;
+       int recovery_status;
+       int delay = ERR_DETECTION_DELAY;
 
        be_detect_error(adapter);
 
-       if (be_check_error(adapter, BE_ERROR_HW)) {
-               be_cleanup(adapter);
-
-               /* As of now error recovery support is in Lancer only */
-               if (lancer_chip(adapter))
-                       status = be_err_recover(adapter);
+       if (be_check_error(adapter, BE_ERROR_HW))
+               recovery_status = be_err_recover(adapter);
+       else
+               goto reschedule_task;
+
+       if (!recovery_status) {
+               adapter->recovery_retries = 0;
+               dev_info(dev, "Adapter recovery successful\n");
+               goto reschedule_task;
+       } else if (be_virtfn(adapter)) {
+               /* For VFs, check if PF have allocated resources
+               /* For VFs, check if the PF has allocated resources
+                */
+               dev_err(dev, "Re-trying adapter recovery\n");
+               goto reschedule_task;
+       } else if (adapter->recovery_retries++ <
+                  MAX_ERR_RECOVERY_RETRY_COUNT) {
+               /* In case of another error during recovery, it takes 30 sec
+                * for adapter to come out of error. Retry error recovery after
+                * this time interval.
+                */
+               dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
+               delay = ERR_RECOVERY_RETRY_DELAY;
+               goto reschedule_task;
+       } else {
+               dev_err(dev, "Adapter recovery failed\n");
        }
 
-       /* Always attempt recovery on VFs */
-       if (!status || be_virtfn(adapter))
-               be_schedule_err_detection(adapter);
+       return;
+reschedule_task:
+       be_schedule_err_detection(adapter, delay);
 }
 
 static void be_log_sfp_info(struct be_adapter *adapter)
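
Condensed, the rescheduling policy above works out to the following (a sketch built on the constants added to be.h earlier in this diff; the helper itself is illustrative, not part of the driver):

        /* Returns the delay in ms before the next error-detection run, or a
         * negative value when the task should not be rescheduled at all.
         */
        static int demo_next_check_msecs(bool recovered, bool is_vf, u8 *retries)
        {
                if (recovered) {
                        *retries = 0;
                        return ERR_DETECTION_DELAY;             /* back to 1 s polling       */
                }
                if (is_vf)
                        return ERR_DETECTION_DELAY;             /* VFs keep retrying at 1 s  */
                if ((*retries)++ < MAX_ERR_RECOVERY_RETRY_COUNT)
                        return ERR_RECOVERY_RETRY_DELAY;        /* PF retries after 30 s     */
                return -1;                                      /* PF gives up after 3 tries */
        }
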
@@ -5292,7 +5319,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
        be_roce_dev_add(adapter);
 
-       be_schedule_err_detection(adapter);
+       be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
 
        /* On Die temperature not supported for VF. */
        if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
@@ -5359,7 +5386,7 @@ static int be_pci_resume(struct pci_dev *pdev)
        if (status)
                return status;
 
-       be_schedule_err_detection(adapter);
+       be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
 
        if (adapter->wol_en)
                be_setup_wol(adapter, false);
@@ -5459,7 +5486,7 @@ static void be_eeh_resume(struct pci_dev *pdev)
        if (status)
                goto err;
 
-       be_schedule_err_detection(adapter);
+       be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
        return;
 err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
index 48ecbc8aaaea2983fdce3b2c9bb08d0578059f41..b423ad380b6a3ff0b31621cf98152143030c2ed6 100644 (file)
@@ -18,6 +18,7 @@ if NET_VENDOR_EZCHIP
 config EZCHIP_NPS_MANAGEMENT_ENET
        tristate "EZchip NPS management enet support"
        depends on OF_IRQ && OF_NET
+       depends on HAS_IOMEM
        ---help---
          Simple LAN device for debug or management purposes.
          Device supports interrupts for RX and TX(completion).
index 4097c58d17a7527a3654fc6664e575d97575d4fe..cbe21dc7e37ee41dab11425d673ba0d20409074a 100644 (file)
@@ -4,6 +4,9 @@
 
 obj-$(CONFIG_FEC) += fec.o
 fec-objs :=fec_main.o fec_ptp.o
+CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
+CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
+
 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
        obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
index 99d33e2d35e6c2c219fbd06f34546619a01a586d..2106d72c91dc610fdf7b368e6dbf0a99c98c0a3d 100644 (file)
@@ -19,8 +19,7 @@
 #include <linux/timecounter.h>
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
-    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
 /*
  *     Just figures, Motorola would have to change the offsets for
  *     registers in the same peripheral device on different models
 
 /*
  *     Define the buffer descriptor structure.
+ *
+ *     Evidently, ARM SoCs have the FEC block generated in a
+ *     little endian mode so adjust endianness accordingly.
  */
-#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#if defined(CONFIG_ARM)
+#define fec32_to_cpu le32_to_cpu
+#define fec16_to_cpu le16_to_cpu
+#define cpu_to_fec32 cpu_to_le32
+#define cpu_to_fec16 cpu_to_le16
+#define __fec32 __le32
+#define __fec16 __le16
+
 struct bufdesc {
-       unsigned short cbd_datlen;      /* Data length */
-       unsigned short cbd_sc;  /* Control and status info */
-       unsigned long cbd_bufaddr;      /* Buffer address */
+       __fec16 cbd_datlen;     /* Data length */
+       __fec16 cbd_sc;         /* Control and status info */
+       __fec32 cbd_bufaddr;    /* Buffer address */
 };
 #else
+#define fec32_to_cpu be32_to_cpu
+#define fec16_to_cpu be16_to_cpu
+#define cpu_to_fec32 cpu_to_be32
+#define cpu_to_fec16 cpu_to_be16
+#define __fec32 __be32
+#define __fec16 __be16
+
 struct bufdesc {
-       unsigned short  cbd_sc;                 /* Control and status info */
-       unsigned short  cbd_datlen;             /* Data length */
-       unsigned long   cbd_bufaddr;            /* Buffer address */
+       __fec16 cbd_sc;         /* Control and status info */
+       __fec16 cbd_datlen;     /* Data length */
+       __fec32 cbd_bufaddr;    /* Buffer address */
 };
 #endif
 
 struct bufdesc_ex {
        struct bufdesc desc;
-       unsigned long cbd_esc;
-       unsigned long cbd_prot;
-       unsigned long cbd_bdu;
-       unsigned long ts;
-       unsigned short res0[4];
+       __fec32 cbd_esc;
+       __fec32 cbd_prot;
+       __fec32 cbd_bdu;
+       __fec32 ts;
+       __fec16 res0[4];
 };
 
 /*
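
Besides making the descriptors correct on both big- and little-endian CPUs, the __fec16/__fec32 annotations let sparse flag direct stores to descriptor fields; the per-object -D__CHECK_ENDIAN__ flags added in the Makefile hunk earlier in this section enable that checking for these files. A small illustrative example (not taken from the driver):

        static void demo_fill_bd(struct bufdesc *bdp, unsigned short len)
        {
                /* bdp->cbd_datlen = len;  <-- sparse: incorrect type in assignment */
                bdp->cbd_datlen = cpu_to_fec16(len);    /* correct on both endiannesses */
        }
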
index 502da6f48f95e5e52a43ce5666a5c265fcd9cfe3..41c81f6ec630a289c2c280ae068f429a8da9e21e 100644 (file)
@@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev)
        bdp = txq->tx_bd_base;
 
        do {
-               pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+               pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
                        index,
                        bdp == txq->cur_tx ? 'S' : ' ',
                        bdp == txq->dirty_tx ? 'H' : ' ',
-                       bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+                       fec16_to_cpu(bdp->cbd_sc),
+                       fec32_to_cpu(bdp->cbd_bufaddr),
+                       fec16_to_cpu(bdp->cbd_datlen),
                        txq->tx_skbuff[index]);
                bdp = fec_enet_get_nextdesc(bdp, fep, 0);
                index++;
@@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                bdp = fec_enet_get_nextdesc(bdp, fep, queue);
                ebdp = (struct bufdesc_ex *)bdp;
 
-               status = bdp->cbd_sc;
+               status = fec16_to_cpu(bdp->cbd_sc);
                status &= ~BD_ENET_TX_STATS;
                status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
                frag_len = skb_shinfo(skb)->frags[frag].size;
@@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                        ebdp->cbd_bdu = 0;
-                       ebdp->cbd_esc = estatus;
+                       ebdp->cbd_esc = cpu_to_fec32(estatus);
                }
 
                bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
@@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                        goto dma_mapping_error;
                }
 
-               bdp->cbd_bufaddr = addr;
-               bdp->cbd_datlen = frag_len;
-               bdp->cbd_sc = status;
+               bdp->cbd_bufaddr = cpu_to_fec32(addr);
+               bdp->cbd_datlen = cpu_to_fec16(frag_len);
+               bdp->cbd_sc = cpu_to_fec16(status);
        }
 
        return bdp;
@@ -445,8 +447,8 @@ dma_mapping_error:
        bdp = txq->cur_tx;
        for (i = 0; i < frag; i++) {
                bdp = fec_enet_get_nextdesc(bdp, fep, queue);
-               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-                               bdp->cbd_datlen, DMA_TO_DEVICE);
+               dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
+                                fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
        }
        return ERR_PTR(-ENOMEM);
 }
@@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
        /* Fill in a Tx ring entry */
        bdp = txq->cur_tx;
        last_bdp = bdp;
-       status = bdp->cbd_sc;
+       status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;
 
        /* Set buffer length and buffer pointer */
@@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 
                ebdp->cbd_bdu = 0;
-               ebdp->cbd_esc = estatus;
+               ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
        index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
        /* Save skb pointer */
        txq->tx_skbuff[index] = skb;
 
-       bdp->cbd_datlen = buflen;
-       bdp->cbd_bufaddr = addr;
+       bdp->cbd_datlen = cpu_to_fec16(buflen);
+       bdp->cbd_bufaddr = cpu_to_fec32(addr);
 
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
        status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
-       bdp->cbd_sc = status;
+       bdp->cbd_sc = cpu_to_fec16(status);
 
        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
@@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
        unsigned int estatus = 0;
        dma_addr_t addr;
 
-       status = bdp->cbd_sc;
+       status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;
 
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       bdp->cbd_datlen = size;
-       bdp->cbd_bufaddr = addr;
+       bdp->cbd_datlen = cpu_to_fec16(size);
+       bdp->cbd_bufaddr = cpu_to_fec32(addr);
 
        if (fep->bufdesc_ex) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                ebdp->cbd_bdu = 0;
-               ebdp->cbd_esc = estatus;
+               ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
        /* Handle the last BD specially */
@@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
        if (is_last) {
                status |= BD_ENET_TX_INTR;
                if (fep->bufdesc_ex)
-                       ebdp->cbd_esc |= BD_ENET_TX_INT;
+                       ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
        }
 
-       bdp->cbd_sc = status;
+       bdp->cbd_sc = cpu_to_fec16(status);
 
        return 0;
 }
@@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
        unsigned short status;
        unsigned int estatus = 0;
 
-       status = bdp->cbd_sc;
+       status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
 
@@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
                }
        }
 
-       bdp->cbd_bufaddr = dmabuf;
-       bdp->cbd_datlen = hdr_len;
+       bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
+       bdp->cbd_datlen = cpu_to_fec16(hdr_len);
 
        if (fep->bufdesc_ex) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                ebdp->cbd_bdu = 0;
-               ebdp->cbd_esc = estatus;
+               ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
-       bdp->cbd_sc = status;
+       bdp->cbd_sc = cpu_to_fec16(status);
 
        return 0;
 }
@@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev)
 
                        /* Initialize the BD for every fragment in the page. */
                        if (bdp->cbd_bufaddr)
-                               bdp->cbd_sc = BD_ENET_RX_EMPTY;
+                               bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
                        else
-                               bdp->cbd_sc = 0;
+                               bdp->cbd_sc = cpu_to_fec16(0);
                        bdp = fec_enet_get_nextdesc(bdp, fep, q);
                }
 
                /* Set the last buffer to wrap */
                bdp = fec_enet_get_prevdesc(bdp, fep, q);
-               bdp->cbd_sc |= BD_SC_WRAP;
+               bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
                rxq->cur_rx = rxq->rx_bd_base;
        }
@@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
                for (i = 0; i < txq->tx_ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
-                       bdp->cbd_sc = 0;
+                       bdp->cbd_sc = cpu_to_fec16(0);
                        if (txq->tx_skbuff[i]) {
                                dev_kfree_skb_any(txq->tx_skbuff[i]);
                                txq->tx_skbuff[i] = NULL;
                        }
-                       bdp->cbd_bufaddr = 0;
+                       bdp->cbd_bufaddr = cpu_to_fec32(0);
                        bdp = fec_enet_get_nextdesc(bdp, fep, q);
                }
 
                /* Set the last buffer to wrap */
                bdp = fec_enet_get_prevdesc(bdp, fep, q);
-               bdp->cbd_sc |= BD_SC_WRAP;
+               bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
                txq->dirty_tx = bdp;
        }
 }
@@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev)
         */
        if (fep->quirks & FEC_QUIRK_ENET_MAC) {
                memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+               writel((__force u32)cpu_to_be32(temp_mac[0]),
+                      fep->hwp + FEC_ADDR_LOW);
+               writel((__force u32)cpu_to_be32(temp_mac[1]),
+                      fep->hwp + FEC_ADDR_HIGH);
        }
 
        /* Clear any outstanding interrupt. */
@@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
        while (bdp != READ_ONCE(txq->cur_tx)) {
                /* Order the load of cur_tx and cbd_sc */
                rmb();
-               status = READ_ONCE(bdp->cbd_sc);
+               status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
                if (status & BD_ENET_TX_READY)
                        break;
 
@@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
                skb = txq->tx_skbuff[index];
                txq->tx_skbuff[index] = NULL;
-               if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-                                       bdp->cbd_datlen, DMA_TO_DEVICE);
-               bdp->cbd_bufaddr = 0;
+               if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                       dma_unmap_single(&fep->pdev->dev,
+                                        fec32_to_cpu(bdp->cbd_bufaddr),
+                                        fec16_to_cpu(bdp->cbd_datlen),
+                                        DMA_TO_DEVICE);
+               bdp->cbd_bufaddr = cpu_to_fec32(0);
                if (!skb) {
                        bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
                        continue;
@@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                        struct skb_shared_hwtstamps shhwtstamps;
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-                       fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
                        skb_tstamp_tx(skb, &shhwtstamps);
                }
 
@@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
        if (off)
                skb_reserve(skb, fep->rx_align + 1 - off);
 
-       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
-                                         FEC_ENET_RX_FRSIZE - fep->rx_align,
-                                         DMA_FROM_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+       bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
+       if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
                if (net_ratelimit())
                        netdev_err(ndev, "Rx DMA memory map failed\n");
                return -ENOMEM;
@@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
        if (!new_skb)
                return false;
 
-       dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+       dma_sync_single_for_cpu(&fep->pdev->dev,
+                               fec32_to_cpu(bdp->cbd_bufaddr),
                                FEC_ENET_RX_FRSIZE - fep->rx_align,
                                DMA_FROM_DEVICE);
        if (!swap)
@@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
         */
        bdp = rxq->cur_rx;
 
-       while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+       while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
 
                if (pkt_received >= budget)
                        break;
@@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
                /* Process the incoming frame. */
                ndev->stats.rx_packets++;
-               pkt_len = bdp->cbd_datlen;
+               pkt_len = fec16_to_cpu(bdp->cbd_datlen);
                ndev->stats.rx_bytes += pkt_len;
 
                index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
@@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                                ndev->stats.rx_dropped++;
                                goto rx_processing_done;
                        }
-                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                       dma_unmap_single(&fep->pdev->dev,
+                                        fec32_to_cpu(bdp->cbd_bufaddr),
                                         FEC_ENET_RX_FRSIZE - fep->rx_align,
                                         DMA_FROM_DEVICE);
                }
@@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                /* If this is a VLAN packet remove the VLAN Tag */
                vlan_packet_rcvd = false;
                if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                       fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+                   fep->bufdesc_ex &&
+                   (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
                        /* Push and remove the vlan tag */
                        struct vlan_hdr *vlan_header =
                                        (struct vlan_hdr *) (data + ETH_HLEN);
@@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
                /* Get receive timestamp from the skb */
                if (fep->hwts_rx_en && fep->bufdesc_ex)
-                       fec_enet_hwtstamp(fep, ebdp->ts,
+                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
                                          skb_hwtstamps(skb));
 
                if (fep->bufdesc_ex &&
                    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
-                       if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+                       if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
                                /* don't check it */
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                        } else {
@@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                napi_gro_receive(&fep->napi, skb);
 
                if (is_copybreak) {
-                       dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+                       dma_sync_single_for_device(&fep->pdev->dev,
+                                                  fec32_to_cpu(bdp->cbd_bufaddr),
                                                   FEC_ENET_RX_FRSIZE - fep->rx_align,
                                                   DMA_FROM_DEVICE);
                } else {
@@ -1527,12 +1535,12 @@ rx_processing_done:
 
                /* Mark the buffer empty */
                status |= BD_ENET_RX_EMPTY;
-               bdp->cbd_sc = status;
+               bdp->cbd_sc = cpu_to_fec16(status);
 
                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-                       ebdp->cbd_esc = BD_ENET_RX_INT;
+                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
                        ebdp->cbd_prot = 0;
                        ebdp->cbd_bdu = 0;
                }
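
The hunks above and below wrap every access to the FEC buffer-descriptor fields (cbd_sc, cbd_datlen, cbd_bufaddr, cbd_esc, ts) in fec16/fec32 conversion helpers, so the driver stops assuming the CPU and the descriptor ring use the same byte order. The general shape of such helpers, sketched here with placeholder sketch_ names and assuming little-endian descriptors (the real helpers may select the byte order per platform):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch only: little-endian descriptor accessors in the spirit of the
 * fec16/fec32 helpers used in the hunks around this point.
 */
static inline u16 sketch_fec16_to_cpu(__le16 val)
{
        return le16_to_cpu(val);
}

static inline __le16 sketch_cpu_to_fec16(u16 val)
{
        return cpu_to_le16(val);
}

static inline u32 sketch_fec32_to_cpu(__le32 val)
{
        return le32_to_cpu(val);
}

static inline __le32 sketch_cpu_to_fec32(u32 val)
{
        return cpu_to_le32(val);
}

Because __le16/__le32 are sparse-annotated types, any descriptor access that bypasses the helpers then shows up as a type warning under make C=1.
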
@@ -2145,8 +2153,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
 
 /* List of registers that can safely be read to dump them with ethtool */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-       defined(CONFIG_M520x) || defined(CONFIG_M532x) ||               \
-       defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+       defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
 static u32 fec_enet_register_offset[] = {
        FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
        FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2662,7 +2669,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                        rxq->rx_skbuff[i] = NULL;
                        if (skb) {
                                dma_unmap_single(&fep->pdev->dev,
-                                                bdp->cbd_bufaddr,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
                                                 FEC_ENET_RX_FRSIZE - fep->rx_align,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
@@ -2777,11 +2784,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
                }
 
                rxq->rx_skbuff[i] = skb;
-               bdp->cbd_sc = BD_ENET_RX_EMPTY;
+               bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
 
                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-                       ebdp->cbd_esc = BD_ENET_RX_INT;
+                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
                }
 
                bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2789,7 +2796,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
 
        /* Set the last buffer to wrap. */
        bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-       bdp->cbd_sc |= BD_SC_WRAP;
+       bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
        return 0;
 
  err_alloc:
@@ -2812,12 +2819,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
                if (!txq->tx_bounce[i])
                        goto err_alloc;
 
-               bdp->cbd_sc = 0;
-               bdp->cbd_bufaddr = 0;
+               bdp->cbd_sc = cpu_to_fec16(0);
+               bdp->cbd_bufaddr = cpu_to_fec32(0);
 
                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-                       ebdp->cbd_esc = BD_ENET_TX_INT;
+                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
                }
 
                bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2825,7 +2832,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
 
        /* Set the last buffer to wrap. */
        bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-       bdp->cbd_sc |= BD_SC_WRAP;
+       bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
        return 0;
 
index 52e0091b4fb261f983ab0e043d1cb0d48638dc03..1ba359f17ec6e2103d8b1b126ec84a55d30f006e 100644 (file)
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
        cbd_t __iomem *prev_bd;
        cbd_t __iomem *last_tx_bd;
 
-       last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t));
+       last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
 
        /* get the current bd held in TBPTR  and scan back from this point */
        recheck_bd = curr_tbptr = (cbd_t __iomem *)
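
The one-line change above fixes an addressing bug: tx_bd_base already has type cbd_t __iomem *, so C pointer arithmetic scales the index by sizeof(cbd_t) automatically, and multiplying by sizeof(cbd_t) again pointed far past the end of the ring. A minimal userspace illustration of the scaling (struct bd is a made-up stand-in for cbd_t):

#include <stddef.h>
#include <stdio.h>

struct bd { unsigned short sc, len; unsigned int addr; };

int main(void)
{
        size_t ring = 8;        /* number of descriptors */

        /* base + (ring - 1) on a struct bd * advances by this many bytes: */
        size_t intended = (ring - 1) * sizeof(struct bd);
        /* base + (ring - 1) * sizeof(cbd_t) advances sizeof() times further: */
        size_t buggy    = (ring - 1) * sizeof(struct bd) * sizeof(struct bd);

        printf("intended byte offset: %zu\n", intended);
        printf("buggy    byte offset: %zu\n", buggy);
        return 0;
}
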
index b3645297477e53309918e6762c44a6bfde031880..3bfe36f9405b60b4822185853ca3d3d35ea563a5 100644 (file)
@@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = {
 static int __ae_match(struct device *dev, const void *data)
 {
        struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
-       const char *ae_id = data;
 
-       if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
-               return 1;
-
-       return 0;
+       return hdev->dev->of_node == data;
 }
 
-static struct hnae_ae_dev *find_ae(const char *ae_id)
+static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
 {
        struct device *dev;
 
-       WARN_ON(!ae_id);
+       WARN_ON(!ae_node);
 
-       dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);
+       dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);
 
        return dev ? cls_to_ae_dev(dev) : NULL;
 }
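
These hunks change the HNS acceleration-engine lookup from matching on a name string to matching on the device-tree node pointer: __ae_match() now compares hdev->dev->of_node with the caller-supplied node, which stays unambiguous when several engines are registered. The class_find_device() callback pattern looks roughly like this (sketch with hypothetical struct and variable names; the real code compares the parent device's of_node):

#include <linux/device.h>
#include <linux/of.h>

struct sketch_ae_dev {
        struct device dev;      /* stand-in for the class device */
};

static int sketch_match_by_of_node(struct device *dev, const void *data)
{
        /* Match when this class device was created from the given DT node. */
        return dev->of_node == data;
}

static struct sketch_ae_dev *sketch_find_ae(struct class *cls,
                                            const struct device_node *node)
{
        struct device *dev;

        dev = class_find_device(cls, NULL, node, sketch_match_by_of_node);

        return dev ? container_of(dev, struct sketch_ae_dev, dev) : NULL;
}

class_find_device() takes a reference on the device it returns, so the caller is expected to drop it with put_device() when it is done with the handle.
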
@@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle);
  * return handle ptr or ERR_PTR
  */
 struct hnae_handle *hnae_get_handle(struct device *owner_dev,
-                                   const char *ae_id, u32 port_id,
+                                   const struct device_node *ae_node,
+                                   u32 port_id,
                                    struct hnae_buf_ops *bops)
 {
        struct hnae_ae_dev *dev;
@@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
        int i, j;
        int ret;
 
-       dev = find_ae(ae_id);
+       dev = find_ae(ae_node);
        if (!dev)
                return ERR_PTR(-ENODEV);
 
index 6ca94dc3dda3c2447d59e369fee4eadeba6e0f52..1cbcb9fa3fb56b5ea4fee252f3f35294e6e7b6d3 100644 (file)
@@ -524,8 +524,11 @@ struct hnae_handle {
 
 #define ring_to_dev(ring) ((ring)->q->dev->dev)
 
-struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id,
-                                   u32 port_id, struct hnae_buf_ops *bops);
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+                                   const struct device_node *ae_node,
+                                   u32 port_id,
+                                   struct hnae_buf_ops *bops);
+
 void hnae_put_handle(struct hnae_handle *handle);
 int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
 void hnae_ae_unregister(struct hnae_ae_dev *dev);
index 522b264866b42e68b65e40cd93e28f991a3e678c..a0070d0e740dae5b3de8f2b2a92a772854b08321 100644 (file)
@@ -847,6 +847,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
 int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
 {
        struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+       static atomic_t id = ATOMIC_INIT(-1);
 
        switch (dsaf_dev->dsaf_ver) {
        case AE_VERSION_1:
@@ -858,6 +859,9 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
        default:
                break;
        }
+
+       snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
+                (int)atomic_inc_return(&id));
        ae_dev->ops = &hns_dsaf_ops;
        ae_dev->dev = dsaf_dev->dev;
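
With the dsa_name DT property removed (see the dsaf_main.c hunk below), the DSAF core now generates its own instance name from a static atomic counter, so every probed device gets a unique "dsaf<N>" without any coordination between probe calls. The pattern in isolation (kernel-style sketch; the buffer size is a placeholder for AE_NAME_SIZE):

#include <linux/atomic.h>
#include <linux/kernel.h>

#define SKETCH_NAME_SIZE 16

static void sketch_assign_name(char *name)
{
        static atomic_t id = ATOMIC_INIT(-1);

        /* atomic_inc_return() hands out 0, 1, 2, ... even if several
         * devices probe concurrently.
         */
        snprintf(name, SKETCH_NAME_SIZE, "dsaf%d", (int)atomic_inc_return(&id));
}

Starting the counter at -1 makes the first generated name "dsaf0", matching the increment-then-format order used in the hunk above.
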
 
index 1c33bd06bd5cc484ba320e60ceb5bee113493014..9439f04962e1d96bf480d8aedf6472c912e28813 100644 (file)
@@ -35,7 +35,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
        int ret, i;
        u32 desc_num;
        u32 buf_size;
-       const char *name, *mode_str;
+       const char *mode_str;
        struct device_node *np = dsaf_dev->dev->of_node;
 
        if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
@@ -43,14 +43,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
        else
                dsaf_dev->dsaf_ver = AE_VERSION_2;
 
-       ret = of_property_read_string(np, "dsa_name", &name);
-       if (ret) {
-               dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
-               return ret;
-       }
-       strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
-       dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
-
        ret = of_property_read_string(np, "mode", &mode_str);
        if (ret) {
                dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
index 31c312f9826e9d8bc644b0aa1c6d6852ea25e5bd..40205b910f80e66a439c14b53e7f2c907b0fbeb1 100644 (file)
@@ -18,6 +18,7 @@ struct hns_mac_cb;
 
 #define DSAF_DRV_NAME "hns_dsaf"
 #define DSAF_MOD_VERSION "v1.0"
+#define DSAF_DEVICE_NAME "dsaf"
 
 #define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000
 
index 0e30846a24f89b632028211f60ee82018432a7e0..3f77ff77abbc4594f8aeb0ccc40c6691fc122fac 100644 (file)
@@ -1802,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
        int ret;
 
        h = hnae_get_handle(&priv->netdev->dev,
-                           priv->ae_name, priv->port_id, NULL);
+                           priv->ae_node, priv->port_id, NULL);
        if (IS_ERR_OR_NULL(h)) {
                ret = PTR_ERR(h);
                dev_dbg(priv->dev, "has not handle, register notifier!\n");
@@ -1880,13 +1880,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
        else
                priv->enet_ver = AE_VERSION_2;
 
-       ret = of_property_read_string(node, "ae-name", &priv->ae_name);
-       if (ret)
-               goto out_read_string_fail;
+       priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
+       if (IS_ERR_OR_NULL(priv->ae_node)) {
+               ret = PTR_ERR(priv->ae_node);
+               dev_err(dev, "not find ae-handle\n");
+               goto out_read_prop_fail;
+       }
 
        ret = of_property_read_u32(node, "port-id", &priv->port_id);
        if (ret)
-               goto out_read_string_fail;
+               goto out_read_prop_fail;
 
        hns_init_mac_addr(ndev);
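
The probe path above replaces the free-form "ae-name" string property with an "ae-handle" phandle, so the NIC node now points directly at its acceleration-engine node in the device tree and the string lookup can go away. Note that of_parse_phandle() returns NULL (not an ERR_PTR) when the property is missing or malformed, and it takes a reference on the node it returns; a minimal sketch with a hypothetical property name:

#include <linux/of.h>

static struct device_node *sketch_get_engine_node(struct device_node *np)
{
        struct device_node *ae_node;

        /* "engine-handle" is a made-up property name for illustration. */
        ae_node = of_parse_phandle(np, "engine-handle", 0);
        if (!ae_node)
                return NULL;

        /* Caller owns a reference now; drop it with of_node_put() when
         * the node is no longer needed.
         */
        return ae_node;
}
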
 
@@ -1945,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
 
 out_notify_fail:
        (void)cancel_work_sync(&priv->service_task);
-out_read_string_fail:
+out_read_prop_fail:
        free_netdev(ndev);
        return ret;
 }
index 4b75270f014e765ba1cc02010c4175cafddda84c..c68ab3d34fc224ef00f43532f929fbd533b2ee80 100644 (file)
@@ -51,7 +51,7 @@ struct hns_nic_ops {
 };
 
 struct hns_nic_priv {
-       const char *ae_name;
+       const struct device_node *ae_node;
        u32 enet_ver;
        u32 port_id;
        int phy_mode;
index 1d5c3e16d8f4f4ad1f3ba580ee3a4a9c48663551..3daf2d4a7ca057a66e9ad797baa581c0d92caeba 100644 (file)
@@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = {
 };
 #endif
 
-#ifdef CONFIG_EISA
 static struct eisa_device_id hp100_eisa_tbl[] = {
        { "HWPF180" }, /* HP J2577 rev A */
        { "HWP1920" }, /* HP 27248B */
@@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = {
        { "" }         /* Mandatory final entry ! */
 };
 MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
-#endif
 
-#ifdef CONFIG_PCI
 static const struct pci_device_id hp100_pci_tbl[] = {
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
@@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = {
        {}                      /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
-#endif
 
 static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
 static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
@@ -2842,7 +2838,6 @@ static void cleanup_dev(struct net_device *d)
        free_netdev(d);
 }
 
-#ifdef CONFIG_EISA
 static int hp100_eisa_probe(struct device *gendev)
 {
        struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
@@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = {
                .remove  = hp100_eisa_remove,
         }
 };
-#endif
 
-#ifdef CONFIG_PCI
 static int hp100_pci_probe(struct pci_dev *pdev,
                           const struct pci_device_id *ent)
 {
@@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = {
        .probe          = hp100_pci_probe,
        .remove         = hp100_pci_remove,
 };
-#endif
 
 /*
  *  module section
@@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void)
        err = hp100_isa_init();
        if (err && err != -ENODEV)
                goto out;
-#ifdef CONFIG_EISA
        err = eisa_driver_register(&hp100_eisa_driver);
        if (err && err != -ENODEV)
                goto out2;
-#endif
-#ifdef CONFIG_PCI
        err = pci_register_driver(&hp100_pci_driver);
        if (err && err != -ENODEV)
                goto out3;
-#endif
  out:
        return err;
  out3:
-#ifdef CONFIG_EISA
        eisa_driver_unregister (&hp100_eisa_driver);
  out2:
-#endif
        hp100_isa_cleanup();
        goto out;
 }
@@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void)
 static void __exit hp100_module_exit(void)
 {
        hp100_isa_cleanup();
-#ifdef CONFIG_EISA
        eisa_driver_unregister (&hp100_eisa_driver);
-#endif
-#ifdef CONFIG_PCI
        pci_unregister_driver (&hp100_pci_driver);
-#endif
 }
 
 module_init(hp100_module_init)
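
The hp100 hunks drop the CONFIG_EISA and CONFIG_PCI guards around the device tables, the probe/remove callbacks, and the register/unregister calls, leaving one straight-line init and exit path. A cleanup like this only compiles when the bus-registration interfaces stay visible (as real functions or as no-op stubs) even with the bus support disabled; whether these particular subsystems provide such stubs is not visible in this diff. The usual stub pattern, with purely hypothetical names:

/* Hypothetical bus API illustrating the stub pattern that lets callers
 * drop #ifdef guards; not the actual EISA or PCI declarations.
 */
struct mybus_driver;

#ifdef CONFIG_MYBUS
int mybus_register_driver(struct mybus_driver *drv);
void mybus_unregister_driver(struct mybus_driver *drv);
#else
static inline int mybus_register_driver(struct mybus_driver *drv)
{
        return 0;       /* nothing to probe, report success */
}
static inline void mybus_unregister_driver(struct mybus_driver *drv) { }
#endif
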
index 68f2204ec6f3aa712ad8768f2985c46c9268df0c..53ed3bdd836311be4b0b4e2c383e8a2e04c311fa 100644 (file)
@@ -339,6 +339,8 @@ struct i40e_pf {
 #define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
 #define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE       BIT_ULL(41)
 #define I40E_FLAG_NO_PCI_LINK_CHECK            BIT_ULL(42)
+#define I40E_FLAG_100M_SGMII_CAPABLE           BIT_ULL(43)
+#define I40E_FLAG_RESTART_AUTONEG              BIT_ULL(44)
 #define I40E_FLAG_PF_MAC                       BIT_ULL(50)
 
        /* tracks features that get auto disabled by errors */
index b22012a446a6e127fc7ed83665e3d629f441e4f2..0e608d2a70d5daef38dd1adb4765b472c9edf75b 100644 (file)
@@ -220,6 +220,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_get_phy_wol_caps           = 0x0621,
        i40e_aqc_opc_set_phy_debug              = 0x0622,
        i40e_aqc_opc_upload_ext_phy_fm          = 0x0625,
+       i40e_aqc_opc_run_phy_activity           = 0x0626,
 
        /* NVM commands */
        i40e_aqc_opc_nvm_read                   = 0x0701,
@@ -402,6 +403,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_OS2BMC_CAP      0x0004
 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
 #define I40E_AQ_CAP_ID_ALTERNATE_RAM   0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY   0x0008
 #define I40E_AQ_CAP_ID_SRIOV           0x0012
 #define I40E_AQ_CAP_ID_VF              0x0013
 #define I40E_AQ_CAP_ID_VMDQ            0x0014
@@ -422,6 +424,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_LED             0x0061
 #define I40E_AQ_CAP_ID_SDP             0x0062
 #define I40E_AQ_CAP_ID_MDIO            0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT                0x0064
 #define I40E_AQ_CAP_ID_FLEX10          0x00F1
 #define I40E_AQ_CAP_ID_CEM             0x00F2
 
@@ -1257,9 +1260,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT              9
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK               0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN              0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN              0
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC         1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                        2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE             2
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                 3
 
        __le32  tenant_id;
@@ -1755,7 +1758,12 @@ struct i40e_aqc_get_link_status {
        u8      config;
 #define I40E_AQ_CONFIG_CRC_ENA         0x04
 #define I40E_AQ_CONFIG_PACING_MASK     0x78
-       u8      reserved[5];
+       u8      external_power_ability;
+#define I40E_AQ_LINK_POWER_CLASS_1     0x00
+#define I40E_AQ_LINK_POWER_CLASS_2     0x01
+#define I40E_AQ_LINK_POWER_CLASS_3     0x02
+#define I40E_AQ_LINK_POWER_CLASS_4     0x03
+       u8      reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1823,6 +1831,18 @@ enum i40e_aq_phy_reg_type {
        I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
 };
 
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+       __le16  activity_id;
+       u8      flags;
+       u8      reserved1;
+       __le32  control;
+       __le32  data;
+       u8      reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
 /* NVM Read command (indirect 0x0701)
  * NVM Erase commands (direct 0x0702)
  * NVM Update commands (indirect 0x0703)
index 6a034ddac36a346e916dd11dde595f366bc34e32..3b03a3165ca71d474f06b0871342add07feb452e 100644 (file)
@@ -55,6 +55,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_20G_KR2_A:
                        hw->mac.type = I40E_MAC_XL710;
                        break;
+               case I40E_DEV_ID_KX_X722:
+               case I40E_DEV_ID_QSFP_X722:
                case I40E_DEV_ID_SFP_X722:
                case I40E_DEV_ID_1G_BASE_T_X722:
                case I40E_DEV_ID_10G_BASE_T_X722:
@@ -2765,35 +2767,6 @@ i40e_aq_erase_nvm_exit:
        return status;
 }
 
-#define I40E_DEV_FUNC_CAP_SWITCH_MODE  0x01
-#define I40E_DEV_FUNC_CAP_MGMT_MODE    0x02
-#define I40E_DEV_FUNC_CAP_NPAR         0x03
-#define I40E_DEV_FUNC_CAP_OS2BMC       0x04
-#define I40E_DEV_FUNC_CAP_VALID_FUNC   0x05
-#define I40E_DEV_FUNC_CAP_SRIOV_1_1    0x12
-#define I40E_DEV_FUNC_CAP_VF           0x13
-#define I40E_DEV_FUNC_CAP_VMDQ         0x14
-#define I40E_DEV_FUNC_CAP_802_1_QBG    0x15
-#define I40E_DEV_FUNC_CAP_802_1_QBH    0x16
-#define I40E_DEV_FUNC_CAP_VSI          0x17
-#define I40E_DEV_FUNC_CAP_DCB          0x18
-#define I40E_DEV_FUNC_CAP_FCOE         0x21
-#define I40E_DEV_FUNC_CAP_ISCSI                0x22
-#define I40E_DEV_FUNC_CAP_RSS          0x40
-#define I40E_DEV_FUNC_CAP_RX_QUEUES    0x41
-#define I40E_DEV_FUNC_CAP_TX_QUEUES    0x42
-#define I40E_DEV_FUNC_CAP_MSIX         0x43
-#define I40E_DEV_FUNC_CAP_MSIX_VF      0x44
-#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR        0x45
-#define I40E_DEV_FUNC_CAP_IEEE_1588    0x46
-#define I40E_DEV_FUNC_CAP_FLEX10       0xF1
-#define I40E_DEV_FUNC_CAP_CEM          0xF2
-#define I40E_DEV_FUNC_CAP_IWARP                0x51
-#define I40E_DEV_FUNC_CAP_LED          0x61
-#define I40E_DEV_FUNC_CAP_SDP          0x62
-#define I40E_DEV_FUNC_CAP_MDIO         0x63
-#define I40E_DEV_FUNC_CAP_WR_CSR_PROT  0x64
-
 /**
  * i40e_parse_discover_capabilities
  * @hw: pointer to the hw struct
@@ -2832,79 +2805,79 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                major_rev = cap->major_rev;
 
                switch (id) {
-               case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+               case I40E_AQ_CAP_ID_SWITCH_MODE:
                        p->switch_mode = number;
                        break;
-               case I40E_DEV_FUNC_CAP_MGMT_MODE:
+               case I40E_AQ_CAP_ID_MNG_MODE:
                        p->management_mode = number;
                        break;
-               case I40E_DEV_FUNC_CAP_NPAR:
+               case I40E_AQ_CAP_ID_NPAR_ACTIVE:
                        p->npar_enable = number;
                        break;
-               case I40E_DEV_FUNC_CAP_OS2BMC:
+               case I40E_AQ_CAP_ID_OS2BMC_CAP:
                        p->os2bmc = number;
                        break;
-               case I40E_DEV_FUNC_CAP_VALID_FUNC:
+               case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
                        p->valid_functions = number;
                        break;
-               case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+               case I40E_AQ_CAP_ID_SRIOV:
                        if (number == 1)
                                p->sr_iov_1_1 = true;
                        break;
-               case I40E_DEV_FUNC_CAP_VF:
+               case I40E_AQ_CAP_ID_VF:
                        p->num_vfs = number;
                        p->vf_base_id = logical_id;
                        break;
-               case I40E_DEV_FUNC_CAP_VMDQ:
+               case I40E_AQ_CAP_ID_VMDQ:
                        if (number == 1)
                                p->vmdq = true;
                        break;
-               case I40E_DEV_FUNC_CAP_802_1_QBG:
+               case I40E_AQ_CAP_ID_8021QBG:
                        if (number == 1)
                                p->evb_802_1_qbg = true;
                        break;
-               case I40E_DEV_FUNC_CAP_802_1_QBH:
+               case I40E_AQ_CAP_ID_8021QBR:
                        if (number == 1)
                                p->evb_802_1_qbh = true;
                        break;
-               case I40E_DEV_FUNC_CAP_VSI:
+               case I40E_AQ_CAP_ID_VSI:
                        p->num_vsis = number;
                        break;
-               case I40E_DEV_FUNC_CAP_DCB:
+               case I40E_AQ_CAP_ID_DCB:
                        if (number == 1) {
                                p->dcb = true;
                                p->enabled_tcmap = logical_id;
                                p->maxtc = phys_id;
                        }
                        break;
-               case I40E_DEV_FUNC_CAP_FCOE:
+               case I40E_AQ_CAP_ID_FCOE:
                        if (number == 1)
                                p->fcoe = true;
                        break;
-               case I40E_DEV_FUNC_CAP_ISCSI:
+               case I40E_AQ_CAP_ID_ISCSI:
                        if (number == 1)
                                p->iscsi = true;
                        break;
-               case I40E_DEV_FUNC_CAP_RSS:
+               case I40E_AQ_CAP_ID_RSS:
                        p->rss = true;
                        p->rss_table_size = number;
                        p->rss_table_entry_width = logical_id;
                        break;
-               case I40E_DEV_FUNC_CAP_RX_QUEUES:
+               case I40E_AQ_CAP_ID_RXQ:
                        p->num_rx_qp = number;
                        p->base_queue = phys_id;
                        break;
-               case I40E_DEV_FUNC_CAP_TX_QUEUES:
+               case I40E_AQ_CAP_ID_TXQ:
                        p->num_tx_qp = number;
                        p->base_queue = phys_id;
                        break;
-               case I40E_DEV_FUNC_CAP_MSIX:
+               case I40E_AQ_CAP_ID_MSIX:
                        p->num_msix_vectors = number;
                        break;
-               case I40E_DEV_FUNC_CAP_MSIX_VF:
+               case I40E_AQ_CAP_ID_VF_MSIX:
                        p->num_msix_vectors_vf = number;
                        break;
-               case I40E_DEV_FUNC_CAP_FLEX10:
+               case I40E_AQ_CAP_ID_FLEX10:
                        if (major_rev == 1) {
                                if (number == 1) {
                                        p->flex10_enable = true;
@@ -2920,38 +2893,38 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                        p->flex10_mode = logical_id;
                        p->flex10_status = phys_id;
                        break;
-               case I40E_DEV_FUNC_CAP_CEM:
+               case I40E_AQ_CAP_ID_CEM:
                        if (number == 1)
                                p->mgmt_cem = true;
                        break;
-               case I40E_DEV_FUNC_CAP_IWARP:
+               case I40E_AQ_CAP_ID_IWARP:
                        if (number == 1)
                                p->iwarp = true;
                        break;
-               case I40E_DEV_FUNC_CAP_LED:
+               case I40E_AQ_CAP_ID_LED:
                        if (phys_id < I40E_HW_CAP_MAX_GPIO)
                                p->led[phys_id] = true;
                        break;
-               case I40E_DEV_FUNC_CAP_SDP:
+               case I40E_AQ_CAP_ID_SDP:
                        if (phys_id < I40E_HW_CAP_MAX_GPIO)
                                p->sdp[phys_id] = true;
                        break;
-               case I40E_DEV_FUNC_CAP_MDIO:
+               case I40E_AQ_CAP_ID_MDIO:
                        if (number == 1) {
                                p->mdio_port_num = phys_id;
                                p->mdio_port_mode = logical_id;
                        }
                        break;
-               case I40E_DEV_FUNC_CAP_IEEE_1588:
+               case I40E_AQ_CAP_ID_1588:
                        if (number == 1)
                                p->ieee_1588 = true;
                        break;
-               case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+               case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
                        p->fd = true;
                        p->fd_filters_guaranteed = number;
                        p->fd_filters_best_effort = logical_id;
                        break;
-               case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+               case I40E_AQ_CAP_ID_WSR_PROT:
                        p->wr_csr_prot = (u64)number;
                        p->wr_csr_prot |= (u64)logical_id << 32;
                        break;
index 2691277c0055d2572f2994e24c120066a7bec28b..582daa7ad77621e3dfdcc4941bd3a55adf6961d6 100644 (file)
@@ -814,13 +814,15 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
        struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
        struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
 
-       /* If Firmware version < v4.33 IEEE only */
-       if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
-           (hw->aq.fw_maj_ver < 4))
+       /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+       if ((hw->mac.type == I40E_MAC_XL710) &&
+           (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+             (hw->aq.fw_maj_ver < 4)))
                return i40e_get_ieee_dcb_config(hw);
 
-       /* If Firmware version == v4.33 use old CEE struct */
-       if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
+       /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+       if ((hw->mac.type == I40E_MAC_XL710) &&
+           ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
                ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
                                                 sizeof(cee_v1_cfg), NULL);
                if (!ret) {
index 448ef4c17efbb3d815de7005008e2e8278f28992..f7ce5c7c90031a4af7bf26a97ba058c4fa4ffa4d 100644 (file)
@@ -41,6 +41,8 @@
 #define I40E_DEV_ID_10G_BASE_T4                0x1589
 #define I40E_DEV_ID_VF                 0x154C
 #define I40E_DEV_ID_VF_HV              0x1571
+#define I40E_DEV_ID_KX_X722            0x37CE
+#define I40E_DEV_ID_QSFP_X722          0x37CF
 #define I40E_DEV_ID_SFP_X722           0x37D0
 #define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
index 29d5833e24a3ff558c9e26e62e3f6a74e1b4e7dd..45495911c5a4f5f221bac91ced1f284371880c4c 100644 (file)
@@ -340,7 +340,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                                  SUPPORTED_1000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               if (pf->hw.mac.type == I40E_MAC_X722) {
+               if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
                        ecmd->supported |= SUPPORTED_100baseT_Full;
                        if (hw_link_info->requested_speeds &
                            I40E_LINK_SPEED_100MB)
@@ -411,6 +411,10 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
                if (pf->hw.mac.type == I40E_MAC_X722) {
                        ecmd->supported |= SUPPORTED_100baseT_Full;
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+                               ecmd->supported |= SUPPORTED_100baseT_Full;
+                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       }
                }
        }
        if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
@@ -2166,9 +2170,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-                       break;
+                       return -EINVAL;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                               hena |=
+                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+
                        hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                default:
@@ -2178,9 +2185,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-                       break;
+                       return -EINVAL;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                               hena |=
+                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+
                        hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                default:
@@ -2190,10 +2200,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-                       break;
+                       return -EINVAL;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                               hena |=
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
                        hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
@@ -2204,10 +2217,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-                       break;
+                       return -EINVAL;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                               hena |=
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
                        hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
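
The ethtool RSS hunks above all manipulate the same object: a 64-bit hash-enable (hena) bitmask in which each set bit turns on receive hashing for one packet classifier type (PCTYPE). The change rejects attempts to disable L4 hashing outright (the 0 case now returns -EINVAL) and, when the expanded-PCTYPE capability flag is present, also sets the extra unicast/multicast and SYN-no-ACK bits before the mask is programmed back as two 32-bit register halves (the rd32/wr32 pairs appear in the i40evf hunks further down). A plain-C sketch of the mask handling, with made-up bit positions:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_BIT_ULL(n)       (1ULL << (n))

/* Hypothetical PCTYPE bit positions, not the real i40e values. */
#define SKETCH_PCTYPE_IPV4_TCP  31
#define SKETCH_PCTYPE_IPV4_UDP  41

int main(void)
{
        /* Read the current mask back as two 32-bit halves ... */
        uint32_t lo = 0x00000000, hi = 0x80000000;
        uint64_t hena = (uint64_t)lo | ((uint64_t)hi << 32);

        /* ... OR in the PCTYPEs being enabled ... */
        hena |= SKETCH_BIT_ULL(SKETCH_PCTYPE_IPV4_TCP);
        hena |= SKETCH_BIT_ULL(SKETCH_PCTYPE_IPV4_UDP);

        /* ... and split it again for the two 32-bit register writes. */
        lo = (uint32_t)hena;
        hi = (uint32_t)(hena >> 32);

        printf("HENA(0)=0x%08x HENA(1)=0x%08x\n", lo, hi);
        return 0;
}
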
index bb4612c159fd197db5b2a750027400b48cbdec77..320b0491abd95db56a7d71028fba3585acd20134 100644 (file)
@@ -51,7 +51,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 8
+#define DRV_VERSION_BUILD 10
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -90,6 +90,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
@@ -110,6 +112,8 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static struct workqueue_struct *i40e_wq;
+
 /**
  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw:   pointer to the HW structure
@@ -295,7 +299,7 @@ static void i40e_service_event_schedule(struct i40e_pf *pf)
        if (!test_bit(__I40E_DOWN, &pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
            !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
-               schedule_work(&pf->service_task);
+               queue_work(i40e_wq, &pf->service_task);
 }
 
 /**
@@ -1368,7 +1372,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                f->changed = true;
 
                INIT_LIST_HEAD(&f->list);
-               list_add(&f->list, &vsi->mac_filter_list);
+               list_add_tail(&f->list, &vsi->mac_filter_list);
        }
 
        /* increment counter and add a new flag if needed */
@@ -6889,8 +6893,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                wr32(hw, I40E_REG_MSS, val);
        }
 
-       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-           (pf->hw.aq.fw_maj_ver < 4)) {
+       if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
                msleep(75);
                ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (ret)
@@ -7117,9 +7120,7 @@ static void i40e_service_task(struct work_struct *work)
        i40e_watchdog_subtask(pf);
        i40e_fdir_reinit_subtask(pf);
        i40e_sync_filters_subtask(pf);
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
        i40e_sync_udp_filters_subtask(pf);
-#endif
        i40e_clean_adminq_subtask(pf);
 
        i40e_service_event_complete(pf);
@@ -7937,6 +7938,52 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
        return ret;
 }
 
+/**
+ * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
+ * @vsi: Pointer to vsi structure
+ * @seed: Buffer to store the hash keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Return 0 on success, negative on failure
+ */
+static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+                          u8 *lut, u16 lut_size)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int ret = 0;
+
+       if (seed) {
+               ret = i40e_aq_get_rss_key(hw, vsi->id,
+                       (struct i40e_aqc_get_set_rss_key_data *)seed);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Cannot get RSS key, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       return ret;
+               }
+       }
+
+       if (lut) {
+               bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+
+               ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Cannot get RSS lut, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
 /**
  * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
  * @vsi: Pointer to vsi structure
@@ -8039,7 +8086,12 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
  */
 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
 {
-       return i40e_get_rss_reg(vsi, seed, lut, lut_size);
+       struct i40e_pf *pf = vsi->back;
+
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+               return i40e_get_rss_aq(vsi, seed, lut, lut_size);
+       else
+               return i40e_get_rss_reg(vsi, seed, lut, lut_size);
 }
 
 /**
@@ -8369,6 +8421,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
                                 pf->hw.func_caps.fd_filters_best_effort;
        }
 
+       if (((pf->hw.mac.type == I40E_MAC_X710) ||
+            (pf->hw.mac.type == I40E_MAC_XL710)) &&
+           (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4)))
+               pf->flags |= I40E_FLAG_RESTART_AUTONEG;
+
        if (pf->hw.func_caps.vmdq) {
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
                pf->flags |= I40E_FLAG_VMDQ_ENABLED;
@@ -8395,6 +8453,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
                             I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
                             I40E_FLAG_WB_ON_ITR_CAPABLE |
                             I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+                            I40E_FLAG_100M_SGMII_CAPABLE |
                             I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
        }
        pf->eeprom_version = 0xDEAD;
@@ -8515,6 +8574,8 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
 }
 
 #endif
+
+#if IS_ENABLED(CONFIG_VXLAN)
 /**
  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
  * @netdev: This physical port's netdev
@@ -8524,7 +8585,6 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
 static void i40e_add_vxlan_port(struct net_device *netdev,
                                sa_family_t sa_family, __be16 port)
 {
-#if IS_ENABLED(CONFIG_VXLAN)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
@@ -8557,7 +8617,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
        pf->pending_udp_bitmap |= BIT_ULL(next_idx);
        pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-#endif
 }
 
 /**
@@ -8569,7 +8628,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
 static void i40e_del_vxlan_port(struct net_device *netdev,
                                sa_family_t sa_family, __be16 port)
 {
-#if IS_ENABLED(CONFIG_VXLAN)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
@@ -8592,9 +8650,10 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
                netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
                            ntohs(port));
        }
-#endif
 }
+#endif
 
+#if IS_ENABLED(CONFIG_GENEVE)
 /**
  * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
  * @netdev: This physical port's netdev
@@ -8604,7 +8663,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 static void i40e_add_geneve_port(struct net_device *netdev,
                                 sa_family_t sa_family, __be16 port)
 {
-#if IS_ENABLED(CONFIG_GENEVE)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
@@ -8639,7 +8697,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
        pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
 
        dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
-#endif
 }
 
 /**
@@ -8651,7 +8708,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
 static void i40e_del_geneve_port(struct net_device *netdev,
                                 sa_family_t sa_family, __be16 port)
 {
-#if IS_ENABLED(CONFIG_GENEVE)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
@@ -8677,8 +8733,8 @@ static void i40e_del_geneve_port(struct net_device *netdev,
                netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
                            ntohs(port));
        }
-#endif
 }
+#endif
 
 static int i40e_get_phys_port_id(struct net_device *netdev,
                                 struct netdev_phys_item_id *ppid)
@@ -8947,11 +9003,11 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        np = netdev_priv(netdev);
        np->vsi = vsi;
 
-       netdev->hw_enc_features |= NETIF_F_IP_CSUM       |
-                                 NETIF_F_RXCSUM         |
-                                 NETIF_F_GSO_UDP_TUNNEL |
-                                 NETIF_F_GSO_GRE        |
-                                 NETIF_F_TSO;
+       netdev->hw_enc_features |= NETIF_F_IP_CSUM        |
+                                  NETIF_F_GSO_UDP_TUNNEL |
+                                  NETIF_F_GSO_GRE        |
+                                  NETIF_F_TSO            |
+                                  0;
 
        netdev->features = NETIF_F_SG                  |
                           NETIF_F_IP_CSUM             |
@@ -10909,8 +10965,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                wr32(hw, I40E_REG_MSS, val);
        }
 
-       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-           (pf->hw.aq.fw_maj_ver < 4)) {
+       if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
                msleep(75);
                err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (err)
@@ -11418,6 +11473,16 @@ static int __init i40e_init_module(void)
                i40e_driver_string, i40e_driver_version_str);
        pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
 
+       /* we will see if single thread per module is enough for now,
+        * it can't be any worse than using the system workqueue which
+        * was already single threaded
+        */
+       i40e_wq = create_singlethread_workqueue(i40e_driver_name);
+       if (!i40e_wq) {
+               pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+               return -ENOMEM;
+       }
+
        i40e_dbg_init();
        return pci_register_driver(&i40e_driver);
 }
@@ -11432,6 +11497,7 @@ module_init(i40e_init_module);
 static void __exit i40e_exit_module(void)
 {
        pci_unregister_driver(&i40e_driver);
+       destroy_workqueue(i40e_wq);
        i40e_dbg_exit();
 }
 module_exit(i40e_exit_module);
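
Both i40e (here) and i40evf (below) stop calling schedule_work() on the shared system workqueue and instead queue their service and reset work on a driver-private single-threaded workqueue, created in module init and destroyed in module exit. A stripped-down skeleton of the same pattern (kernel-style sketch with placeholder names; it builds only inside a kernel tree):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *sketch_wq;

static void sketch_service_task(struct work_struct *work)
{
        /* periodic service processing would run here */
}
static DECLARE_WORK(sketch_work, sketch_service_task);

static void sketch_kick(void)
{
        /* replaces schedule_work(&sketch_work) */
        queue_work(sketch_wq, &sketch_work);
}

static int __init sketch_init(void)
{
        sketch_wq = create_singlethread_workqueue("sketch");
        if (!sketch_wq)
                return -ENOMEM;
        sketch_kick();
        return 0;
}

static void __exit sketch_exit(void)
{
        /* drains pending work before freeing the workqueue */
        destroy_workqueue(sketch_wq);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
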
index 720516b0e8eec59debf3b41195f9a2617eef169b..47bd8b3145a79a9bf2e299fc303100be4bf8289c 100644 (file)
@@ -2313,8 +2313,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
-       struct udphdr *oudph;
-       struct iphdr *oiph;
+       struct udphdr *oudph = NULL;
+       struct iphdr *oiph = NULL;
        u32 l4_tunnel = 0;
 
        if (skb->encapsulation) {
index 63e62f9aec6ef45e73e08970d5fca6152ac102c3..659d78270fdbaffe5e99f766d673f01cef98ab6a 100644 (file)
@@ -1213,9 +1213,21 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
        }
 
+       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+               if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                       vfres->vf_offload_flags |=
+                               I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+       }
+
        if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
 
+       if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
+               if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+                       vfres->vf_offload_flags |=
+                                       I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+       }
+
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
index f5b2b369dc7ce883820faae432e1f10947e29d14..578b1780fb08deaeaff6bfdb0e6fa50e5a93311a 100644 (file)
@@ -220,6 +220,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_get_phy_wol_caps           = 0x0621,
        i40e_aqc_opc_set_phy_debug              = 0x0622,
        i40e_aqc_opc_upload_ext_phy_fm          = 0x0625,
+       i40e_aqc_opc_run_phy_activity           = 0x0626,
 
        /* NVM commands */
        i40e_aqc_opc_nvm_read                   = 0x0701,
@@ -399,6 +400,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_OS2BMC_CAP      0x0004
 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
 #define I40E_AQ_CAP_ID_ALTERNATE_RAM   0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY   0x0008
 #define I40E_AQ_CAP_ID_SRIOV           0x0012
 #define I40E_AQ_CAP_ID_VF              0x0013
 #define I40E_AQ_CAP_ID_VMDQ            0x0014
@@ -419,6 +421,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_LED             0x0061
 #define I40E_AQ_CAP_ID_SDP             0x0062
 #define I40E_AQ_CAP_ID_MDIO            0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT                0x0064
 #define I40E_AQ_CAP_ID_FLEX10          0x00F1
 #define I40E_AQ_CAP_ID_CEM             0x00F2
 
@@ -1254,9 +1257,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT              9
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK               0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN              0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN              0
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC         1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                        2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE             2
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                 3
 
        __le32  tenant_id;
@@ -1752,7 +1755,12 @@ struct i40e_aqc_get_link_status {
        u8      config;
 #define I40E_AQ_CONFIG_CRC_ENA         0x04
 #define I40E_AQ_CONFIG_PACING_MASK     0x78
-       u8      reserved[5];
+       u8      external_power_ability;
+#define I40E_AQ_LINK_POWER_CLASS_1     0x00
+#define I40E_AQ_LINK_POWER_CLASS_2     0x01
+#define I40E_AQ_LINK_POWER_CLASS_3     0x02
+#define I40E_AQ_LINK_POWER_CLASS_4     0x03
+       u8      reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1820,6 +1828,18 @@ enum i40e_aq_phy_reg_type {
        I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
 };
 
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+       __le16  activity_id;
+       u8      flags;
+       u8      reserved1;
+       __le32  control;
+       __le32  data;
+       u8      reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
 /* NVM Read command (indirect 0x0701)
  * NVM Erase commands (direct 0x0702)
  * NVM Update commands (indirect 0x0703)
index 7a00657dacda63634276477b16a0c61e6957aaeb..7d663fb6192756e7168c22e20ba101fea02cf478 100644 (file)
@@ -252,6 +252,22 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
+       if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+               unsigned int j = 0;
+               /* check to see if there are < 4 descriptors
+                * waiting to be written back, then kick the hardware to force
+                * them to be written back in case we stay in NAPI.
+                * In this mode on X722 we do not enable Interrupt.
+                */
+               j = i40evf_get_tx_pending(tx_ring);
+
+               if (budget &&
+                   ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
+                   !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+                   (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+                       tx_ring->arm_wb = true;
+       }
+
        netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                      tx_ring->queue_index),
                                  total_packets, total_bytes);
index be1b72b938882d73d5f522f8aab58b0fda2275bb..9e15f68d9dddec10bab585b10cce367cbdef551d 100644 (file)
@@ -173,6 +173,7 @@ enum i40evf_state_t {
        __I40EVF_RESETTING,             /* in reset */
        /* Below here, watchdog is running */
        __I40EVF_DOWN,                  /* ready, can be opened */
+       __I40EVF_DOWN_PENDING,          /* descending, waiting for watchdog */
        __I40EVF_TESTING,               /* in ethtool self-test */
        __I40EVF_RUNNING,               /* opened, working */
 };
index a4c9feb589e7022619bc705d54ee6b282e9bbdc5..bd1c2728bc5c603887bc6d0a8090706c19ea3143 100644 (file)
@@ -459,6 +459,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
                                   struct ethtool_rxnfc *nfc)
 {
        struct i40e_hw *hw = &adapter->hw;
+       u32 flags = adapter->vf_res->vf_offload_flags;
 
        u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
                   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
@@ -477,54 +478,50 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
 
        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                               hena |=
+                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+
                        hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-                       break;
-               default:
+               } else {
                        return -EINVAL;
                }
                break;
        case TCP_V6_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                               hena |=
+                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+
                        hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-                       break;
-               default:
+               } else {
                        return -EINVAL;
                }
                break;
        case UDP_V4_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                               hena |=
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
                        hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-                       break;
-               default:
+               } else {
                        return -EINVAL;
                }
                break;
        case UDP_V6_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                               hena |=
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
                        hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-                       break;
-               default:
+               } else {
                        return -EINVAL;
                }
                break;
index 94da913b151da615f751d287b33a65de92d95829..66964eb6b7de3d0222b4b19e00c7be5c7ea0871b 100644 (file)
@@ -69,6 +69,8 @@ MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static struct workqueue_struct *i40evf_wq;
+
 /**
  * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw:   pointer to the HW structure
@@ -182,7 +184,7 @@ static void i40evf_tx_timeout(struct net_device *netdev)
        if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING |
                                I40EVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
-               schedule_work(&adapter->reset_task);
+               queue_work(i40evf_wq, &adapter->reset_task);
        }
 }
 
@@ -1032,7 +1034,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        struct i40evf_mac_filter *f;
 
-       if (adapter->state == __I40EVF_DOWN)
+       if (adapter->state <= __I40EVF_DOWN_PENDING)
                return;
 
        while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
@@ -1122,7 +1124,9 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
        if (!adapter->vsi_res)
                return;
        kfree(adapter->tx_rings);
+       adapter->tx_rings = NULL;
        kfree(adapter->rx_rings);
+       adapter->rx_rings = NULL;
 }
 
 /**
@@ -1454,7 +1458,11 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter)
        int ret;
 
        /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-       hena = I40E_DEFAULT_RSS_HENA;
+       if (adapter->vf_res->vf_offload_flags &
+                                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+               hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+       else
+               hena = I40E_DEFAULT_RSS_HENA;
        wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
 
@@ -2142,7 +2150,8 @@ static int i40evf_open(struct net_device *netdev)
                dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
                return -EIO;
        }
-       if (adapter->state != __I40EVF_DOWN || adapter->aq_required)
+
+       if (adapter->state != __I40EVF_DOWN)
                return -EBUSY;
 
        /* allocate transmit descriptors */
@@ -2197,14 +2206,14 @@ static int i40evf_close(struct net_device *netdev)
 {
        struct i40evf_adapter *adapter = netdev_priv(netdev);
 
-       if (adapter->state <= __I40EVF_DOWN)
+       if (adapter->state <= __I40EVF_DOWN_PENDING)
                return 0;
 
 
        set_bit(__I40E_DOWN, &adapter->vsi.state);
 
        i40evf_down(adapter);
-       adapter->state = __I40EVF_DOWN;
+       adapter->state = __I40EVF_DOWN_PENDING;
        i40evf_free_traffic_irqs(adapter);
 
        return 0;
@@ -2504,8 +2513,11 @@ static void i40evf_init_task(struct work_struct *work)
        if (adapter->vf_res->vf_offload_flags &
                    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
                adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
-       if (!RSS_AQ(adapter))
-               i40evf_init_rss(adapter);
+
+       if (adapter->vf_res->vf_offload_flags &
+           I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+               adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
+
        err = i40evf_request_misc_irq(adapter);
        if (err)
                goto err_sw_init;
@@ -2885,6 +2897,11 @@ static int __init i40evf_init_module(void)
 
        pr_info("%s\n", i40evf_copyright);
 
+       i40evf_wq = create_singlethread_workqueue(i40evf_driver_name);
+       if (!i40evf_wq) {
+               pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
+               return -ENOMEM;
+       }
        ret = pci_register_driver(&i40evf_driver);
        return ret;
 }
@@ -2900,6 +2917,7 @@ module_init(i40evf_init_module);
 static void __exit i40evf_exit_module(void)
 {
        pci_unregister_driver(&i40evf_driver);
+       destroy_workqueue(i40evf_wq);
 }
 
 module_exit(i40evf_exit_module);
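
Taken together, the i40evf_main.c hunks move the driver's deferred work onto a private single-threaded workqueue: it is created in i40evf_init_module(), reset work is queued on it with queue_work() instead of schedule_work(), and it is destroyed in i40evf_exit_module(). A condensed sketch of that lifecycle (the example_* names are illustrative):

	#include <linux/errno.h>
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;
	static struct work_struct example_work;

	static void example_work_fn(struct work_struct *work)
	{
		/* deferred handling, e.g. a reset task */
	}

	static int __init example_init(void)
	{
		example_wq = create_singlethread_workqueue("example");
		if (!example_wq)
			return -ENOMEM;
		INIT_WORK(&example_work, example_work_fn);
		return 0;
	}

	static void __exit example_exit(void)
	{
		/* destroy_workqueue() drains whatever is still queued */
		destroy_workqueue(example_wq);
	}

	module_init(example_init);
	module_exit(example_exit);

Work is then submitted from the hot paths with queue_work(example_wq, &example_work), as the tx_timeout hunk above does for the reset task.
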
index c1c5262837572fdfb00d62f3e322c2f6ac7d14f1..d3739cc5b608488d6b1e91405c7e267d85327db3 100644 (file)
@@ -804,6 +804,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                i40evf_free_all_tx_resources(adapter);
                i40evf_free_all_rx_resources(adapter);
+               if (adapter->state == __I40EVF_DOWN_PENDING)
+                       adapter->state = __I40EVF_DOWN;
                break;
        case I40E_VIRTCHNL_OP_VERSION:
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
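
Together with the i40evf_main.c changes above, this hunk adds an intermediate shutdown state: i40evf_close() parks the adapter in __I40EVF_DOWN_PENDING after disabling traffic, and only the virtchnl completion for I40E_VIRTCHNL_OP_DISABLE_QUEUES advances it to __I40EVF_DOWN, while the "state <= __I40EVF_DOWN_PENDING" tests treat both states as already stopped. That comparison presumes the enum ordering sketched below; the real enum lives in i40evf.h, is not part of this excerpt, and contains more states:

	/* Schematic ordering only -- not the actual i40evf.h definition. */
	enum example_vf_state {
		__EXAMPLE_DOWN,		/* PF has confirmed the queues are off */
		__EXAMPLE_DOWN_PENDING,	/* close() done, waiting for the PF    */
		__EXAMPLE_TESTING,
		__EXAMPLE_RUNNING,
	};
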
index a0c03834a2f7b3d7494b864db6e96ee590b36c81..55831188bc32431e935550a7da5671fa1d24d89a 100644 (file)
@@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 
        if (length <= 8 && (uintptr_t)data & 0x7) {
                /* Copy unaligned small data fragment to TSO header data area */
-               memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+               memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
                       data, length);
                desc->buf_ptr = txq->tso_hdrs_dma
-                       + txq->tx_curr_desc * TSO_HEADER_SIZE;
+                       + tx_index * TSO_HEADER_SIZE;
        } else {
                /* Alignment is okay, map buffer and hand off to hardware */
                txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
index fabc8df40392572d607f3e3417e60b54a67b4bf3..662c2ee268c7c7512c7d5f1290e1cb02fa3883dc 100644 (file)
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
 #include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 #include <linux/inetdevice.h>
-#include <linux/mbus.h>
-#include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/if_vlan.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
 #include <linux/io.h>
-#include <net/tso.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-#include <linux/of_address.h>
 #include <linux/phy.h>
-#include <linux/clk.h>
-#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tso.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
@@ -373,6 +373,8 @@ struct mvneta_port {
 
        /* Core clock */
        struct clk *clk;
+       /* AXI clock */
+       struct clk *clk_bus;
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;
@@ -3242,26 +3244,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
        const struct mvneta_statistic *s;
        void __iomem *base = pp->base;
        u32 high, low, val;
+       u64 val64;
        int i;
 
        for (i = 0, s = mvneta_statistics;
             s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
             s++, i++) {
-               val = 0;
-
                switch (s->type) {
                case T_REG_32:
                        val = readl_relaxed(base + s->offset);
+                       pp->ethtool_stats[i] += val;
                        break;
                case T_REG_64:
                        /* Docs say to read low 32-bit then high */
                        low = readl_relaxed(base + s->offset);
                        high = readl_relaxed(base + s->offset + 4);
-                       val = (u64)high << 32 | low;
+                       val64 = (u64)high << 32 | low;
+                       pp->ethtool_stats[i] += val64;
                        break;
                }
-
-               pp->ethtool_stats[i] += val;
        }
 }
 
@@ -3605,7 +3606,9 @@ static int mvneta_probe(struct platform_device *pdev)
 
        pp->indir[0] = rxq_def;
 
-       pp->clk = devm_clk_get(&pdev->dev, NULL);
+       pp->clk = devm_clk_get(&pdev->dev, "core");
+       if (IS_ERR(pp->clk))
+               pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
                goto err_put_phy_node;
@@ -3613,6 +3616,10 @@ static int mvneta_probe(struct platform_device *pdev)
 
        clk_prepare_enable(pp->clk);
 
+       pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
+       if (!IS_ERR(pp->clk_bus))
+               clk_prepare_enable(pp->clk_bus);
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pp->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pp->base)) {
@@ -3724,6 +3731,7 @@ err_free_stats:
 err_free_ports:
        free_percpu(pp->ports);
 err_clk:
+       clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);
 err_put_phy_node:
        of_node_put(phy_node);
@@ -3741,6 +3749,7 @@ static int mvneta_remove(struct platform_device *pdev)
        struct mvneta_port *pp = netdev_priv(dev);
 
        unregister_netdev(dev);
+       clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);
        free_percpu(pp->ports);
        free_percpu(pp->stats);
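
Besides sorting the include list and accumulating the 64-bit counters in a dedicated u64 (so the high word of T_REG_64 statistics is no longer truncated), the mvneta hunks introduce an optional bus (AXI) clock: the core clock is looked up by the "core" name with a fallback to the unnamed clock for older device trees, the "bus" clock is enabled only if present, and both are released on the error and remove paths. A condensed sketch of that lookup, with placeholder names:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Sketch only: mandatory clock with legacy fallback plus an optional
	 * second clock. */
	static int example_get_clocks(struct device *dev, struct clk **core,
				      struct clk **bus)
	{
		*core = devm_clk_get(dev, "core");
		if (IS_ERR(*core))	/* older DTs provide a single unnamed clock */
			*core = devm_clk_get(dev, NULL);
		if (IS_ERR(*core))
			return PTR_ERR(*core);
		clk_prepare_enable(*core);

		*bus = devm_clk_get(dev, "bus");
		if (!IS_ERR(*bus))	/* the bus clock is optional */
			clk_prepare_enable(*bus);

		return 0;
	}
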
index 0c5237264e3e29519933180f307a531c8b34c054..bb77e2207804d9c3431b091f8d219297c8f92300 100644 (file)
@@ -1044,6 +1044,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
        mlxsw_reg_sftr_port_mask_set(payload, port, 1);
 }
 
+/* SFDF - Switch Filtering DB Flush
+ * --------------------------------
+ * The switch filtering DB flush register is used to flush the FDB.
+ * Note that FDB notifications are flushed as well.
+ */
+#define MLXSW_REG_SFDF_ID 0x2013
+#define MLXSW_REG_SFDF_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
+       .id = MLXSW_REG_SFDF_ID,
+       .len = MLXSW_REG_SFDF_LEN,
+};
+
+/* reg_sfdf_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfdf_flush_type {
+       MLXSW_REG_SFDF_FLUSH_PER_SWID,
+       MLXSW_REG_SFDF_FLUSH_PER_FID,
+       MLXSW_REG_SFDF_FLUSH_PER_PORT,
+       MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
+       MLXSW_REG_SFDF_FLUSH_PER_LAG,
+       MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
+};
+
+/* reg_sfdf_flush_type
+ * Flush type.
+ * 0 - All SWID dynamic entries are flushed.
+ * 1 - All FID dynamic entries are flushed.
+ * 2 - All dynamic entries pointing to port are flushed.
+ * 3 - All FID dynamic entries pointing to port are flushed.
+ * 4 - All dynamic entries pointing to LAG are flushed.
+ * 5 - All FID dynamic entries pointing to LAG are flushed.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
+
+/* reg_sfdf_flush_static
+ * Static.
+ * 0 - Flush only dynamic entries.
+ * 1 - Flush both dynamic and static entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
+
+static inline void mlxsw_reg_sfdf_pack(char *payload,
+                                      enum mlxsw_reg_sfdf_flush_type type)
+{
+       MLXSW_REG_ZERO(sfdf, payload);
+       mlxsw_reg_sfdf_flush_type_set(payload, type);
+       mlxsw_reg_sfdf_flush_static_set(payload, true);
+}
+
+/* reg_sfdf_fid
+ * FID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
+
+/* reg_sfdf_system_port
+ * Port to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
+
+/* reg_sfdf_port_fid_system_port
+ * Port to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
+
+/* reg_sfdf_lag_id
+ * LAG ID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
+
+/* reg_sfdf_lag_fid_lag_id
+ * LAG ID to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
+
 /* SLDR - Switch LAG Descriptor Register
  * -----------------------------------------
  * The switch LAG descriptor register is populated by LAG descriptors.
@@ -1701,20 +1787,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
  * Module number.
  * Access: RW
  */
-MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
 
 /* reg_pmlp_tx_lane
  * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
  * Access: RW
  */
-MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
 
 /* reg_pmlp_rx_lane
  * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
  * equal to Tx lane.
  * Access: RW
  */
-MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
 
 static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
 {
@@ -3121,6 +3207,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
                return "SFGC";
        case MLXSW_REG_SFTR_ID:
                return "SFTR";
+       case MLXSW_REG_SFDF_ID:
+               return "SFDF";
        case MLXSW_REG_SLDR_ID:
                return "SLDR";
        case MLXSW_REG_SLCR_ID:
index ce6845d534a83f7acea04c0d0c4401103192f0ab..217856bdd400474d400d1e4e3dd275a096f50db5 100644 (file)
@@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = {
        .profile                = &mlxsw_sp_config_profile,
 };
 
+static int
+mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+       mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
+       mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+                                   u16 fid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+       mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
+       mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+       mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
+                                               mlxsw_sp_port->local_port);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+       mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
+       mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+                                     u16 fid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+       mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
+       mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+       mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       int err, last_err = 0;
+       u16 vid;
+
+       for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+               err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
+               if (err)
+                       last_err = err;
+       }
+
+       return last_err;
+}
+
+static int
+__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       int err, last_err = 0;
+       u16 vid;
+
+       for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+               err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
+               if (err)
+                       last_err = err;
+       }
+
+       return last_err;
+}
+
+static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       if (!list_empty(&mlxsw_sp_port->vports_list))
+               if (mlxsw_sp_port->lagged)
+                       return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
+               else
+                       return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
+       else
+               if (mlxsw_sp_port->lagged)
+                       return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
+               else
+                       return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
+}
+
+static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+       u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
+       u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+
+       if (mlxsw_sp_vport->lagged)
+               return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
+                                                            fid);
+       else
+               return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
+}
+
 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
 {
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
@@ -2006,10 +2115,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
        return 0;
 }
 
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     bool flush_fdb)
 {
        struct net_device *dev = mlxsw_sp_port->dev;
 
+       if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
+               netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
+
        mlxsw_sp_port->learning = 0;
        mlxsw_sp_port->learning_sync = 0;
        mlxsw_sp_port->uc_flood = 0;
@@ -2200,10 +2313,15 @@ err_col_port_enable:
        return err;
 }
 
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+                                      struct net_device *br_dev,
+                                      bool flush_fdb);
+
 static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct net_device *lag_dev)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_upper *lag;
        u16 lag_id = mlxsw_sp_port->lag_id;
        int err;
@@ -2220,7 +2338,32 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
        if (err)
                return err;
 
+       /* In case we leave a LAG device that has bridges built on top,
+        * then their teardown sequence is never issued and we need to
+        * invoke the necessary cleanup routines ourselves.
+        */
+       list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+                           vport.list) {
+               struct net_device *br_dev;
+
+               if (!mlxsw_sp_vport->bridged)
+                       continue;
+
+               br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+               mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
+       }
+
+       if (mlxsw_sp_port->bridged) {
+               mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
+               mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
+
+               if (lag->ref_count == 1)
+                       mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+       }
+
        if (lag->ref_count == 1) {
+               if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
+                       netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
                err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
                if (err)
                        return err;
@@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
        return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
 }
 
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                      struct net_device *br_dev);
-
 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct net_device *vlan_dev)
 {
@@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
                struct net_device *br_dev;
 
                br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
-               mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
+               mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
        }
 
        mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
@@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
                                }
                                mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
                        } else {
-                               err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+                               err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+                                                                true);
                                mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
                                if (err) {
                                        netdev_err(dev, "Failed to leave bridge\n");
@@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                      struct net_device *br_dev)
+                                      struct net_device *br_dev,
+                                      bool flush_fdb)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
        u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
@@ -2604,6 +2746,9 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
                goto err_vport_flood_set;
        }
 
+       if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
+               netdev_err(dev, "Failed to flush FDB\n");
+
        /* Switch between the vFIDs and destroy the old one if needed. */
        new_vfid->nr_vports++;
        mlxsw_sp_vport->vport.vfid = new_vfid;
@@ -2777,7 +2922,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
                        if (!mlxsw_sp_vport)
                                return NOTIFY_DONE;
                        err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
-                                                         upper_dev);
+                                                         upper_dev, true);
                        if (err) {
                                netdev_err(dev, "Failed to leave bridge\n");
                                return NOTIFY_BAD;
index a23dc610d2593aedce974eddf94789d3aaf181a6..7f42eb1c320e1c02ff12892c94261800e29eed64 100644 (file)
@@ -120,7 +120,6 @@ struct mlxsw_sp {
        } fdb_notify;
 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
        u32 ageing_time;
-       struct mutex fdb_lock;  /* Make sure FDB sessions are atomic. */
        struct mlxsw_sp_upper master_bridge;
        struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
 };
@@ -254,5 +253,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
                           __be16 __always_unused proto, u16 vid);
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
                             bool set, bool only_uc);
+void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
 
 #endif
index 45479ef5bcf4f44848f9989f83f46ce66fe4c11b..e492ca2cdecd9953d5f0ec4124f7077e839803dc 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/if_bridge.h>
 #include <linux/workqueue.h>
 #include <linux/jiffies.h>
+#include <linux/rtnetlink.h>
 #include <net/switchdev.h>
 
 #include "spectrum.h"
@@ -124,14 +125,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
        int err;
 
        switch (state) {
-       case BR_STATE_DISABLED: /* fall-through */
        case BR_STATE_FORWARDING:
                spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
                break;
-       case BR_STATE_LISTENING: /* fall-through */
        case BR_STATE_LEARNING:
                spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
                break;
+       case BR_STATE_LISTENING: /* fall-through */
+       case BR_STATE_DISABLED: /* fall-through */
        case BR_STATE_BLOCKING:
                spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
                break;
@@ -936,6 +937,14 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                         vlan->vid_begin, vlan->vid_end, false);
 }
 
+void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       u16 vid;
+
+       for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+               __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+}
+
 static int
 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
                             const struct switchdev_obj_port_fdb *fdb)
@@ -1040,10 +1049,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
 
 static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct switchdev_obj_port_fdb *fdb,
-                                 switchdev_obj_dump_cb_t *cb)
+                                 switchdev_obj_dump_cb_t *cb,
+                                 struct net_device *orig_dev)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       u16 vport_vid = 0, vport_fid = 0;
+       struct mlxsw_sp_port *tmp;
+       u16 vport_fid = 0;
        char *sfd_pl;
        char mac[ETH_ALEN];
        u16 fid;
@@ -1058,13 +1069,11 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
        if (!sfd_pl)
                return -ENOMEM;
 
-       mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                u16 tmp;
 
                tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
                vport_fid = mlxsw_sp_vfid_to_fid(tmp);
-               vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
        }
 
        mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
@@ -1088,12 +1097,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                                mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
                                                        &local_port);
                                if (local_port == mlxsw_sp_port->local_port) {
-                                       if (vport_fid && vport_fid != fid)
-                                               continue;
-                                       else if (vport_fid)
-                                               fdb->vid = vport_vid;
-                                       else
+                                       if (vport_fid && vport_fid == fid)
+                                               fdb->vid = 0;
+                                       else if (!vport_fid &&
+                                                !mlxsw_sp_fid_is_vfid(fid))
                                                fdb->vid = fid;
+                                       else
+                                               continue;
                                        ether_addr_copy(fdb->addr, mac);
                                        fdb->ndm_state = NUD_REACHABLE;
                                        err = cb(&fdb->obj);
@@ -1104,14 +1114,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                        case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
                                mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
                                                            mac, &fid, &lag_id);
-                               if (mlxsw_sp_port ==
-                                   mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
-                                       if (vport_fid && vport_fid != fid)
+                               tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
+                               if (tmp && tmp->local_port ==
+                                   mlxsw_sp_port->local_port) {
+                                       /* LAG records can only point to LAG
+                                        * devices or VLAN devices on top.
+                                        */
+                                       if (!netif_is_lag_master(orig_dev) &&
+                                           !is_vlan_dev(orig_dev))
                                                continue;
-                                       else if (vport_fid)
-                                               fdb->vid = vport_vid;
-                                       else
+                                       if (vport_fid && vport_fid == fid)
+                                               fdb->vid = 0;
+                                       else if (!vport_fid &&
+                                                !mlxsw_sp_fid_is_vfid(fid))
                                                fdb->vid = fid;
+                                       else
+                                               continue;
                                        ether_addr_copy(fdb->addr, mac);
                                        fdb->ndm_state = NUD_REACHABLE;
                                        err = cb(&fdb->obj);
@@ -1124,7 +1142,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
        } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
 
 out:
-       mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
        kfree(sfd_pl);
        return stored_err ? stored_err : err;
 }
@@ -1176,7 +1193,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
-                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb,
+                                            obj->orig_dev);
                break;
        default:
                err = -EOPNOTSUPP;
@@ -1194,14 +1212,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
        .switchdev_port_obj_dump        = mlxsw_sp_port_obj_dump,
 };
 
-static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
-                                       bool adding, char *mac, u16 vid,
+static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
+                                       char *mac, u16 vid,
                                        struct net_device *dev)
 {
        struct switchdev_notifier_fdb_info info;
        unsigned long notifier_type;
 
-       if (learning && learning_sync) {
+       if (learning_sync) {
                info.addr = mac;
                info.vid = vid;
                notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
@@ -1237,7 +1255,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
                        netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
                        goto just_remove;
                }
-               vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+               vid = 0;
                /* Override the physical port with the vPort. */
                mlxsw_sp_port = mlxsw_sp_vport;
        } else {
@@ -1257,8 +1275,7 @@ do_fdb_op:
 
        if (!do_notification)
                return;
-       mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
-                                   mlxsw_sp_port->learning_sync,
+       mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
                                    adding, mac, vid, mlxsw_sp_port->dev);
        return;
 
@@ -1273,6 +1290,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
                                                bool adding)
 {
        struct mlxsw_sp_port *mlxsw_sp_port;
+       struct net_device *dev;
        char mac[ETH_ALEN];
        u16 lag_vid = 0;
        u16 lag_id;
@@ -1298,11 +1316,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
                        goto just_remove;
                }
 
-               vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-               lag_vid = vid;
+               lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+               dev = mlxsw_sp_vport->dev;
+               vid = 0;
                /* Override the physical port with the vPort. */
                mlxsw_sp_port = mlxsw_sp_vport;
        } else {
+               dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
                vid = fid;
        }
 
@@ -1319,10 +1339,8 @@ do_fdb_op:
 
        if (!do_notification)
                return;
-       mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
-                                   mlxsw_sp_port->learning_sync,
-                                   adding, mac, vid,
-                                   mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
+       mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
+                                   vid, dev);
        return;
 
 just_remove:
@@ -1374,7 +1392,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
 
        mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
 
-       mutex_lock(&mlxsw_sp->fdb_lock);
+       rtnl_lock();
        do {
                mlxsw_reg_sfn_pack(sfn_pl);
                err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@@ -1387,7 +1405,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
                        mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
 
        } while (num_rec);
-       mutex_unlock(&mlxsw_sp->fdb_lock);
+       rtnl_unlock();
 
        kfree(sfn_pl);
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@@ -1402,7 +1420,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
                dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
                return err;
        }
-       mutex_init(&mlxsw_sp->fdb_lock);
        INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
        mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
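
The spectrum_switchdev.c hunks drop the driver-private fdb_lock in favour of RTNL: switchdev dump callbacks already run with RTNL held, and the FDB notification work now takes rtnl_lock() itself, so both paths are serialized against the netdev events handled under RTNL. The dump code additionally filters LAG records by the originating device and reports VLAN-unaware vPort entries with vid 0. The locking half reduces to the pattern below (names are placeholders):

	#include <linux/rtnetlink.h>
	#include <linux/workqueue.h>

	/* Sketch only: process hardware FDB notifications under RTNL so they
	 * cannot race with switchdev operations, which run with RTNL held. */
	static void example_fdb_notify_work(struct work_struct *work)
	{
		rtnl_lock();
		/* query the device, call the SWITCHDEV_FDB_ADD/DEL notifiers */
		rtnl_unlock();
		/* re-arm the delayed work */
	}
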
index a10c928bbd6b9bf5d192dfc6453395312062fe3f..00cfd95ca59d53fe040ef5b002e7d30604d35e96 100644 (file)
 
 #include "moxart_ether.h"
 
+static inline void moxart_desc_write(u32 data, u32 *desc)
+{
+       *desc = cpu_to_le32(data);
+}
+
+static inline u32 moxart_desc_read(u32 *desc)
+{
+       return le32_to_cpu(*desc);
+}
+
 static inline void moxart_emac_write(struct net_device *ndev,
                                     unsigned int reg, unsigned long value)
 {
@@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev)
 static void moxart_mac_setup_desc_ring(struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-       void __iomem *desc;
+       void *desc;
        int i;
 
        for (i = 0; i < TX_DESC_NUM; i++) {
@@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
 
                priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
        }
-       writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
+       moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
 
        priv->tx_head = 0;
        priv->tx_tail = 0;
@@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
        for (i = 0; i < RX_DESC_NUM; i++) {
                desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
                memset(desc, 0, RX_REG_DESC_SIZE);
-               writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
-               writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
+               moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+               moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
                       desc + RX_REG_OFFSET_DESC1);
 
                priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
@@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
                if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
                        netdev_err(ndev, "DMA mapping error\n");
 
-               writel(priv->rx_mapping[i],
+               moxart_desc_write(priv->rx_mapping[i],
                       desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
-               writel(priv->rx_buf[i],
+               moxart_desc_write((uintptr_t)priv->rx_buf[i],
                       desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
        }
-       writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
+       moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
 
        priv->rx_head = 0;
 
@@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                                                      napi);
        struct net_device *ndev = priv->ndev;
        struct sk_buff *skb;
-       void __iomem *desc;
+       void *desc;
        unsigned int desc0, len;
        int rx_head = priv->rx_head;
        int rx = 0;
 
        while (rx < budget) {
                desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
-               desc0 = readl(desc + RX_REG_OFFSET_DESC0);
+               desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
+               rmb(); /* ensure desc0 is up to date */
 
                if (desc0 & RX_DESC0_DMA_OWN)
                        break;
@@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                        priv->stats.multicast++;
 
 rx_next:
-               writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+               wmb(); /* prevent setting ownership back too early */
+               moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
 
                rx_head = RX_NEXT(rx_head);
                priv->rx_head = rx_head;
@@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
 static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-       void __iomem *desc;
+       void *desc;
        unsigned int len;
        unsigned int tx_head = priv->tx_head;
        u32 txdes1;
@@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
 
        spin_lock_irq(&priv->txlock);
-       if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
+       if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
                net_dbg_ratelimited("no TX space for packet\n");
                priv->stats.tx_dropped++;
                goto out_unlock;
        }
+       rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
 
        len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
 
@@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        priv->tx_len[tx_head] = len;
        priv->tx_skb[tx_head] = skb;
 
-       writel(priv->tx_mapping[tx_head],
+       moxart_desc_write(priv->tx_mapping[tx_head],
               desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
-       writel(skb->data,
+       moxart_desc_write((uintptr_t)skb->data,
               desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
 
        if (skb->len < ETH_ZLEN) {
@@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
        if (tx_head == TX_DESC_NUM_MASK)
                txdes1 |= TX_DESC1_END;
-       writel(txdes1, desc + TX_REG_OFFSET_DESC1);
-       writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
+       moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
+       wmb(); /* flush descriptor before transferring ownership */
+       moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
 
        /* start to send packet */
        writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
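
The moxart hunks stop treating the DMA descriptor rings as MMIO: the descriptors are accessed as ordinary little-endian memory through the new moxart_desc_read()/moxart_desc_write() helpers (the matching __iomem removals are in the moxart_ether.h hunk below), and explicit barriers order the descriptor fields against the ownership bit. Schematically, on the transmit side (the offsets and EXAMPLE_DESC_OWN below are placeholders, not the real layout):

	#define EXAMPLE_DESC_OWN	0x80000000U

	/* Sketch only: publish all descriptor fields before handing the
	 * descriptor to the DMA engine. */
	static void example_tx_publish(void *desc, u32 buf_dma, u32 len_flags)
	{
		moxart_desc_write(buf_dma, desc + 8);	/* buffer address   */
		moxart_desc_write(len_flags, desc + 4);	/* length and flags */
		wmb();			/* fields must be visible first ... */
		moxart_desc_write(EXAMPLE_DESC_OWN, desc + 0);	/* ... then ownership */
	}

On the receive side the driver does the inverse: it reads the ownership word, then issues rmb() before trusting the rest of the descriptor, as the moxart_rx_poll() hunk above shows.
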
index 2be9280d608ce9efe320ea7b20b01b1c350a023e..93a9563ac7c6730eec8240ac187a86b9831c9741 100644 (file)
@@ -300,7 +300,7 @@ struct moxart_mac_priv_t {
 
        dma_addr_t rx_base;
        dma_addr_t rx_mapping[RX_DESC_NUM];
-       void __iomem *rx_desc_base;
+       void *rx_desc_base;
        unsigned char *rx_buf_base;
        unsigned char *rx_buf[RX_DESC_NUM];
        unsigned int rx_head;
@@ -308,7 +308,7 @@ struct moxart_mac_priv_t {
 
        dma_addr_t tx_base;
        dma_addr_t tx_mapping[TX_DESC_NUM];
-       void __iomem *tx_desc_base;
+       void *tx_desc_base;
        unsigned char *tx_buf_base;
        unsigned char *tx_buf[RX_DESC_NUM];
        unsigned int tx_head;
index 50d5604833edc8e0ffde1f7f8974002852f8bb97..e0993eba5df3f8081bff6329a97b4753e876cbe3 100644 (file)
@@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
-#ifdef CONFIG_PCI_MSI
-
 static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
        struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
@@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
        if (vdev->config.intr_type == MSI_X)
                pci_disable_msix(vdev->pdev);
 }
-#endif
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-#ifdef CONFIG_PCI_MSI
-       if (vdev->config.intr_type == MSI_X) {
+       if (IS_ENABLED(CONFIG_PCI_MSI) &&
+           vdev->config.intr_type == MSI_X) {
                vxge_rem_msix_isr(vdev);
-       } else
-#endif
-       if (vdev->config.intr_type == INTA) {
+       } else if (vdev->config.intr_type == INTA) {
                        synchronize_irq(vdev->pdev->irq);
                        free_irq(vdev->pdev->irq, vdev);
        }
@@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev)
 static int vxge_add_isr(struct vxgedev *vdev)
 {
        int ret = 0;
-#ifdef CONFIG_PCI_MSI
        int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
        int pci_fun = PCI_FUNC(vdev->pdev->devfn);
 
-       if (vdev->config.intr_type == MSI_X)
+       if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
                ret = vxge_enable_msix(vdev);
 
        if (ret) {
@@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
                vdev->config.intr_type = INTA;
        }
 
-       if (vdev->config.intr_type == MSI_X) {
+       if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
                for (intr_idx = 0;
                     intr_idx < (vdev->no_of_vpath *
                        VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
@@ -2576,9 +2570,8 @@ static int vxge_add_isr(struct vxgedev *vdev)
                vdev->vxge_entries[intr_cnt].in_use = 1;
                vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
        }
-INTA_MODE:
-#endif
 
+INTA_MODE:
        if (vdev->config.intr_type == INTA) {
                snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
                        "%s:vxge:INTA", vdev->ndev->name);
@@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
        if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
                max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
 
-#ifndef CONFIG_PCI_MSI
-       vxge_debug_init(VXGE_ERR,
-               "%s: This Kernel does not support "
-               "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
-       *intr_type = INTA;
-#endif
+       if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+               vxge_debug_init(VXGE_ERR,
+                       "%s: This Kernel does not support "
+                       "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
+               *intr_type = INTA;
+       }
 
        /* Configure whether MSI-X or IRQL. */
        switch (*intr_type) {
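
The vxge hunks replace the #ifdef CONFIG_PCI_MSI / #endif blocks with IS_ENABLED(CONFIG_PCI_MSI) checks, so the MSI-X paths are always compiled and type-checked and simply become dead code that the optimizer discards on kernels without PCI MSI support. The idiom, reduced to placeholder types and helpers:

	#include <linux/kconfig.h>

	/* Sketch only: the example_* names are illustrative, not vxge code. */
	enum example_intr_type { EXAMPLE_INTA, EXAMPLE_MSI_X };

	struct example_dev {
		enum example_intr_type intr_type;
	};

	static int example_enable_msix(struct example_dev *dev) { return 0; }
	static int example_enable_intx(struct example_dev *dev) { return 0; }

	static int example_add_isr(struct example_dev *dev)
	{
		/* Both branches always compile; the unreachable one is
		 * discarded when CONFIG_PCI_MSI is not set. */
		if (IS_ENABLED(CONFIG_PCI_MSI) &&
		    dev->intr_type == EXAMPLE_MSI_X)
			return example_enable_msix(dev);

		return example_enable_intx(dev);
	}
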
index 9fbe92ac225b00f93ed33d187f03f3db2f879f2e..b2160d1b9c7175daee2a2564082dfc38c096c4b2 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
  *
@@ -837,6 +837,8 @@ static inline void ravb_write(struct net_device *ndev, u32 data,
        iowrite32(data, priv->addr + reg);
 }
 
+void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
+                u32 set);
 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
 
 irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
index ac43ed914fcf270653a6101a2f6c736b7ac2ef00..c936682aae68df0808eb68c63c25b42b0e3b4dca 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
  *
                 NETIF_MSG_RX_ERR | \
                 NETIF_MSG_TX_ERR)
 
+void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
+                u32 set)
+{
+       ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
+}
+
 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
 {
        int i;
@@ -59,8 +65,7 @@ static int ravb_config(struct net_device *ndev)
        int error;
 
        /* Set config mode */
-       ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
-                  CCC);
+       ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
        /* Check if the operating mode is changed to the config mode */
        error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
        if (error)
@@ -72,13 +77,8 @@ static int ravb_config(struct net_device *ndev)
 static void ravb_set_duplex(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
-       u32 ecmr = ravb_read(ndev, ECMR);
 
-       if (priv->duplex)       /* Full */
-               ecmr |=  ECMR_DM;
-       else                    /* Half */
-               ecmr &= ~ECMR_DM;
-       ravb_write(ndev, ecmr, ECMR);
+       ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
 }
 
 static void ravb_set_rate(struct net_device *ndev)
@@ -131,13 +131,8 @@ static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
 {
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);
-       u32 pir = ravb_read(priv->ndev, PIR);
 
-       if (set)
-               pir |=  mask;
-       else
-               pir &= ~mask;
-       ravb_write(priv->ndev, pir, PIR);
+       ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
 }
 
 /* MDC pin control */
@@ -393,9 +388,9 @@ static int ravb_dmac_init(struct net_device *ndev)
        ravb_ring_format(ndev, RAVB_NC);
 
 #if defined(__LITTLE_ENDIAN)
-       ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
+       ravb_modify(ndev, CCC, CCC_BOC, 0);
 #else
-       ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
+       ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
 #endif
 
        /* Set AVB RX */
@@ -418,8 +413,7 @@ static int ravb_dmac_init(struct net_device *ndev)
        ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
 
        /* Setting the control will start the AVB-DMAC process. */
-       ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
-                  CCC);
+       ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
 
        return 0;
 }
@@ -493,7 +487,7 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
                                break;
                        }
                }
-               ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
+               ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
        }
 }
 
@@ -613,13 +607,13 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 static void ravb_rcv_snd_disable(struct net_device *ndev)
 {
        /* Disable TX and RX */
-       ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
+       ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
 }
 
 static void ravb_rcv_snd_enable(struct net_device *ndev)
 {
        /* Enable TX and RX */
-       ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+       ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
 }
 
 /* function for waiting dma process finished */
@@ -812,8 +806,8 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 
        /* Re-enable RX/TX interrupts */
        spin_lock_irqsave(&priv->lock, flags);
-       ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
-       ravb_write(ndev, ravb_read(ndev, TIC)  | mask,  TIC);
+       ravb_modify(ndev, RIC0, mask, mask);
+       ravb_modify(ndev, TIC,  mask, mask);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -852,8 +846,7 @@ static void ravb_adjust_link(struct net_device *ndev)
                        ravb_set_rate(ndev);
                }
                if (!priv->link) {
-                       ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
-                                  ECMR);
+                       ravb_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = true;
                        priv->link = phydev->link;
                        if (priv->no_avb_link)
@@ -1393,7 +1386,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        desc--;
        desc->die_dt = DT_FSTART;
 
-       ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
+       ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
 
        priv->cur_tx[q] += NUM_TX_DESC;
        if (priv->cur_tx[q] - priv->dirty_tx[q] >
@@ -1468,15 +1461,10 @@ static void ravb_set_rx_mode(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
-       u32 ecmr;
 
        spin_lock_irqsave(&priv->lock, flags);
-       ecmr = ravb_read(ndev, ECMR);
-       if (ndev->flags & IFF_PROMISC)
-               ecmr |=  ECMR_PRM;
-       else
-               ecmr &= ~ECMR_PRM;
-       ravb_write(ndev, ecmr, ECMR);
+       ravb_modify(ndev, ECMR, ECMR_PRM,
+                   ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
 }
@@ -1804,14 +1792,12 @@ static int ravb_probe(struct platform_device *pdev)
 
        /* Set AVB config mode */
        if (chip_id == RCAR_GEN2) {
-               ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
-                          CCC_OPC_CONFIG, CCC);
+               ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
                /* Set CSEL value */
-               ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) |
-                          CCC_CSEL_HPB, CCC);
+               ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
        } else {
-               ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
-                          CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
+               ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
+                           CCC_GAC | CCC_CSEL_HPB);
        }
 
        /* Set CSEL value */
@@ -1824,7 +1810,7 @@ static int ravb_probe(struct platform_device *pdev)
                goto out_release;
 
        /* Request GTI loading */
-       ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
+       ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
 
        /* Allocate descriptor base address table */
        priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
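
ravb_modify() above (and sh_eth_modify() in the sh_eth.c hunks further down) fold the driver's recurring read-modify-write sequences into a single helper: the register is read, the bits in the clear mask are dropped, the bits in set are ORed back in, and the result is written. The call shapes used throughout the conversion, with EXAMPLE_REG, EXAMPLE_BIT and EXAMPLE_FIELD as placeholders:

	ravb_modify(ndev, EXAMPLE_REG, EXAMPLE_BIT, EXAMPLE_BIT);	/* set a bit       */
	ravb_modify(ndev, EXAMPLE_REG, EXAMPLE_BIT, 0);			/* clear a bit     */
	ravb_modify(ndev, EXAMPLE_REG, EXAMPLE_FIELD, value);		/* replace a field */
	sh_eth_modify(ndev, EESR, 0, 0);	/* read back and rewrite unchanged,
						 * clearing write-1-to-clear status bits */
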
index 7a8ce920c49e709b067321ae91306153f4f24390..57992ccc46575dc6ba21bc4d7ce86e73576e46b9 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2013-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@ static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
        if (error)
                return error;
 
-       ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR);
+       ravb_modify(ndev, GCCR, request, request);
        return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
 }
 
@@ -185,7 +185,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
                                                 ptp.info);
        struct net_device *ndev = priv->ndev;
        unsigned long flags;
-       u32 gic;
 
        if (req->index)
                return -EINVAL;
@@ -195,12 +194,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
        priv->ptp.extts[req->index] = on;
 
        spin_lock_irqsave(&priv->lock, flags);
-       gic = ravb_read(ndev, GIC);
-       if (on)
-               gic |= GIC_PTCE;
-       else
-               gic &= ~GIC_PTCE;
-       ravb_write(ndev, gic, GIC);
+       ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -216,7 +210,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
        struct ravb_ptp_perout *perout;
        unsigned long flags;
        int error = 0;
-       u32 gic;
 
        if (req->index)
                return -EINVAL;
@@ -248,9 +241,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
                error = ravb_ptp_update_compare(priv, (u32)start_ns);
                if (!error) {
                        /* Unmask interrupt */
-                       gic = ravb_read(ndev, GIC);
-                       gic |= GIC_PTME;
-                       ravb_write(ndev, gic, GIC);
+                       ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
                }
        } else  {
                spin_lock_irqsave(&priv->lock, flags);
@@ -259,9 +250,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
                perout->period = 0;
 
                /* Mask interrupt */
-               gic = ravb_read(ndev, GIC);
-               gic &= ~GIC_PTME;
-               ravb_write(ndev, gic, GIC);
+               ravb_modify(ndev, GIC, GIC_PTME, 0);
        }
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
@@ -331,7 +320,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
-       u32 gccr;
 
        priv->ptp.info = ravb_ptp_info;
 
@@ -340,8 +328,7 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
 
        spin_lock_irqsave(&priv->lock, flags);
        ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
-       gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS;
-       ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR);
+       ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
 
index dfa9e59c9442884dfe6a52a5c6df0c4c70fa4737..0a150b2289146fe6c996b4ad338b9d6b45f56803 100644 (file)
@@ -3,7 +3,7 @@
  *  Copyright (C) 2014  Renesas Electronics Corporation
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
- *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
+ *  Copyright (C) 2013-2016 Cogent Embedded, Inc.
  *  Copyright (C) 2014 Codethink Limited
  *
  *  This program is free software; you can redistribute it and/or modify it
@@ -428,6 +428,13 @@ static u32 sh_eth_read(struct net_device *ndev, int enum_index)
        return ioread32(mdp->addr + offset);
 }
 
+static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
+                         u32 set)
+{
+       sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
+                    enum_index);
+}
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
        return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -467,10 +474,7 @@ static void sh_eth_set_duplex(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
-       if (mdp->duplex) /* Full */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
-       else            /* Half */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+       sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
 }
 
 static void sh_eth_chip_reset(struct net_device *ndev)
@@ -583,10 +587,10 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
+               sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
                break;
        case 100:/* 100BASE */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
+               sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
                break;
        default:
                break;
@@ -649,10 +653,10 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev)
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
+               sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
                break;
        case 100:/* 100BASE */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
+               sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
                break;
        default:
                break;
@@ -924,8 +928,7 @@ static int sh_eth_reset(struct net_device *ndev)
 
        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_write(ndev, EDSR_ENALL, EDSR);
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
-                            EDMR);
+               sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
 
                ret = sh_eth_check_reset(ndev);
                if (ret)
@@ -949,11 +952,9 @@ static int sh_eth_reset(struct net_device *ndev)
                if (mdp->cd->select_mii)
                        sh_eth_select_mii(ndev);
        } else {
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
-                            EDMR);
+               sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
                mdelay(3);
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
-                            EDMR);
+               sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
        }
 
        return ret;
@@ -1285,7 +1286,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
                     RFLR);
 
-       sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
+       sh_eth_modify(ndev, EESR, 0, 0);
        if (start) {
                mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
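
One conversion above is worth a note: the old code wrote EESR back with the value it had just read, and sh_eth_modify(ndev, EESR, 0, 0) keeps that behaviour (clear nothing, set nothing, write the readback). On a write-1-to-clear status register that acknowledges every pending bit. The sketch below assumes that semantic, which is inferred from how the old code used the register rather than stated in the diff.

#include <stdint.h>
#include <stdio.h>

/* Simulated write-1-to-clear status register: writing a 1 to a bit
 * position clears it (assumed behaviour of EESR, for illustration). */
static uint32_t status_reg = 0x00050003;

static uint32_t status_read(void)        { return status_reg; }
static void     status_write(uint32_t v) { status_reg &= ~v; }

/* modify(reg, 0, 0): write back exactly what was read. */
static void status_modify(uint32_t clear, uint32_t set)
{
	status_write((status_read() & ~clear) | set);
}

int main(void)
{
	printf("before: %08x\n", (unsigned)status_read());
	status_modify(0, 0);                   /* acks every pending bit */
	printf("after : %08x\n", (unsigned)status_read());
	return 0;
}
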
@@ -1532,15 +1533,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
 {
        /* disable tx and rx */
-       sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
-               ~(ECMR_RE | ECMR_TE), ECMR);
+       sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
 }
 
 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
 {
        /* enable tx and rx */
-       sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
-               (ECMR_RE | ECMR_TE), ECMR);
+       sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
 }
 
 /* error control function */
@@ -1569,13 +1568,11 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
                                sh_eth_rcv_snd_disable(ndev);
                        } else {
                                /* Link Up */
-                               sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
-                                                  ~DMAC_M_ECI, EESIPR);
+                               sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
                                /* clear int */
-                               sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
-                                            ECSR);
-                               sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
-                                                  DMAC_M_ECI, EESIPR);
+                               sh_eth_modify(ndev, ECSR, 0, 0);
+                               sh_eth_modify(ndev, EESIPR, DMAC_M_ECI,
+                                             DMAC_M_ECI);
                                /* enable tx and rx */
                                sh_eth_rcv_snd_enable(ndev);
                        }
@@ -1765,9 +1762,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                                mdp->cd->set_rate(ndev);
                }
                if (!mdp->link) {
-                       sh_eth_write(ndev,
-                                    sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
-                                    ECMR);
+                       sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = 1;
                        mdp->link = phydev->link;
                        if (mdp->cd->no_psr || mdp->no_ether_link)
index a4ab71d43e4e9d521e2a58f75aca4a4dff3f9c30..166a7fc87e2f41966ab2b54a641ea1c7bfc46f1d 100644 (file)
@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
        info.addr = lw->addr;
        info.vid = lw->vid;
 
+       rtnl_lock();
        if (learned && removing)
                call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
                                         lw->rocker_port->dev, &info.info);
        else if (learned && !removing)
                call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
                                         lw->rocker_port->dev, &info.info);
+       rtnl_unlock();
 
        rocker_port_kfree(lw->trans, work);
 }
index dcc80b9d4370eb47b1e001cf7c44fa95ebdc8ce1..31e968561d5ce37f2c1a4b0eb3553866a2f0df1e 100644 (file)
@@ -1,4 +1,4 @@
 obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
 samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
                sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o  sxgbe_mdio.o \
-               sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
+               sxgbe_ethtool.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
deleted file mode 100644 (file)
index 51c3219..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/* 10G controller driver for Samsung SoCs
- *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include "sxgbe_common.h"
-#include "sxgbe_xpcs.h"
-
-static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
-{
-       u32 value;
-       struct sxgbe_priv_data *priv = netdev_priv(ndev);
-
-       value = readl(priv->ioaddr + XPCS_OFFSET + reg);
-
-       return value;
-}
-
-static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
-{
-       struct sxgbe_priv_data *priv = netdev_priv(ndev);
-
-       writel(data, priv->ioaddr + XPCS_OFFSET + reg);
-
-       return 0;
-}
-
-int sxgbe_xpcs_init(struct net_device *ndev)
-{
-       u32 value;
-
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       /* 10G XAUI mode */
-       sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
-       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
-       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
-
-       do {
-               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
-       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
-
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
-
-       do {
-               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
-       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
-
-       return 0;
-}
-
-int sxgbe_xpcs_init_1G(struct net_device *ndev)
-{
-       int value;
-
-       /* 10GBASE-X PCS (1G) mode */
-       sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
-       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
-
-       value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
-       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
-       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
-
-       do {
-               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
-       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
-
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
-
-       /* Auto Negotiation cluase 37 enable */
-       value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
-       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
-
-       return 0;
-}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
deleted file mode 100644 (file)
index 6b26a50..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* 10G controller driver for Samsung SoCs
- *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Author: Byungho An <bh74.an@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __SXGBE_XPCS_H__
-#define __SXGBE_XPCS_H__
-
-/* XPCS Registers */
-#define XPCS_OFFSET                    0x1A060000
-#define SR_PCS_MMD_CONTROL1            0x030000
-#define SR_PCS_CONTROL2                        0x030007
-#define VR_PCS_MMD_XAUI_MODE_CONTROL   0x038004
-#define VR_PCS_MMD_DIGITAL_STATUS      0x038010
-#define SR_MII_MMD_CONTROL             0x1F0000
-#define SR_MII_MMD_AN_ADV              0x1F0004
-#define SR_MII_MMD_AN_LINK_PARTNER_BA  0x1F0005
-#define VR_MII_MMD_AN_CONTROL          0x1F8001
-#define VR_MII_MMD_AN_INT_STATUS       0x1F8002
-
-#define XPCS_QSEQ_STATE_STABLE         0x10
-#define XPCS_QSEQ_STATE_MPLLOFF                0x1c
-#define XPCS_TYPE_SEL_R                        0x00
-#define XPCS_TYPE_SEL_X                        0x01
-#define XPCS_TYPE_SEL_W                        0x02
-#define XPCS_XAUI_MODE                 0x00
-#define XPCS_RXAUI_MODE                        0x01
-
-int sxgbe_xpcs_init(struct net_device *ndev);
-int sxgbe_xpcs_init_1G(struct net_device *ndev);
-
-#endif /* __SXGBE_XPCS_H__ */
index e23a642357e7c01d7010ae070b47632b3a926ba9..2437227712dcd0ee5fd0404113e35527646a8b76 100644 (file)
@@ -51,7 +51,6 @@
 #endif
 
 #ifdef CONFIG_PPC_PMAC
-#include <asm/pci-bridge.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
index cc106d892e2975c924eafc8e850a4da8188ef227..942a95db20614ae2e5a9441e2514641a32695eaf 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/mutex.h>
 #include <linux/highmem.h>
 #include <linux/if_vlan.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/sunvnet.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <linux/icmpv6.h>
@@ -389,17 +391,27 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
        if (vio_version_after_eq(&port->vio, 1, 8)) {
                struct vio_net_dext *dext = vio_net_ext(desc);
 
+               skb_reset_network_header(skb);
+
                if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
                        if (skb->protocol == ETH_P_IP) {
-                               struct iphdr *iph = (struct iphdr *)skb->data;
+                               struct iphdr *iph = ip_hdr(skb);
 
                                iph->check = 0;
                                ip_send_check(iph);
                        }
                }
                if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
-                   skb->ip_summed == CHECKSUM_NONE)
-                       vnet_fullcsum(skb);
+                   skb->ip_summed == CHECKSUM_NONE) {
+                       if (skb->protocol == htons(ETH_P_IP)) {
+                               struct iphdr *iph = ip_hdr(skb);
+                               int ihl = iph->ihl * 4;
+
+                               skb_reset_transport_header(skb);
+                               skb_set_transport_header(skb, ihl);
+                               vnet_fullcsum(skb);
+                       }
+               }
                if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
                        skb->ip_summed = CHECKSUM_PARTIAL;
                        skb->csum_level = 0;
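
Before calling vnet_fullcsum(), the new code above places the transport header iph->ihl * 4 bytes past the network header. A small user-space sketch of that offset calculation, using a cut-down IPv4 header in place of the kernel's struct iphdr:

#include <stdint.h>
#include <stdio.h>

/* Cut-down IPv4 header: only the version/IHL byte matters here. */
struct ipv4_hdr {
	uint8_t version_ihl;    /* version in the high nibble, IHL in the low */
};

/* IHL counts 32-bit words, so the L4 header starts ihl * 4 bytes in. */
static unsigned int l4_offset(const struct ipv4_hdr *iph)
{
	return (iph->version_ihl & 0x0f) * 4;
}

int main(void)
{
	struct ipv4_hdr h = { .version_ihl = 0x45 };   /* IPv4, 20-byte header */
	printf("transport header offset: %u bytes\n", l4_offset(&h));
	return 0;
}
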
@@ -530,6 +542,8 @@ static int vnet_walk_rx_one(struct vnet_port *port,
        err = vnet_rx_one(port, desc);
        if (err == -ECONNRESET)
                return err;
+       trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
+                         index, desc->hdr.ack);
        desc->hdr.state = VIO_DESC_DONE;
        err = put_rx_desc(port, dr, desc, index);
        if (err < 0)
@@ -577,9 +591,15 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
                ack_start = ack_end = vio_dring_prev(dr, start);
        if (send_ack) {
                port->napi_resume = false;
+               trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
+                                              port->vio._peer_sid,
+                                              ack_end, *npkts);
                return vnet_send_ack(port, dr, ack_start, ack_end,
                                     VIO_DRING_STOPPED);
        } else  {
+               trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
+                                               port->vio._peer_sid,
+                                               ack_end, *npkts);
                port->napi_resume = true;
                port->napi_stop_idx = ack_end;
                return 1;
@@ -653,6 +673,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
        /* sync for race conditions with vnet_start_xmit() and tell xmit it
         * is time to send a trigger.
         */
+       trace_vnet_rx_stopped_ack(port->vio._local_sid,
+                                 port->vio._peer_sid, end);
        dr->cons = vio_dring_next(dr, end);
        desc = vio_dring_entry(dr, dr->cons);
        if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
@@ -876,6 +898,9 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
        int retries = 0;
 
        if (port->stop_rx) {
+               trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
+                                                 port->vio._peer_sid,
+                                                 port->stop_rx_idx, -1);
                err = vnet_send_ack(port,
                                    &port->vio.drings[VIO_DRIVER_RX_RING],
                                    port->stop_rx_idx, -1,
@@ -898,6 +923,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
                if (retries++ > VNET_MAX_RETRIES)
                        break;
        } while (err == -EAGAIN);
+       trace_vnet_tx_trigger(port->vio._local_sid,
+                             port->vio._peer_sid, start, err);
 
        return err;
 }
@@ -1404,8 +1431,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * producer to consumer announcement that work is available to the
         * consumer
         */
-       if (!port->start_cons)
-               goto ldc_start_done; /* previous trigger suffices */
+       if (!port->start_cons) { /* previous trigger suffices */
+               trace_vnet_skip_tx_trigger(port->vio._local_sid,
+                                          port->vio._peer_sid, dr->cons);
+               goto ldc_start_done;
+       }
 
        err = __vnet_tx_trigger(port, dr->cons);
        if (unlikely(err < 0)) {
index 70814b7386b3119e55ac8be1a92cfdb8c0740c5c..fc8bbff2d7e37ec19d807008c1e9b70040551ea2 100644 (file)
@@ -1880,9 +1880,9 @@ static int dwceqos_open(struct net_device *ndev)
        }
        netdev_reset_queue(ndev);
 
+       dwceqos_init_hw(lp);
        napi_enable(&lp->napi);
        phy_start(lp->phy_dev);
-       dwceqos_init_hw(lp);
 
        netif_start_queue(ndev);
        tasklet_enable(&lp->tx_bdreclaim_tasklet);
index 657b65bf5cac64254b4db7c845038d2e40549a8c..18bf3a8fdc505ad2ed8a0fa3b81dfeb7d833b94d 100644 (file)
@@ -82,7 +82,7 @@ struct cpdma_desc {
 
 struct cpdma_desc_pool {
        phys_addr_t             phys;
-       u32                     hw_addr;
+       dma_addr_t              hw_addr;
        void __iomem            *iomap;         /* ioremap map */
        void                    *cpumap;        /* dma_alloc map */
        int                     desc_size, mem_size;
@@ -152,7 +152,7 @@ struct cpdma_chan {
  * abstract out these details
  */
 static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
+cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
                                int size, int align)
 {
        int bitmap_size;
@@ -176,13 +176,13 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 
        if (phys) {
                pool->phys  = phys;
-               pool->iomap = ioremap(phys, size);
+               pool->iomap = ioremap(phys, size); /* should be memremap? */
                pool->hw_addr = hw_addr;
        } else {
-               pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
+               pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
                                                  GFP_KERNEL);
-               pool->iomap = pool->cpumap;
-               pool->hw_addr = pool->phys;
+               pool->iomap = (void __iomem __force *)pool->cpumap;
+               pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }
 
        if (pool->iomap)
index 3c54a2cae5dfd09e066205b945ae937a3cfabaee..67610270d171a69d5e1ecf5e0c027fe98d053bba 100644 (file)
@@ -48,7 +48,6 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
-#include <asm/pci-bridge.h>
 #include <net/checksum.h>
 
 #include "spider_net.h"
index 7f975a2c8990fcff4491b4171cae626490d45807..b0de8ecd7fe8ed8ab030e22da50aef82e283e4b2 100644 (file)
@@ -533,8 +533,8 @@ static int dfx_register(struct device *bdev)
        const char *print_name = dev_name(bdev);
        struct net_device *dev;
        DFX_board_t       *bp;                  /* board pointer */
-       resource_size_t bar_start[3];           /* pointers to ports */
-       resource_size_t bar_len[3];             /* resource length */
+       resource_size_t bar_start[3] = {0};     /* pointers to ports */
+       resource_size_t bar_len[3] = {0};       /* resource length */
        int alloc_size;                         /* total buffer size used */
        struct resource *region;
        int err = 0;
@@ -3697,8 +3697,8 @@ static void dfx_unregister(struct device *bdev)
        int dfx_bus_pci = dev_is_pci(bdev);
        int dfx_bus_tc = DFX_BUS_TC(bdev);
        int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
-       resource_size_t bar_start[3];           /* pointers to ports */
-       resource_size_t bar_len[3];             /* resource lengths */
+       resource_size_t bar_start[3] = {0};     /* pointers to ports */
+       resource_size_t bar_len[3] = {0};       /* resource lengths */
        int             alloc_size;             /* total buffer size used */
 
        unregister_netdev(dev);
index 7456569f53c1a312d5590dc88072505413126889..028e3873c3107960c55f0bcff71d860fac0cad6e 100644 (file)
@@ -980,9 +980,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                        opts = ip_tunnel_info_opts(info);
 
                if (key->tun_flags & TUNNEL_CSUM)
-                       flags |= GENEVE_F_UDP_CSUM;
+                       flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX;
                else
-                       flags &= ~GENEVE_F_UDP_CSUM;
+                       flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
 
                err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
                                        info->options_len, opts,
@@ -1039,6 +1039,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
        return geneve_xmit_skb(skb, dev, info);
 }
 
+static int geneve_change_mtu(struct net_device *dev, int new_mtu)
+{
+       /* GENEVE overhead is not fixed, so we can't enforce a more
+        * precise max MTU.
+        */
+       if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
+               return -EINVAL;
+       dev->mtu = new_mtu;
+       return 0;
+}
+
 static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 {
        struct ip_tunnel_info *info = skb_tunnel_info(skb);
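
geneve_change_mtu() above only range-checks the request because the tunnel's option overhead is variable. A user-space sketch of that check follows; the lower bound of 68 is taken from the hunk, and 65535 is an assumed value for the kernel's IP_MAX_MTU macro.

#include <errno.h>
#include <stdio.h>

#define GENEVE_MIN_MTU 68       /* lower bound used in the hunk above */
#define IP_MAX_MTU     65535    /* assumed value of the kernel macro */

/* Accept any MTU in range; overhead varies, so no tighter upper bound. */
static int change_mtu(int *dev_mtu, int new_mtu)
{
	if (new_mtu < GENEVE_MIN_MTU || new_mtu > IP_MAX_MTU)
		return -EINVAL;
	*dev_mtu = new_mtu;
	return 0;
}

int main(void)
{
	int mtu = 1500;

	printf("9000 -> %d (mtu %d)\n", change_mtu(&mtu, 9000), mtu);
	printf("  40 -> %d (mtu %d)\n", change_mtu(&mtu, 40), mtu);
	return 0;
}
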
@@ -1083,7 +1094,7 @@ static const struct net_device_ops geneve_netdev_ops = {
        .ndo_stop               = geneve_stop,
        .ndo_start_xmit         = geneve_xmit,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
-       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_change_mtu         = geneve_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_fill_metadata_dst  = geneve_fill_metadata_dst,
@@ -1442,11 +1453,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 
        err = geneve_configure(net, dev, &geneve_remote_unspec,
                               0, 0, 0, htons(dst_port), true, 0);
-       if (err) {
-               free_netdev(dev);
-               return ERR_PTR(err);
-       }
+       if (err)
+               goto err;
+
+       /* openvswitch users expect packet sizes to be unrestricted,
+        * so set the largest MTU we can.
+        */
+       err = geneve_change_mtu(dev, IP_MAX_MTU);
+       if (err)
+               goto err;
+
        return dev;
+
+ err:
+       free_netdev(dev);
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
 
index f4130af09244fb1c6b24f230c7d79cbdaeb43a22..fcb92c0d0eb967e0deba99282152a25c804b73ea 100644 (file)
@@ -624,6 +624,7 @@ struct nvsp_message {
 #define RNDIS_PKT_ALIGN_DEFAULT 8
 
 struct multi_send_data {
+       struct sk_buff *skb; /* skb containing the pkt */
        struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
        u32 count; /* counter of batched packets */
 };
index 059fc523160107239d72080cadb1f2e7c0d246df..ec313fc08d82a3b6a3d8d79fd7a6527caa30733a 100644 (file)
@@ -841,6 +841,18 @@ static inline int netvsc_send_pkt(
        return ret;
 }
 
+/* Move packet out of multi send data (msd), and clear msd */
+static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
+                               struct sk_buff **msd_skb,
+                               struct multi_send_data *msdp)
+{
+       *msd_skb = msdp->skb;
+       *msd_send = msdp->pkt;
+       msdp->skb = NULL;
+       msdp->pkt = NULL;
+       msdp->count = 0;
+}
+
 int netvsc_send(struct hv_device *device,
                struct hv_netvsc_packet *packet,
                struct rndis_message *rndis_msg,
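
move_pkt_msd() above hands the pending packet and its skb back to the caller and resets the batching state in one place, so every flush path clears the same fields the same way. A simplified user-space sketch of that take-and-clear helper (the struct names here are invented, not the driver's):

#include <stddef.h>
#include <stdio.h>

struct pkt { int id; };

/* Pending-batch state, loosely modelled on multi_send_data. */
struct batch {
	struct pkt *pending;
	int count;
};

/* Hand the pending packet to the caller and reset the batch in one
 * place, so every flush path clears the same fields the same way. */
static void batch_take(struct batch *b, struct pkt **out)
{
	*out = b->pending;
	b->pending = NULL;
	b->count = 0;
}

int main(void)
{
	struct pkt p = { 42 };
	struct batch b = { &p, 3 };
	struct pkt *flush = NULL;

	batch_take(&b, &flush);
	printf("flushing pkt %d, pending=%p, count=%d\n",
	       flush ? flush->id : -1, (void *)b.pending, b.count);
	return 0;
}
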
@@ -855,6 +867,7 @@ int netvsc_send(struct hv_device *device,
        unsigned int section_index = NETVSC_INVALID_INDEX;
        struct multi_send_data *msdp;
        struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+       struct sk_buff *msd_skb = NULL;
        bool try_batch;
        bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
 
@@ -897,10 +910,8 @@ int netvsc_send(struct hv_device *device,
                   net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
                if (section_index != NETVSC_INVALID_INDEX) {
-                               msd_send = msdp->pkt;
-                               msdp->pkt = NULL;
-                               msdp->count = 0;
-                               msd_len = 0;
+                       move_pkt_msd(&msd_send, &msd_skb, msdp);
+                       msd_len = 0;
                }
        }
 
@@ -919,31 +930,31 @@ int netvsc_send(struct hv_device *device,
                        packet->total_data_buflen += msd_len;
                }
 
-               if (msdp->pkt)
-                       dev_kfree_skb_any(skb);
+               if (msdp->skb)
+                       dev_kfree_skb_any(msdp->skb);
 
                if (xmit_more && !packet->cp_partial) {
+                       msdp->skb = skb;
                        msdp->pkt = packet;
                        msdp->count++;
                } else {
                        cur_send = packet;
+                       msdp->skb = NULL;
                        msdp->pkt = NULL;
                        msdp->count = 0;
                }
        } else {
-               msd_send = msdp->pkt;
-               msdp->pkt = NULL;
-               msdp->count = 0;
+               move_pkt_msd(&msd_send, &msd_skb, msdp);
                cur_send = packet;
        }
 
        if (msd_send) {
-               m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb);
+               m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
 
                if (m_ret != 0) {
                        netvsc_free_send_slot(net_device,
                                              msd_send->send_buf_index);
-                       dev_kfree_skb_any(skb);
+                       dev_kfree_skb_any(msd_skb);
                }
        }
 
index 1c8db9afdcda8c0356db46db9fcfa4d971050b7b..1d3a66563bacbb0628e90693f8f3409c62018d68 100644 (file)
@@ -196,65 +196,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
        return ppi;
 }
 
-union sub_key {
-       u64 k;
-       struct {
-               u8 pad[3];
-               u8 kb;
-               u32 ka;
-       };
-};
-
-/* Toeplitz hash function
- * data: network byte order
- * return: host byte order
- */
-static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
-{
-       union sub_key subk;
-       int k_next = 4;
-       u8 dt;
-       int i, j;
-       u32 ret = 0;
-
-       subk.k = 0;
-       subk.ka = ntohl(*(u32 *)key);
-
-       for (i = 0; i < dlen; i++) {
-               subk.kb = key[k_next];
-               k_next = (k_next + 1) % klen;
-               dt = ((u8 *)data)[i];
-               for (j = 0; j < 8; j++) {
-                       if (dt & 0x80)
-                               ret ^= subk.ka;
-                       dt <<= 1;
-                       subk.k <<= 1;
-               }
-       }
-
-       return ret;
-}
-
-static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
-{
-       struct flow_keys flow;
-       int data_len;
-
-       if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
-           !(flow.basic.n_proto == htons(ETH_P_IP) ||
-             flow.basic.n_proto == htons(ETH_P_IPV6)))
-               return false;
-
-       if (flow.basic.ip_proto == IPPROTO_TCP)
-               data_len = 12;
-       else
-               data_len = 8;
-
-       *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
-
-       return true;
-}
-
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                        void *accel_priv, select_queue_fallback_t fallback)
 {
@@ -267,11 +208,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
        if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
                return 0;
 
-       if (netvsc_set_hash(&hash, skb)) {
-               q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
-                       ndev->real_num_tx_queues;
-               skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
-       }
+       hash = skb_get_hash(skb);
+       q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+               ndev->real_num_tx_queues;
 
        if (!nvsc_dev->chn_table[q_idx])
                q_idx = 0;
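
Queue selection above now uses the flow hash the stack already computed (skb_get_hash()) and maps it through the driver's send table, then clamps to the number of real TX queues. A sketch of that two-step mapping; the table size and contents below are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define SEND_TAB_SIZE 16   /* stand-in for VRSS_SEND_TAB_SIZE */

/* Indirection table lookup, then a clamp to the number of TX queues
 * actually configured -- the same two-step mapping as above. */
static unsigned int pick_queue(uint32_t hash, const uint16_t *send_table,
			       unsigned int real_num_tx_queues)
{
	return send_table[hash % SEND_TAB_SIZE] % real_num_tx_queues;
}

int main(void)
{
	uint16_t table[SEND_TAB_SIZE] = { 0, 1, 2, 3, 0, 1, 2, 3,
					  0, 1, 2, 3, 0, 1, 2, 3 };

	printf("hash 0xdeadbeef -> queue %u\n",
	       pick_queue(0xdeadbeefu, table, 4));
	return 0;
}
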
index f94392d07126c12fe7a60e0018b6214e9f72a4e2..7a3b41468a55180b1bdfe1be5d494d16eef701b9 100644 (file)
@@ -468,6 +468,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan->dev = dev;
        ipvlan->port = port;
        ipvlan->sfeatures = IPVLAN_FEATURES;
+       ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
 
        /* TODO Probably put random address here to be presented to the
index 29cbde8501ed8e09750f771204dfe9fc38503d34..d47cf14bb4a5b43bf5c94e0bdc533e1033f79b2a 100644 (file)
@@ -82,9 +82,6 @@ struct bfin_sir_self {
 
 #define DRIVER_NAME "bfin_sir"
 
-#define port_membase(port)     (((struct bfin_sir_port *)(port))->membase)
-#define get_lsr_cache(port)    (((struct bfin_sir_port *)(port))->lsr)
-#define put_lsr_cache(port, v) (((struct bfin_sir_port *)(port))->lsr = (v))
 #include <asm/bfin_serial.h>
 
 static const unsigned short per[][4] = {
index 6a57a005e0ca8162d2a0b9334440d1b9cb7d75db..94e688805dd262317327a446d8e2e460a668d75a 100644 (file)
@@ -1323,6 +1323,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 
        list_add_tail_rcu(&vlan->list, &port->vlans);
        netif_stacked_transfer_operstate(lowerdev, dev);
+       linkwatch_fire_event(dev);
 
        return 0;
 
@@ -1522,6 +1523,7 @@ static int macvlan_device_event(struct notifier_block *unused,
        port = macvlan_port_get_rtnl(dev);
 
        switch (event) {
+       case NETDEV_UP:
        case NETDEV_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list)
                        netif_stacked_transfer_operstate(vlan->lowerdev,
index 60994a83a0d68ca2e4c229fe16140b6017a9dd55..f0a77020037af0790f37b4098e4f6937c6a1ebee 100644 (file)
@@ -186,6 +186,7 @@ config MDIO_GPIO
 config MDIO_OCTEON
        tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
        depends on 64BIT
+       depends on HAS_IOMEM
        help
 
          This module provides a driver for the Octeon and ThunderX MDIO
index 180f6995277944923b2aa57bacdf0bc17b9637ad..7a240fce3a7ea09edc9f10c6b93fadd94b2de3f5 100644 (file)
@@ -846,6 +846,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
        struct skb_shared_hwtstamps *shhwtstamps = NULL;
        struct sk_buff *skb;
        unsigned long flags;
+       u8 overflow;
+
+       overflow = (phy_rxts->ns_hi >> 14) & 0x3;
+       if (overflow)
+               pr_debug("rx timestamp queue overflow, count %d\n", overflow);
 
        spin_lock_irqsave(&dp83640->rx_lock, flags);
 
@@ -888,6 +893,7 @@ static void decode_txts(struct dp83640_private *dp83640,
        struct skb_shared_hwtstamps shhwtstamps;
        struct sk_buff *skb;
        u64 ns;
+       u8 overflow;
 
        /* We must already have the skb that triggered this. */
 
@@ -897,6 +903,17 @@ static void decode_txts(struct dp83640_private *dp83640,
                pr_debug("have timestamp but tx_queue empty\n");
                return;
        }
+
+       overflow = (phy_txts->ns_hi >> 14) & 0x3;
+       if (overflow) {
+               pr_debug("tx timestamp queue overflow, count %d\n", overflow);
+               while (skb) {
+                       skb_complete_tx_timestamp(skb, NULL);
+                       skb = skb_dequeue(&dp83640->tx_queue);
+               }
+               return;
+       }
+
        ns = phy2txts(phy_txts);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
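
Both the RX and TX paths above read a 2-bit overflow counter out of bits 15:14 of ns_hi. A tiny sketch of that field extraction; the bit positions are taken from the hunks, and the sample value is made up.

#include <stdint.h>
#include <stdio.h>

/* The PHY packs a 2-bit timestamp-queue overflow counter into bits 15:14
 * of the ns_hi word, per the driver's usage above. */
static unsigned int overflow_count(uint16_t ns_hi)
{
	return (ns_hi >> 14) & 0x3;
}

int main(void)
{
	uint16_t ns_hi = 0x8123;               /* counter = 0b10 = 2 */

	if (overflow_count(ns_hi))
		printf("timestamp queue overflow, count %u\n",
		       overflow_count(ns_hi));
	return 0;
}
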
index 8763bb20988a0edf80091347987fb38086e85496..5590b9c182c967d80a186256162f30690c707506 100644 (file)
@@ -692,25 +692,29 @@ void phy_change(struct work_struct *work)
        struct phy_device *phydev =
                container_of(work, struct phy_device, phy_queue);
 
-       if (phydev->drv->did_interrupt &&
-           !phydev->drv->did_interrupt(phydev))
-               goto ignore;
+       if (phy_interrupt_is_valid(phydev)) {
+               if (phydev->drv->did_interrupt &&
+                   !phydev->drv->did_interrupt(phydev))
+                       goto ignore;
 
-       if (phy_disable_interrupts(phydev))
-               goto phy_err;
+               if (phy_disable_interrupts(phydev))
+                       goto phy_err;
+       }
 
        mutex_lock(&phydev->lock);
        if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
                phydev->state = PHY_CHANGELINK;
        mutex_unlock(&phydev->lock);
 
-       atomic_dec(&phydev->irq_disable);
-       enable_irq(phydev->irq);
+       if (phy_interrupt_is_valid(phydev)) {
+               atomic_dec(&phydev->irq_disable);
+               enable_irq(phydev->irq);
 
-       /* Reenable interrupts */
-       if (PHY_HALTED != phydev->state &&
-           phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
-               goto irq_enable_err;
+               /* Reenable interrupts */
+               if (PHY_HALTED != phydev->state &&
+                   phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
+                       goto irq_enable_err;
+       }
 
        /* reschedule state queue work to run as soon as possible */
        cancel_delayed_work_sync(&phydev->state_queue);
@@ -905,10 +909,10 @@ void phy_state_machine(struct work_struct *work)
                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
-               /* Only register a CHANGE if we are polling or ignoring
-                * interrupts and link changed since latest checking.
+               /* Only register a CHANGE if we are polling and link changed
+                * since latest checking.
                 */
-               if (!phy_interrupt_is_valid(phydev)) {
+               if (phydev->irq == PHY_POLL) {
                        old_link = phydev->link;
                        err = phy_read_status(phydev);
                        if (err)
@@ -1000,15 +1004,21 @@ void phy_state_machine(struct work_struct *work)
                   phy_state_to_str(old_state),
                   phy_state_to_str(phydev->state));
 
-       queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
-                          PHY_STATE_TIME * HZ);
+       /* Only re-schedule a PHY state machine change if we are polling the
+        * PHY; if PHY_IGNORE_INTERRUPT is set, we will be moving between
+        * states from phy_mac_interrupt().
+        */
+       if (phydev->irq == PHY_POLL)
+               queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+                                  PHY_STATE_TIME * HZ);
 }
 
 void phy_mac_interrupt(struct phy_device *phydev, int new_link)
 {
-       cancel_work_sync(&phydev->phy_queue);
        phydev->link = new_link;
-       schedule_work(&phydev->phy_queue);
+
+       /* Trigger a state machine change */
+       queue_work(system_power_efficient_wq, &phydev->phy_queue);
 }
 EXPORT_SYMBOL(phy_mac_interrupt);
 
index e485f2653c8208578d7994284dd12faabcc6c096..2e21e9366f76a184aa2c8d770557e6d9ff0db235 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/smscphy.h>
 
+struct smsc_phy_priv {
+       bool energy_enable;
+};
+
 static int smsc_phy_config_intr(struct phy_device *phydev)
 {
        int rc = phy_write (phydev, MII_LAN83C185_IM,
@@ -43,19 +47,14 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
 
 static int smsc_phy_config_init(struct phy_device *phydev)
 {
-       int __maybe_unused len;
-       struct device *dev __maybe_unused = &phydev->mdio.dev;
-       struct device_node *of_node __maybe_unused = dev->of_node;
+       struct smsc_phy_priv *priv = phydev->priv;
+
        int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
-       int enable_energy = 1;
 
        if (rc < 0)
                return rc;
 
-       if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
-               enable_energy = 0;
-
-       if (enable_energy) {
+       if (priv->energy_enable) {
                /* Enable energy detect mode for this SMSC Transceivers */
                rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
                               rc | MII_LAN83C185_EDPWRDOWN);
@@ -110,10 +109,13 @@ static int lan911x_config_init(struct phy_device *phydev)
  */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
+       struct smsc_phy_priv *priv = phydev->priv;
+
        int err = genphy_read_status(phydev);
-       int i;
 
-       if (!phydev->link) {
+       if (!phydev->link && priv->energy_enable) {
+               int i;
+
                /* Disable EDPD to wake up PHY */
                int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
                if (rc < 0)
@@ -149,6 +151,26 @@ static int lan87xx_read_status(struct phy_device *phydev)
        return err;
 }
 
+static int smsc_phy_probe(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->mdio.dev;
+       struct device_node *of_node = dev->of_node;
+       struct smsc_phy_priv *priv;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->energy_enable = true;
+
+       if (of_property_read_bool(of_node, "smsc,disable-energy-detect"))
+               priv->energy_enable = false;
+
+       phydev->priv = priv;
+
+       return 0;
+}
+
 static struct phy_driver smsc_phy_driver[] = {
 {
        .phy_id         = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -159,6 +181,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -180,6 +204,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -201,6 +227,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = lan87xx_read_status,
@@ -222,6 +250,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -242,6 +272,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = lan87xx_read_status,
@@ -263,6 +295,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = lan87xx_read_status,
index 90868ca5e3412ffebd28630f571bdddfe389cc35..ae0905ed4a32b55183568e95512c6727458a9f96 100644 (file)
@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
        return i < MAX_CALLID;
 }
 
-static int add_chan(struct pppox_sock *sock)
+static int add_chan(struct pppox_sock *sock,
+                   struct pptp_addr *sa)
 {
        static int call_id;
 
        spin_lock(&chan_lock);
-       if (!sock->proto.pptp.src_addr.call_id) {
+       if (!sa->call_id)       {
                call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
                if (call_id == MAX_CALLID) {
                        call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
                        if (call_id == MAX_CALLID)
                                goto out_err;
                }
-               sock->proto.pptp.src_addr.call_id = call_id;
-       } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
+               sa->call_id = call_id;
+       } else if (test_bit(sa->call_id, callid_bitmap)) {
                goto out_err;
+       }
 
-       set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
-       rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
+       sock->proto.pptp.src_addr = *sa;
+       set_bit(sa->call_id, callid_bitmap);
+       rcu_assign_pointer(callid_sock[sa->call_id], sock);
        spin_unlock(&chan_lock);
 
        return 0;
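
add_chan() above allocates a call ID by scanning a bitmap for a free slot starting just past the last ID handed out, wrapping around to 1 once if the first scan runs off the end. A user-space sketch of that wrap-around search, with a plain byte array in place of the kernel bitmap and find_next_zero_bit():

#include <stdio.h>

#define MAX_CALLID 64   /* the driver uses a much larger ID space */

static unsigned char used[MAX_CALLID];

/* Find a free ID, starting just past the previous allocation and wrapping
 * around once (IDs start at 1, as in add_chan()). Returns 0 on failure. */
static int alloc_call_id(void)
{
	static int last;
	int id;

	for (id = last + 1; id < MAX_CALLID; id++)
		if (!used[id])
			goto found;
	for (id = 1; id < MAX_CALLID; id++)
		if (!used[id])
			goto found;
	return 0;
found:
	used[id] = 1;
	last = id;
	return id;
}

int main(void)
{
	int a = alloc_call_id();
	int b = alloc_call_id();
	int c = alloc_call_id();

	printf("%d %d %d\n", a, b, c);
	return 0;
}
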
@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
        struct sock *sk = sock->sk;
        struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
        struct pppox_sock *po = pppox_sk(sk);
-       struct pptp_opt *opt = &po->proto.pptp;
        int error = 0;
 
        if (sockaddr_len < sizeof(struct sockaddr_pppox))
@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
 
        lock_sock(sk);
 
-       opt->src_addr = sp->sa_addr.pptp;
-       if (add_chan(po))
+       if (sk->sk_state & PPPOX_DEAD) {
+               error = -EALREADY;
+               goto out;
+       }
+
+       if (sk->sk_state & PPPOX_BOUND) {
                error = -EBUSY;
+               goto out;
+       }
+
+       if (add_chan(po, &sp->sa_addr.pptp))
+               error = -EBUSY;
+       else
+               sk->sk_state |= PPPOX_BOUND;
 
+out:
        release_sock(sk);
        return error;
 }
@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
        }
 
        opt->dst_addr = sp->sa_addr.pptp;
-       sk->sk_state = PPPOX_CONNECTED;
+       sk->sk_state |= PPPOX_CONNECTED;
 
  end:
        release_sock(sk);
index 718ceeab4dbcf397adb5bc3a6d8aa8dcf029602c..00558e1395847d51bccd4a35050c9ae1bb79f52a 100644 (file)
@@ -758,6 +758,8 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
                u64_stats_update_end(&pcpu_stats->syncp);
 
                skb->dev = team->dev;
+       } else if (res == RX_HANDLER_EXACT) {
+               this_cpu_inc(team->pcpu_stats->rx_nohandler);
        } else {
                this_cpu_inc(team->pcpu_stats->rx_dropped);
        }
@@ -1807,7 +1809,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        struct team *team = netdev_priv(dev);
        struct team_pcpu_stats *p;
        u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
-       u32 rx_dropped = 0, tx_dropped = 0;
+       u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
        unsigned int start;
        int i;
 
@@ -1828,14 +1830,16 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->tx_packets       += tx_packets;
                stats->tx_bytes         += tx_bytes;
                /*
-                * rx_dropped & tx_dropped are u32, updated
-                * without syncp protection.
+                * rx_dropped, tx_dropped & rx_nohandler are u32,
+                * updated without syncp protection.
                 */
                rx_dropped      += p->rx_dropped;
                tx_dropped      += p->tx_dropped;
+               rx_nohandler    += p->rx_nohandler;
        }
        stats->rx_dropped       = rx_dropped;
        stats->tx_dropped       = tx_dropped;
+       stats->rx_nohandler     = rx_nohandler;
        return stats;
 }
 
index 2ed53331bfb28de22019754cb46a8a27c35c43ad..1c299b8a162d7243c5ae94b96618c3763083e91e 100644 (file)
@@ -36,7 +36,7 @@
 #define DRIVER_AUTHOR  "WOOJUNG HUH <woojung.huh@microchip.com>"
 #define DRIVER_DESC    "LAN78XX USB 3.0 Gigabit Ethernet Devices"
 #define DRIVER_NAME    "lan78xx"
-#define DRIVER_VERSION "1.0.1"
+#define DRIVER_VERSION "1.0.2"
 
 #define TX_TIMEOUT_JIFFIES             (5 * HZ)
 #define THROTTLE_JIFFIES               (HZ / 8)
@@ -462,32 +462,53 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
                                   u32 length, u8 *data)
 {
        u32 val;
+       u32 saved;
        int i, ret;
+       int retval;
 
-       ret = lan78xx_eeprom_confirm_not_busy(dev);
-       if (ret)
-               return ret;
+       /* depends on chip, some EEPROM pins are muxed with LED function.
+        * disable & restore LED function to access EEPROM.
+        */
+       ret = lan78xx_read_reg(dev, HW_CFG, &val);
+       saved = val;
+       if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+               val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
+               ret = lan78xx_write_reg(dev, HW_CFG, val);
+       }
+
+       retval = lan78xx_eeprom_confirm_not_busy(dev);
+       if (retval)
+               return retval;
 
        for (i = 0; i < length; i++) {
                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
                ret = lan78xx_write_reg(dev, E2P_CMD, val);
-               if (unlikely(ret < 0))
-                       return -EIO;
+               if (unlikely(ret < 0)) {
+                       retval = -EIO;
+                       goto exit;
+               }
 
-               ret = lan78xx_wait_eeprom(dev);
-               if (ret < 0)
-                       return ret;
+               retval = lan78xx_wait_eeprom(dev);
+               if (retval < 0)
+                       goto exit;
 
                ret = lan78xx_read_reg(dev, E2P_DATA, &val);
-               if (unlikely(ret < 0))
-                       return -EIO;
+               if (unlikely(ret < 0)) {
+                       retval = -EIO;
+                       goto exit;
+               }
 
                data[i] = val & 0xFF;
                offset++;
        }
 
-       return 0;
+       retval = 0;
+exit:
+       if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+               ret = lan78xx_write_reg(dev, HW_CFG, saved);
+
+       return retval;
 }
 
 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
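
The reworked EEPROM accessor above saves HW_CFG, clears the LED-enable bits that share pins with the EEPROM, and restores the saved value on every exit path. A compact sketch of that save/modify/restore pattern; the register, bit values, and eeprom_op() below are placeholders, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_cfg = 0x0030;        /* pretend both LED enables are set */
#define LED0_EN 0x0010u                 /* illustrative bit values only */
#define LED1_EN 0x0020u

static uint32_t reg_read(void)          { return hw_cfg; }
static void     reg_write(uint32_t v)   { hw_cfg = v; }

/* Hypothetical EEPROM operation standing in for the read/write loops. */
static int eeprom_op(void)              { return 0; }

/* Save the register, drop the LED mux bits, run the operation, and
 * restore the saved value on the way out -- success or failure. */
static int eeprom_access(void)
{
	uint32_t saved = reg_read();
	int ret;

	reg_write(saved & ~(LED0_EN | LED1_EN));
	ret = eeprom_op();
	reg_write(saved);
	return ret;
}

int main(void)
{
	printf("ret=%d hw_cfg=%#x\n", eeprom_access(), (unsigned)reg_read());
	return 0;
}
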
@@ -509,44 +530,67 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
                                    u32 length, u8 *data)
 {
        u32 val;
+       u32 saved;
        int i, ret;
+       int retval;
 
-       ret = lan78xx_eeprom_confirm_not_busy(dev);
-       if (ret)
-               return ret;
+       /* depends on chip, some EEPROM pins are muxed with LED function.
+        * disable & restore LED function to access EEPROM.
+        */
+       ret = lan78xx_read_reg(dev, HW_CFG, &val);
+       saved = val;
+       if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+               val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
+               ret = lan78xx_write_reg(dev, HW_CFG, val);
+       }
+
+       retval = lan78xx_eeprom_confirm_not_busy(dev);
+       if (retval)
+               goto exit;
 
        /* Issue write/erase enable command */
        val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
        ret = lan78xx_write_reg(dev, E2P_CMD, val);
-       if (unlikely(ret < 0))
-               return -EIO;
+       if (unlikely(ret < 0)) {
+               retval = -EIO;
+               goto exit;
+       }
 
-       ret = lan78xx_wait_eeprom(dev);
-       if (ret < 0)
-               return ret;
+       retval = lan78xx_wait_eeprom(dev);
+       if (retval < 0)
+               goto exit;
 
        for (i = 0; i < length; i++) {
                /* Fill data register */
                val = data[i];
                ret = lan78xx_write_reg(dev, E2P_DATA, val);
-               if (ret < 0)
-                       return ret;
+               if (ret < 0) {
+                       retval = -EIO;
+                       goto exit;
+               }
 
                /* Send "write" command */
                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
                ret = lan78xx_write_reg(dev, E2P_CMD, val);
-               if (ret < 0)
-                       return ret;
+               if (ret < 0) {
+                       retval = -EIO;
+                       goto exit;
+               }
 
-               ret = lan78xx_wait_eeprom(dev);
-               if (ret < 0)
-                       return ret;
+               retval = lan78xx_wait_eeprom(dev);
+               if (retval < 0)
+                       goto exit;
 
                offset++;
        }
 
-       return 0;
+       retval = 0;
+exit:
+       if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+               ret = lan78xx_write_reg(dev, HW_CFG, saved);
+
+       return retval;
 }
 
 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
@@ -904,7 +948,6 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 
        if (!phydev->link && dev->link_on) {
                dev->link_on = false;
-               netif_carrier_off(dev->net);
 
                /* reset MAC */
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
@@ -914,6 +957,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
                if (unlikely(ret < 0))
                        return -EIO;
+
+               phy_mac_interrupt(phydev, 0);
        } else if (phydev->link && !dev->link_on) {
                dev->link_on = true;
 
@@ -953,7 +998,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                          ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
 
                ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
-               netif_carrier_on(dev->net);
+               phy_mac_interrupt(phydev, 1);
        }
 
        return ret;
@@ -1495,7 +1540,6 @@ done:
 static int lan78xx_mdio_init(struct lan78xx_net *dev)
 {
        int ret;
-       int i;
 
        dev->mdiobus = mdiobus_alloc();
        if (!dev->mdiobus) {
@@ -1511,10 +1555,6 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
        snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
                 dev->udev->bus->busnum, dev->udev->devnum);
 
-       /* handle our own interrupt */
-       for (i = 0; i < PHY_MAX_ADDR; i++)
-               dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
-
        switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
        case 0x78000000:
        case 0x78500000:
@@ -1558,6 +1598,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
                return -EIO;
        }
 
+       /* Enable PHY interrupts.
+        * We handle our own interrupt
+        */
+       ret = phy_read(phydev, LAN88XX_INT_STS);
+       ret = phy_write(phydev, LAN88XX_INT_MASK,
+                       LAN88XX_INT_MASK_MDINTPIN_EN_ |
+                       LAN88XX_INT_MASK_LINK_CHANGE_);
+
+       phydev->irq = PHY_IGNORE_INTERRUPT;
+
        ret = phy_connect_direct(dev->net, phydev,
                                 lan78xx_link_status_change,
                                 PHY_INTERFACE_MODE_GMII);
@@ -1580,14 +1630,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
                              SUPPORTED_Pause | SUPPORTED_Asym_Pause);
        genphy_config_aneg(phydev);
 
-       /* Workaround to enable PHY interrupt.
-        * phy_start_interrupts() is API for requesting and enabling
-        * PHY interrupt. However, USB-to-Ethernet device can't use
-        * request_irq() called in phy_start_interrupts().
-        * Set PHY to PHY_HALTED and call phy_start()
-        * to make a call to phy_enable_interrupts()
-        */
-       phy_stop(phydev);
        phy_start(phydev);
 
        netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
@@ -2221,7 +2263,9 @@ netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
        if (skb2) {
                skb_queue_tail(&dev->txq_pend, skb2);
 
-               if (skb_queue_len(&dev->txq_pend) > 10)
+               /* throttle TX path at slower than SUPER SPEED USB */
+               if ((dev->udev->speed < USB_SPEED_SUPER) &&
+                   (skb_queue_len(&dev->txq_pend) > 10))
                        netif_stop_queue(net);
        } else {
                netif_dbg(dev, tx_err, dev->net,
index 767ab11a6e9f67ce774d086101daeb1eb91838ce..c9fd52a8e6ec5f05d3459ba26994a449df764ec6 100644 (file)
@@ -146,6 +146,10 @@ struct virtnet_info {
        virtio_net_ctrl_ack ctrl_status;
        u8 ctrl_promisc;
        u8 ctrl_allmulti;
+
+       /* Ethtool settings */
+       u8 duplex;
+       u32 speed;
 };
 
 struct padded_vnet_hdr {
@@ -1376,6 +1380,58 @@ static void virtnet_get_channels(struct net_device *dev,
        channels->other_count = 0;
 }
 
+/* Check if the user is trying to change anything besides speed/duplex */
+static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
+{
+       struct ethtool_cmd diff1 = *cmd;
+       struct ethtool_cmd diff2 = {};
+
+       /* advertising and cmd are usually set, ignore port because we set it */
+       ethtool_cmd_speed_set(&diff1, 0);
+       diff1.advertising = 0;
+       diff1.duplex = 0;
+       diff1.port = 0;
+       diff1.cmd = 0;
+
+       return !memcmp(&diff1, &diff2, sizeof(diff1));
+}
+
+static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       u32 speed;
+
+       speed = ethtool_cmd_speed(cmd);
+       /* don't allow custom speed and duplex */
+       if (!ethtool_validate_speed(speed) ||
+           !ethtool_validate_duplex(cmd->duplex) ||
+           !virtnet_validate_ethtool_cmd(cmd))
+               return -EINVAL;
+       vi->speed = speed;
+       vi->duplex = cmd->duplex;
+
+       return 0;
+}
+
+static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+
+       ethtool_cmd_speed_set(cmd, vi->speed);
+       cmd->duplex = vi->duplex;
+       cmd->port = PORT_OTHER;
+
+       return 0;
+}
+
+static void virtnet_init_settings(struct net_device *dev)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+
+       vi->speed = SPEED_UNKNOWN;
+       vi->duplex = DUPLEX_UNKNOWN;
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
        .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
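
virtnet_validate_ethtool_cmd() above rejects a request that touches anything besides the whitelisted fields by zeroing those fields in a copy and memcmp()ing the result against an all-zero struct. The sketch below shows the same trick on an invented settings struct; it deliberately uses a single member type so no padding can upset memcmp (the kernel version relies on struct ethtool_cmd's fixed layout).

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Invented settings struct; all members the same type, so no padding. */
struct settings {
	unsigned int speed;
	unsigned int duplex;
	unsigned int port;
	unsigned int advertising;
	unsigned int cmd;
	unsigned int autoneg;    /* a field the user must NOT change */
};

/* Zero every field the caller may legitimately set, then require the
 * remainder of the struct to compare equal to an all-zero struct. */
static bool only_allowed_fields_set(const struct settings *req)
{
	struct settings diff1 = *req;
	struct settings diff2;

	memset(&diff2, 0, sizeof(diff2));
	diff1.speed = 0;
	diff1.duplex = 0;
	diff1.port = 0;
	diff1.advertising = 0;
	diff1.cmd = 0;

	return memcmp(&diff1, &diff2, sizeof(diff1)) == 0;
}

int main(void)
{
	struct settings ok  = { .speed = 1000, .duplex = 1 };
	struct settings bad = { .speed = 1000, .autoneg = 1 };

	printf("ok: %d, bad: %d\n", only_allowed_fields_set(&ok),
	       only_allowed_fields_set(&bad));
	return 0;
}
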
@@ -1383,6 +1439,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_channels = virtnet_set_channels,
        .get_channels = virtnet_get_channels,
        .get_ts_info = ethtool_op_get_ts_info,
+       .get_settings = virtnet_get_settings,
+       .set_settings = virtnet_set_settings,
 };
 
 #define MIN_MTU 68
@@ -1855,6 +1913,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
        netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
 
+       virtnet_init_settings(dev);
+
        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
index 66addb7a7911beb33f17c916caa7bb48ae84507e..76e1fc9d8748e61d2b9cfbab91e3adf862e22de1 100644 (file)
@@ -877,6 +877,24 @@ static int vrf_fillinfo(struct sk_buff *skb,
        return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
 }
 
+static size_t vrf_get_slave_size(const struct net_device *bond_dev,
+                                const struct net_device *slave_dev)
+{
+       return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
+}
+
+static int vrf_fill_slave_info(struct sk_buff *skb,
+                              const struct net_device *vrf_dev,
+                              const struct net_device *slave_dev)
+{
+       struct net_vrf *vrf = netdev_priv(vrf_dev);
+
+       if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
        [IFLA_VRF_TABLE] = { .type = NLA_U32 },
 };
@@ -890,6 +908,9 @@ static struct rtnl_link_ops vrf_link_ops __read_mostly = {
        .validate       = vrf_validate,
        .fill_info      = vrf_fillinfo,
 
+       .get_slave_size  = vrf_get_slave_size,
+       .fill_slave_info = vrf_fill_slave_info,
+
        .newlink        = vrf_newlink,
        .dellink        = vrf_dellink,
        .setup          = vrf_setup,
index 2d88c799d2ac32a44c2bc304e19280f2e471ad7b..57d219fc3d644d8a87b63a9e662870e5e1e3adbd 100644 (file)
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 static int vxlan_net_id;
 static struct rtnl_link_ops vxlan_link_ops;
 
-static const u8 all_zeros_mac[ETH_ALEN];
+static const u8 all_zeros_mac[ETH_ALEN + 2];
 
 static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
@@ -1684,18 +1684,14 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
        gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
-                          struct sk_buff *skb,
-                          struct net_device *dev, struct in6_addr *saddr,
-                          struct in6_addr *daddr, __u8 prio, __u8 ttl,
-                          __be16 src_port, __be16 dst_port, __be32 vni,
-                          struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
+                          int iphdr_len, __be32 vni,
+                          struct vxlan_metadata *md, u32 vxflags,
+                          bool udp_sum)
 {
        struct vxlanhdr *vxh;
        int min_headroom;
        int err;
-       bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
        int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
        u16 hdrlen = sizeof(struct vxlanhdr);
 
@@ -1712,93 +1708,8 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                }
        }
 
-       skb_scrub_packet(skb, xnet);
-
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
-                       + VXLAN_HLEN + sizeof(struct ipv6hdr)
-                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-
-       /* Need space for new headers (invalidates iph ptr) */
-       err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err)) {
-               kfree_skb(skb);
-               goto err;
-       }
-
-       skb = vlan_hwaccel_push_inside(skb);
-       if (WARN_ON(!skb)) {
-               err = -ENOMEM;
-               goto err;
-       }
-
-       skb = iptunnel_handle_offloads(skb, udp_sum, type);
-       if (IS_ERR(skb)) {
-               err = -EINVAL;
-               goto err;
-       }
-
-       vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
-       vxh->vx_flags = htonl(VXLAN_HF_VNI);
-       vxh->vx_vni = vni;
-
-       if (type & SKB_GSO_TUNNEL_REMCSUM) {
-               u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
-                          VXLAN_RCO_SHIFT;
-
-               if (skb->csum_offset == offsetof(struct udphdr, check))
-                       data |= VXLAN_RCO_UDP;
-
-               vxh->vx_vni |= htonl(data);
-               vxh->vx_flags |= htonl(VXLAN_HF_RCO);
-
-               if (!skb_is_gso(skb)) {
-                       skb->ip_summed = CHECKSUM_NONE;
-                       skb->encapsulation = 0;
-               }
-       }
-
-       if (vxflags & VXLAN_F_GBP)
-               vxlan_build_gbp_hdr(vxh, vxflags, md);
-
-       skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
-       udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
-                            ttl, src_port, dst_port,
-                            !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
-       return 0;
-err:
-       dst_release(dst);
-       return err;
-}
-#endif
-
-static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-                         __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                         __be16 src_port, __be16 dst_port, __be32 vni,
-                         struct vxlan_metadata *md, bool xnet, u32 vxflags)
-{
-       struct vxlanhdr *vxh;
-       int min_headroom;
-       int err;
-       bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
-       int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-       u16 hdrlen = sizeof(struct vxlanhdr);
-
-       if ((vxflags & VXLAN_F_REMCSUM_TX) &&
-           skb->ip_summed == CHECKSUM_PARTIAL) {
-               int csum_start = skb_checksum_start_offset(skb);
-
-               if (csum_start <= VXLAN_MAX_REMCSUM_START &&
-                   !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
-                   (skb->csum_offset == offsetof(struct udphdr, check) ||
-                    skb->csum_offset == offsetof(struct tcphdr, check))) {
-                       udp_sum = false;
-                       type |= SKB_GSO_TUNNEL_REMCSUM;
-               }
-       }
-
-       min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
-                       + VXLAN_HLEN + sizeof(struct iphdr)
+                       + VXLAN_HLEN + iphdr_len
                        + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        /* Need space for new headers (invalidates iph ptr) */
@@ -1840,13 +1751,30 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
                vxlan_build_gbp_hdr(vxh, vxflags, md);
 
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
-       udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df,
-                           src_port, dst_port, xnet,
-                           !(vxflags & VXLAN_F_UDP_CSUM));
        return 0;
 }
 
+static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
+                                     struct sk_buff *skb, int oif, u8 tos,
+                                     __be32 daddr, __be32 *saddr)
+{
+       struct rtable *rt = NULL;
+       struct flowi4 fl4;
+
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.flowi4_oif = oif;
+       fl4.flowi4_tos = RT_TOS(tos);
+       fl4.flowi4_mark = skb->mark;
+       fl4.flowi4_proto = IPPROTO_UDP;
+       fl4.daddr = daddr;
+       fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+
+       rt = ip_route_output_key(vxlan->net, &fl4);
+       if (!IS_ERR(rt))
+               *saddr = fl4.saddr;
+       return rt;
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
                                          struct sk_buff *skb, int oif,
@@ -1928,7 +1856,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        struct sock *sk;
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
-       struct flowi4 fl4;
        union vxlan_addr *dst;
        union vxlan_addr remote_ip;
        struct vxlan_metadata _md;
@@ -1939,6 +1866,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        __u8 tos, ttl;
        int err;
        u32 flags = vxlan->flags;
+       bool udp_sum = false;
+       bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
 
        info = skb_tunnel_info(skb);
 
@@ -1985,13 +1914,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                     vxlan->cfg.port_max, true);
 
        if (info) {
-               if (info->key.tun_flags & TUNNEL_CSUM)
-                       flags |= VXLAN_F_UDP_CSUM;
-               else
-                       flags &= ~VXLAN_F_UDP_CSUM;
-
                ttl = info->key.ttl;
                tos = info->key.tos;
+               udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
 
                if (info->options_len)
                        md = ip_tunnel_info_opts(info);
@@ -2000,22 +1925,22 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        }
 
        if (dst->sa.sa_family == AF_INET) {
+               __be32 saddr;
+
                if (!vxlan->vn4_sock)
                        goto drop;
                sk = vxlan->vn4_sock->sock->sk;
 
-               if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
-                       df = htons(IP_DF);
-
-               memset(&fl4, 0, sizeof(fl4));
-               fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
-               fl4.flowi4_tos = RT_TOS(tos);
-               fl4.flowi4_mark = skb->mark;
-               fl4.flowi4_proto = IPPROTO_UDP;
-               fl4.daddr = dst->sin.sin_addr.s_addr;
-               fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+               if (info) {
+                       if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+                               df = htons(IP_DF);
+               } else {
+                       udp_sum = !!(flags & VXLAN_F_UDP_CSUM);
+               }
 
-               rt = ip_route_output_key(vxlan->net, &fl4);
+               rt = vxlan_get_route(vxlan, skb,
+                                    rdst ? rdst->remote_ifindex : 0, tos,
+                                    dst->sin.sin_addr.s_addr, &saddr);
                if (IS_ERR(rt)) {
                        netdev_dbg(dev, "no route to %pI4\n",
                                   &dst->sin.sin_addr.s_addr);
@@ -2047,16 +1972,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-               err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
-                                    dst->sin.sin_addr.s_addr, tos, ttl, df,
-                                    src_port, dst_port, htonl(vni << 8), md,
-                                    !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                                    flags);
-               if (err < 0) {
-                       /* skb is already freed. */
-                       skb = NULL;
-                       goto rt_tx_error;
-               }
+               err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
+                                     htonl(vni << 8), md, flags, udp_sum);
+               if (err < 0)
+                       goto xmit_tx_error;
+
+               udp_tunnel_xmit_skb(rt, sk, skb, saddr,
+                                   dst->sin.sin_addr.s_addr, tos, ttl, df,
+                                   src_port, dst_port, xnet, !udp_sum);
 #if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct dst_entry *ndst;
@@ -2101,11 +2024,20 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        return;
                }
 
+               if (!info)
+                       udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
-               err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
-                                     0, ttl, src_port, dst_port, htonl(vni << 8), md,
-                                     !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                                     flags);
+               skb_scrub_packet(skb, xnet);
+               err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
+                                     htonl(vni << 8), md, flags, udp_sum);
+               if (err < 0) {
+                       dst_release(ndst);
+                       return;
+               }
+               udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
+                                    &saddr, &dst->sin6.sin6_addr,
+                                    0, ttl, src_port, dst_port, !udp_sum);
 #endif
        }
 
@@ -2115,6 +2047,9 @@ drop:
        dev->stats.tx_dropped++;
        goto tx_free;
 
+xmit_tx_error:
+       /* skb is already freed. */
+       skb = NULL;
 rt_tx_error:
        ip_rt_put(rt);
 tx_error:
@@ -2358,52 +2293,41 @@ static void vxlan_set_multicast_list(struct net_device *dev)
 {
 }
 
-static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
+static int __vxlan_change_mtu(struct net_device *dev,
+                             struct net_device *lowerdev,
+                             struct vxlan_rdst *dst, int new_mtu, bool strict)
 {
-       struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_rdst *dst = &vxlan->default_dst;
-       struct net_device *lowerdev;
-       int max_mtu;
+       int max_mtu = IP_MAX_MTU;
 
-       lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
-       if (lowerdev == NULL)
-               return eth_change_mtu(dev, new_mtu);
+       if (lowerdev)
+               max_mtu = lowerdev->mtu;
 
        if (dst->remote_ip.sa.sa_family == AF_INET6)
-               max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
+               max_mtu -= VXLAN6_HEADROOM;
        else
-               max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
+               max_mtu -= VXLAN_HEADROOM;
 
-       if (new_mtu < 68 || new_mtu > max_mtu)
+       if (new_mtu < 68)
                return -EINVAL;
 
+       if (new_mtu > max_mtu) {
+               if (strict)
+                       return -EINVAL;
+
+               new_mtu = max_mtu;
+       }
+
        dev->mtu = new_mtu;
        return 0;
 }
 
-static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
-                               struct ip_tunnel_info *info,
-                               __be16 sport, __be16 dport)
+static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct rtable *rt;
-       struct flowi4 fl4;
-
-       memset(&fl4, 0, sizeof(fl4));
-       fl4.flowi4_tos = RT_TOS(info->key.tos);
-       fl4.flowi4_mark = skb->mark;
-       fl4.flowi4_proto = IPPROTO_UDP;
-       fl4.daddr = info->key.u.ipv4.dst;
-
-       rt = ip_route_output_key(vxlan->net, &fl4);
-       if (IS_ERR(rt))
-               return PTR_ERR(rt);
-       ip_rt_put(rt);
-
-       info->key.u.ipv4.src = fl4.saddr;
-       info->key.tp_src = sport;
-       info->key.tp_dst = dport;
-       return 0;
+       struct vxlan_rdst *dst = &vxlan->default_dst;
+       struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+                                                        dst->remote_ifindex);
+       return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
 }
 
 static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
@@ -2417,9 +2341,16 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
        dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
 
        if (ip_tunnel_info_af(info) == AF_INET) {
+               struct rtable *rt;
+
                if (!vxlan->vn4_sock)
                        return -EINVAL;
-               return egress_ipv4_tun_info(dev, skb, info, sport, dport);
+               rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
+                                    info->key.u.ipv4.dst,
+                                    &info->key.u.ipv4.src);
+               if (IS_ERR(rt))
+                       return PTR_ERR(rt);
+               ip_rt_put(rt);
        } else {
 #if IS_ENABLED(CONFIG_IPV6)
                struct dst_entry *ndst;
@@ -2432,13 +2363,12 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
                if (IS_ERR(ndst))
                        return PTR_ERR(ndst);
                dst_release(ndst);
-
-               info->key.tp_src = sport;
-               info->key.tp_dst = dport;
 #else /* !CONFIG_IPV6 */
                return -EPFNOSUPPORT;
 #endif
        }
+       info->key.tp_src = sport;
+       info->key.tp_dst = dport;
        return 0;
 }
 
@@ -2756,6 +2686,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        int err;
        bool use_ipv6 = false;
        __be16 default_port = vxlan->cfg.dst_port;
+       struct net_device *lowerdev = NULL;
 
        vxlan->net = src_net;
 
@@ -2776,9 +2707,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        }
 
        if (conf->remote_ifindex) {
-               struct net_device *lowerdev
-                        = __dev_get_by_index(src_net, conf->remote_ifindex);
-
+               lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
                dst->remote_ifindex = conf->remote_ifindex;
 
                if (!lowerdev) {
@@ -2802,6 +2731,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                needed_headroom = lowerdev->hard_header_len;
        }
 
+       if (conf->mtu) {
+               err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
+               if (err)
+                       return err;
+       }
+
        if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
                needed_headroom += VXLAN6_HEADROOM;
        else
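Two themes run through the vxlan changes above: the IPv4 and IPv6 transmit paths are folded into a shared vxlan_build_skb() plus a vxlan_get_route() helper, and MTU handling gains a strict/non-strict mode that rejects an oversized MTU when userspace asks for it explicitly but clamps it during initial device configuration. A standalone sketch of that clamp-versus-reject decision, using placeholder header sizes rather than the real VXLAN_HEADROOM values:

/*
 * Sketch (not from the patch) of the behaviour __vxlan_change_mtu()
 * implements.  "strict" corresponds to the ndo_change_mtu path; the
 * non-strict path is the conf->mtu case at device setup time.
 */
#include <stdbool.h>
#include <errno.h>

#define SKETCH_MIN_MTU     68
#define SKETCH_TUNNEL_HDR  50          /* placeholder, not VXLAN_HEADROOM */

static int sketch_change_mtu(int lowerdev_mtu, int *dev_mtu,
			     int new_mtu, bool strict)
{
	int max_mtu = lowerdev_mtu - SKETCH_TUNNEL_HDR;

	if (new_mtu < SKETCH_MIN_MTU)
		return -EINVAL;

	if (new_mtu > max_mtu) {
		if (strict)
			return -EINVAL;   /* explicit request: refuse */
		new_mtu = max_mtu;        /* device setup: clamp */
	}

	*dev_mtu = new_mtu;
	return 0;
}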
index 6146a293601a7fea098a353aeb11fc803f5523a4..368de5e5a04f64c46003a131a49546724d6fc7a6 100644 (file)
@@ -6366,12 +6366,13 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
-                              enum ieee80211_ampdu_mlme_action action,
-                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                              u8 buf_size, bool amsdu)
+                              struct ieee80211_ampdu_params *params)
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
                   arvif->vdev_id, sta->addr, tid, action);
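This hunk and the ones that follow apply the same mechanical refactor to every ampdu_action callback: the long argument list (action, sta, tid, ssn, buf_size, amsdu) is folded into a single struct ieee80211_ampdu_params, and each driver unpacks only the members it uses. A rough sketch of the pattern with stand-in names; only the fields the drivers in this series actually read are listed, the real structure lives in include/net/mac80211.h:

#include <stdbool.h>
#include <stdint.h>

enum sketch_ampdu_action { SKETCH_AMPDU_RX_START, SKETCH_AMPDU_TX_START };

struct sketch_ampdu_params {
	void *sta;                        /* station the session is for    */
	enum sketch_ampdu_action action;  /* start/stop, rx/tx             */
	uint16_t tid;                     /* traffic identifier            */
	uint16_t ssn;                     /* starting sequence number      */
	uint8_t  buf_size;                /* reorder buffer size           */
	bool     amsdu;                   /* A-MSDU allowed in the session */
};

/* old style: every new knob forced a prototype change in every driver */
int old_ampdu_action(void *hw, void *vif, enum sketch_ampdu_action action,
		     void *sta, uint16_t tid, uint16_t *ssn,
		     uint8_t buf_size, bool amsdu);

/* new style: one struct, unpacked locally as in the hunks above */
static int new_ampdu_action(void *hw, void *vif,
			    struct sketch_ampdu_params *params)
{
	uint16_t tid = params->tid;
	uint16_t *ssn = &params->ssn;

	(void)hw; (void)vif; (void)tid; (void)ssn;
	return 0;
}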
index a7afdeee698c690b9decf1e5da83c884071d9690..73fb4232f9f28c09e6a3f44090dc401684be8747 100644 (file)
@@ -150,18 +150,18 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
                return -EIO;
        }
 
-       if (magic == AR5416_EEPROM_MAGIC) {
-               *swap_needed = false;
-       } else if (swab16(magic) == AR5416_EEPROM_MAGIC) {
+       *swap_needed = false;
+       if (swab16(magic) == AR5416_EEPROM_MAGIC) {
                if (ah->ah_flags & AH_NO_EEP_SWAP) {
                        ath_info(common,
                                 "Ignoring endianness difference in EEPROM magic bytes.\n");
-
-                       *swap_needed = false;
                } else {
                        *swap_needed = true;
                }
-       } else {
+       } else if (magic != AR5416_EEPROM_MAGIC) {
+               if (ath9k_hw_use_flash(ah))
+                       return 0;
+
                ath_err(common,
                        "Invalid EEPROM Magic (0x%04x).\n", magic);
                return -EINVAL;
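In short, the reworked magic check above has three outcomes: a matching magic value leaves *swap_needed false; a byte-swapped magic requests swapping unless AH_NO_EEP_SWAP is set, in which case the endianness difference is only logged; and any other value is now tolerated on flash-based devices (ath9k_hw_use_flash() returns true, so the function returns 0) but still rejected with -EINVAL elsewhere.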
index fe1fd1a5ae1502a2c5d2947d1a40ff3ee6193d76..639294a9e34df6b2461b4c4c8052af75cbf2b258 100644 (file)
@@ -1657,13 +1657,14 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw,
 
 static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
-                                 enum ieee80211_ampdu_mlme_action action,
-                                 struct ieee80211_sta *sta,
-                                 u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+                                 struct ieee80211_ampdu_params *params)
 {
        struct ath9k_htc_priv *priv = hw->priv;
        struct ath9k_htc_sta *ista;
        int ret = 0;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
 
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
index c1b33fdcca0875a20cd2be3b54b21e46c37b725c..cf58a304e9f0aa08d577dc00d49361b568f450d3 100644 (file)
@@ -1864,14 +1864,16 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif,
-                             enum ieee80211_ampdu_mlme_action action,
-                             struct ieee80211_sta *sta,
-                             u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+                             struct ieee80211_ampdu_params *params)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        bool flush = false;
        int ret = 0;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        mutex_lock(&sc->mutex);
 
index 19d3d64416bf66825eb113590474b4e4a68f8ec2..4d1527a2e292a2ba2d29907554625cf0fbb980f4 100644 (file)
@@ -1413,10 +1413,12 @@ static void carl9170_ampdu_work(struct work_struct *work)
 
 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
-                                   enum ieee80211_ampdu_mlme_action action,
-                                   struct ieee80211_sta *sta,
-                                   u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+                                   struct ieee80211_ampdu_params *params)
 {
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
        struct ar9170 *ar = hw->priv;
        struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
        struct carl9170_sta_tid *tid_info;
index 7c169abdbafee95ccfe8f461e670fbb8ab5ca5eb..a27279c2c6950913c0b1f14fd4cacdd3bc650428 100644 (file)
@@ -857,12 +857,14 @@ static int wcn36xx_resume(struct ieee80211_hw *hw)
 
 static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
                    struct ieee80211_vif *vif,
-                   enum ieee80211_ampdu_mlme_action action,
-                   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                   u8 buf_size, bool amsdu)
+                   struct ieee80211_ampdu_params *params)
 {
        struct wcn36xx *wcn = hw->priv;
        struct wcn36xx_sta *sta_priv = NULL;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
                    action, tid);
index 20d07ef679e89d467170abae532819f36f283821..1f231cd0813861fa24bafdb378e2e8cbb0ed3ae1 100644 (file)
@@ -422,6 +422,11 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        if (sme->privacy && !rsn_eid)
                wil_info(wil, "WSC connection\n");
 
+       if (sme->pbss) {
+               wil_err(wil, "connect - PBSS not yet supported\n");
+               return -EOPNOTSUPP;
+       }
+
        bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
                               sme->ssid, sme->ssid_len,
                               IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
@@ -870,6 +875,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                return -EINVAL;
        }
 
+       if (info->pbss) {
+               wil_err(wil, "AP: PBSS not yet supported\n");
+               return -EOPNOTSUPP;
+       }
+
        switch (info->hidden_ssid) {
        case NL80211_HIDDEN_SSID_NOT_IN_USE:
                hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
index ec013fbd6a81cda4a0cce04a74b469c1f3cda532..c279211e49f91440c7cd48a52feb93a9cbd3580d 100644 (file)
@@ -1215,10 +1215,10 @@ void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev)
        case B43_BUS_BCMA:
                bcma_cc = &dev->dev->bdev->bus->drv_cc;
 
-               bcma_cc_write32(bcma_cc, BCMA_CC_CHIPCTL_ADDR, 0);
-               bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4);
-               bcma_cc_set32(bcma_cc, BCMA_CC_CHIPCTL_DATA, 0x4);
-               bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4);
+               bcma_cc_write32(bcma_cc, BCMA_CC_PMU_CHIPCTL_ADDR, 0);
+               bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4);
+               bcma_cc_set32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, 0x4);
+               bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4);
                break;
 #endif
 #ifdef CONFIG_B43_SSB
index 53637399bb99d0743663c50cd674e2a1711cfc3b..b98db8a0a069bdf71e3c09dcf873aa6905700823 100644 (file)
@@ -879,11 +879,24 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
        return 0;
 }
 
-static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
+void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
 {
+       struct sdio_func *func;
+       struct mmc_host *host;
+       uint max_blocks;
        uint nents;
        int err;
 
+       func = sdiodev->func[2];
+       host = func->card->host;
+       sdiodev->sg_support = host->max_segs > 1;
+       max_blocks = min_t(uint, host->max_blk_count, 511u);
+       sdiodev->max_request_size = min_t(uint, host->max_req_size,
+                                         max_blocks * func->cur_blksize);
+       sdiodev->max_segment_count = min_t(uint, host->max_segs,
+                                          SG_MAX_SINGLE_ALLOC);
+       sdiodev->max_segment_size = host->max_seg_size;
+
        if (!sdiodev->sg_support)
                return;
 
@@ -1021,9 +1034,6 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
 
 static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 {
-       struct sdio_func *func;
-       struct mmc_host *host;
-       uint max_blocks;
        int ret = 0;
 
        sdiodev->num_funcs = 2;
@@ -1054,26 +1064,6 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
                goto out;
        }
 
-       /*
-        * determine host related variables after brcmf_sdiod_probe()
-        * as func->cur_blksize is properly set and F2 init has been
-        * completed successfully.
-        */
-       func = sdiodev->func[2];
-       host = func->card->host;
-       sdiodev->sg_support = host->max_segs > 1;
-       max_blocks = min_t(uint, host->max_blk_count, 511u);
-       sdiodev->max_request_size = min_t(uint, host->max_req_size,
-                                         max_blocks * func->cur_blksize);
-       sdiodev->max_segment_count = min_t(uint, host->max_segs,
-                                          SG_MAX_SINGLE_ALLOC);
-       sdiodev->max_segment_size = host->max_seg_size;
-
-       /* allocate scatter-gather table. sg support
-        * will be disabled upon allocation failure.
-        */
-       brcmf_sdiod_sgtable_alloc(sdiodev);
-
        ret = brcmf_sdiod_freezer_attach(sdiodev);
        if (ret)
                goto out;
@@ -1084,7 +1074,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
                ret = -ENODEV;
                goto out;
        }
-       brcmf_sdiod_host_fixup(host);
+       brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
 out:
        if (ret)
                brcmf_sdiod_remove(sdiodev);
index 7b01e4ddb315b70ca0432371ecdb695808c2a4ac..d00c5c1d58bf1ce3c13c56cc22d40429be2d4daa 100644 (file)
@@ -247,7 +247,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
        brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
                  ch->chan->center_freq, ch->center_freq1, ch->width);
        ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
-       primary_offset = ch->center_freq1 - ch->chan->center_freq;
+       primary_offset = ch->chan->center_freq - ch->center_freq1;
        switch (ch->width) {
        case NL80211_CHAN_WIDTH_20:
        case NL80211_CHAN_WIDTH_20_NOHT:
@@ -256,24 +256,21 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
                break;
        case NL80211_CHAN_WIDTH_40:
                ch_inf.bw = BRCMU_CHAN_BW_40;
-               if (primary_offset < 0)
+               if (primary_offset > 0)
                        ch_inf.sb = BRCMU_CHAN_SB_U;
                else
                        ch_inf.sb = BRCMU_CHAN_SB_L;
                break;
        case NL80211_CHAN_WIDTH_80:
                ch_inf.bw = BRCMU_CHAN_BW_80;
-               if (primary_offset < 0) {
-                       if (primary_offset < -CH_10MHZ_APART)
-                               ch_inf.sb = BRCMU_CHAN_SB_UU;
-                       else
-                               ch_inf.sb = BRCMU_CHAN_SB_UL;
-               } else {
-                       if (primary_offset > CH_10MHZ_APART)
-                               ch_inf.sb = BRCMU_CHAN_SB_LL;
-                       else
-                               ch_inf.sb = BRCMU_CHAN_SB_LU;
-               }
+               if (primary_offset == -30)
+                       ch_inf.sb = BRCMU_CHAN_SB_LL;
+               else if (primary_offset == -10)
+                       ch_inf.sb = BRCMU_CHAN_SB_LU;
+               else if (primary_offset == 10)
+                       ch_inf.sb = BRCMU_CHAN_SB_UL;
+               else
+                       ch_inf.sb = BRCMU_CHAN_SB_UU;
                break;
        case NL80211_CHAN_WIDTH_80P80:
        case NL80211_CHAN_WIDTH_160:
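A worked example makes the new sideband mapping concrete: an 80 MHz chandef centred at 5210 MHz (channel 42) can have its 20 MHz primary at 5180, 5200, 5220 or 5240 MHz, so primary_offset = control - center_freq1 evaluates to -30, -10, +10 or +30, which the switch above maps to BRCMU_CHAN_SB_LL, _LU, _UL and _UU respectively: the lowest primary is the lower-lower sideband and the highest is the upper-upper.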
index 82e4382eb177c98733f79b79f6dc6e78af3066d7..0e8f2a079907d7c9deb8cef9eaafa61877eaf1b3 100644 (file)
@@ -803,7 +803,14 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
                                *eromaddr -= 4;
                                return -EFAULT;
                        }
-               } while (desc != DMP_DESC_ADDRESS);
+               } while (desc != DMP_DESC_ADDRESS &&
+                        desc != DMP_DESC_COMPONENT);
+
+               /* stop if we crossed current component border */
+               if (desc == DMP_DESC_COMPONENT) {
+                       *eromaddr -= 4;
+                       return 0;
+               }
 
                /* skip upper 32-bit address descriptor */
                if (val & DMP_DESC_ADDRSIZE_GT32)
@@ -876,7 +883,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
                rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
 
                /* need core with ports */
-               if (nmw + nsw == 0)
+               if (nmw + nsw == 0 &&
+                   id != BCMA_CORE_PMU)
                        continue;
 
                /* try to obtain register address info */
@@ -1006,6 +1014,7 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
 {
        struct brcmf_chip *pub;
        struct brcmf_core_priv *cc;
+       struct brcmf_core *pmu;
        u32 base;
        u32 val;
        int ret = 0;
@@ -1017,11 +1026,15 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
        /* get chipcommon capabilites */
        pub->cc_caps = chip->ops->read32(chip->ctx,
                                         CORE_CC_REG(base, capabilities));
+       pub->cc_caps_ext = chip->ops->read32(chip->ctx,
+                                            CORE_CC_REG(base,
+                                                        capabilities_ext));
 
        /* get pmu caps & rev */
+       pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */
        if (pub->cc_caps & CC_CAP_PMU) {
                val = chip->ops->read32(chip->ctx,
-                                       CORE_CC_REG(base, pmucapabilities));
+                                       CORE_CC_REG(pmu->base, pmucapabilities));
                pub->pmurev = val & PCAP_REV_MASK;
                pub->pmucaps = val;
        }
@@ -1120,6 +1133,23 @@ struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
        return &cc->pub;
 }
 
+struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub)
+{
+       struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub);
+       struct brcmf_core *pmu;
+
+       /* See if there is separated PMU core available */
+       if (cc->rev >= 35 &&
+           pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
+               pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU);
+               if (pmu)
+                       return pmu;
+       }
+
+       /* Fallback to ChipCommon core for older hardware */
+       return cc;
+}
+
 bool brcmf_chip_iscoreup(struct brcmf_core *pub)
 {
        struct brcmf_core_priv *core;
@@ -1290,6 +1320,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
 {
        u32 base, addr, reg, pmu_cc3_mask = ~0;
        struct brcmf_chip_priv *chip;
+       struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -1309,9 +1340,9 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
        case BRCM_CC_4335_CHIP_ID:
        case BRCM_CC_4339_CHIP_ID:
                /* read PMU chipcontrol register 3 */
-               addr = CORE_CC_REG(base, chipcontrol_addr);
+               addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
                chip->ops->write32(chip->ctx, addr, 3);
-               addr = CORE_CC_REG(base, chipcontrol_data);
+               addr = CORE_CC_REG(pmu->base, chipcontrol_data);
                reg = chip->ops->read32(chip->ctx, addr);
                return (reg & pmu_cc3_mask) != 0;
        case BRCM_CC_43430_CHIP_ID:
@@ -1319,12 +1350,12 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
                reg = chip->ops->read32(chip->ctx, addr);
                return reg != 0;
        default:
-               addr = CORE_CC_REG(base, pmucapabilities_ext);
+               addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
                reg = chip->ops->read32(chip->ctx, addr);
                if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
                        return false;
 
-               addr = CORE_CC_REG(base, retention_ctl);
+               addr = CORE_CC_REG(pmu->base, retention_ctl);
                reg = chip->ops->read32(chip->ctx, addr);
                return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
                               PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
index f6b5feea23d20508abf4fc0e6670adf51b966d33..dd0ec3eba6a98505e92005789bcbed7fa53dedb1 100644 (file)
@@ -27,6 +27,7 @@
  * @chip: chip identifier.
  * @chiprev: chip revision.
  * @cc_caps: chipcommon core capabilities.
+ * @cc_caps_ext: chipcommon core extended capabilities.
  * @pmucaps: PMU capabilities.
  * @pmurev: PMU revision.
  * @rambase: RAM base address (only applicable for ARM CR4 chips).
@@ -38,6 +39,7 @@ struct brcmf_chip {
        u32 chip;
        u32 chiprev;
        u32 cc_caps;
+       u32 cc_caps_ext;
        u32 pmucaps;
        u32 pmurev;
        u32 rambase;
@@ -83,6 +85,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
 void brcmf_chip_detach(struct brcmf_chip *chip);
 struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
 struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub);
 bool brcmf_chip_iscoreup(struct brcmf_core *core);
 void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
 void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
index 4265b50faa98126400434f1c28995ded21e9e7f5..cfee477a6eb1f4b3f4ce5af01d3d19d37d768fb6 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/netdevice.h>
+#include <linux/module.h>
 #include <brcmu_wifi.h>
 #include <brcmu_utils.h>
 #include "core.h"
index 1365c12b78fc39632f2e30c99092092e7a0256a8..7269056d0044d82727f57cf2b05d60f5f93118a7 100644 (file)
@@ -93,7 +93,7 @@ static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
        c = nvp->data[nvp->pos];
        if (c == '\n')
                return COMMENT;
-       if (is_whitespace(c))
+       if (is_whitespace(c) || c == '\0')
                goto proceed;
        if (c == '#')
                return COMMENT;
index 0480b70e3eb84b5b5e627dc022a9589455f85b4d..d5f9ef470447f690d7a26c11f27720196ed8198c 100644 (file)
@@ -1951,6 +1951,9 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = {
 
 #define BRCMF_PCIE_DEVICE(dev_id)      { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
        PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
+#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \
+       BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
+       subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
 
 static struct pci_device_id brcmf_pcie_devid_table[] = {
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
@@ -1966,6 +1969,7 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
+       BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
index dd6614332836a86443e505d9ee6394ec26ccfb6b..c790fa89db0591d1e1015b9b21383394d03ea027 100644 (file)
@@ -45,8 +45,8 @@
 #include "chip.h"
 #include "firmware.h"
 
-#define DCMD_RESP_TIMEOUT      msecs_to_jiffies(2000)
-#define CTL_DONE_TIMEOUT       msecs_to_jiffies(2000)
+#define DCMD_RESP_TIMEOUT      msecs_to_jiffies(2500)
+#define CTL_DONE_TIMEOUT       msecs_to_jiffies(2500)
 
 #ifdef DEBUG
 
@@ -3615,7 +3615,6 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
        const struct sdiod_drive_str *str_tab = NULL;
        u32 str_mask;
        u32 str_shift;
-       u32 base;
        u32 i;
        u32 drivestrength_sel = 0;
        u32 cc_data_temp;
@@ -3658,14 +3657,15 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
        }
 
        if (str_tab != NULL) {
+               struct brcmf_core *pmu = brcmf_chip_get_pmu(ci);
+
                for (i = 0; str_tab[i].strength != 0; i++) {
                        if (drivestrength >= str_tab[i].strength) {
                                drivestrength_sel = str_tab[i].sel;
                                break;
                        }
                }
-               base = brcmf_chip_get_chipcommon(ci)->base;
-               addr = CORE_CC_REG(base, chipcontrol_addr);
+               addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
                brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
                cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
                cc_data_temp &= ~str_mask;
@@ -3835,8 +3835,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
                goto fail;
 
        /* set PMUControl so a backplane reset does PMU state reload */
-       reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
-                              pmucontrol);
+       reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
        reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
        if (err)
                goto fail;
@@ -4114,6 +4113,11 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
                goto fail;
        }
 
+       /* allocate scatter-gather table. sg support
+        * will be disabled upon allocation failure.
+        */
+       brcmf_sdiod_sgtable_alloc(bus->sdiodev);
+
        /* Query the F2 block size, set roundup accordingly */
        bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
        bus->roundup = min(max_roundup, bus->blocksize);
index 5ec7a6d876723029f3a98a1047e0da9bcbf53d53..23f223150cef2cf4ec768b196d906603b92f36b2 100644 (file)
@@ -342,6 +342,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 
 /* Issue an abort to the specified function */
 int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
                              enum brcmf_sdiod_state state);
 #ifdef CONFIG_PM_SLEEP
index bec2dc1ca2e406bcf4ac0ee0d00fa27a704cf7f2..61ae2768132a0b16f0f98a57b3319c82adfda95b 100644 (file)
@@ -818,13 +818,15 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 static int
 brcms_ops_ampdu_action(struct ieee80211_hw *hw,
                    struct ieee80211_vif *vif,
-                   enum ieee80211_ampdu_mlme_action action,
-                   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                   u8 buf_size, bool amsdu)
+                   struct ieee80211_ampdu_params *params)
 {
        struct brcms_info *wl = hw->priv;
        struct scb *scb = &wl->wlc->pri_scb;
        int status;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u8 buf_size = params->buf_size;
 
        if (WARN_ON(scb->magic != SCB_MAGIC))
                return -EIDRM;
index fd38aa0763e4f5f30384d29249e7bf205b7d5eeb..b75f4ef3cdc7b278ec837f83fd576444f5ce0008 100644 (file)
@@ -5982,12 +5982,14 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
 int
 il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 * ssn,
-                       u8 buf_size, bool amsdu)
+                       struct ieee80211_ampdu_params *params)
 {
        struct il_priv *il = hw->priv;
        int ret = -EINVAL;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
 
index 8ab8706f942267fcd2467928b1cbdf248a05921f..e432715e02d89dedcce89615d855233662c26d82 100644 (file)
@@ -182,9 +182,7 @@ void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
                                struct ieee80211_sta *sta, u32 iv32,
                                u16 *phase1key);
 int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                           enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 * ssn,
-                           u8 buf_size, bool amsdu);
+                           struct ieee80211_ampdu_params *params);
 int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
 void
index 866067789330ad4f4eaf0cebd7f807aa9403d1ec..11932d53ea2418678773d5004826d52dd0b1c897 100644 (file)
@@ -99,6 +99,18 @@ config IWLWIFI_UAPSD
 
          If unsure, say N.
 
+config IWLWIFI_PCIE_RTPM
+       bool "Enable runtime power management mode for PCIe devices"
+       depends on IWLMVM && PM
+       default n
+       help
+         Say Y here to enable runtime power management for PCIe
+         devices.  If enabled, the device will go into low power mode
+         when idle for a short period of time, allowing for improved
+         power saving during runtime.
+
+         If unsure, say N.
+
 menu "Debugging Options"
 
 config IWLWIFI_DEBUG
index 1aabb5ec096f5061082508bf4fe6acc0e0d96099..1bbd17ada974723623acc6b1da53b77131f6121b 100644 (file)
@@ -152,11 +152,14 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
 {
        struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
        unsigned long on = 0;
+       unsigned long off = 0;
 
        if (brightness > 0)
                on = IWL_LED_SOLID;
+       else
+               off = IWL_LED_SOLID;
 
-       iwl_led_cmd(priv, on, 0);
+       iwl_led_cmd(priv, on, off);
 }
 
 static int iwl_led_blink_set(struct led_classdev *led_cdev,
index 29ea1c6705b406f5c9bb410ddb5feaad6c59eb86..c63ea79571ff5f0d6b0bbfd07b4a6faba42f1593 100644 (file)
@@ -396,7 +396,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
        iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
                    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
 
-       iwl_trans_d3_suspend(priv->trans, false);
+       iwl_trans_d3_suspend(priv->trans, false, true);
 
        goto out;
 
@@ -469,7 +469,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
        /* we'll clear ctx->vif during iwlagn_prepare_restart() */
        vif = ctx->vif;
 
-       ret = iwl_trans_d3_resume(priv->trans, &d3_status, false);
+       ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true);
        if (ret)
                goto out_unlock;
 
@@ -732,12 +732,15 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
 
 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
-                                  enum ieee80211_ampdu_mlme_action action,
-                                  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                                  u8 buf_size, bool amsdu)
+                                  struct ieee80211_ampdu_params *params)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
        int ret = -EINVAL;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       u8 buf_size = params->buf_size;
        struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
 
        IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
index e60cf141ed796f0bbcaf1c8b68cf7a08676ce86e..fa41a5e1c8902002d3bf8a1d3991a416d6600499 100644 (file)
 #define IWL7260_UCODE_API_MAX  17
 #define IWL7265_UCODE_API_MAX  17
 #define IWL7265D_UCODE_API_MAX 20
+#define IWL3168_UCODE_API_MAX  20
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   13
 #define IWL7265_UCODE_API_OK   13
 #define IWL7265D_UCODE_API_OK  13
+#define IWL3168_UCODE_API_OK   20
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  13
 #define IWL7265_UCODE_API_MIN  13
 #define IWL7265D_UCODE_API_MIN 13
+#define IWL3168_UCODE_API_MIN  20
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
@@ -92,6 +95,8 @@
 #define IWL3160_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL3165_NVM_VERSION            0x709
 #define IWL3165_TX_POWER_VERSION       0xffff /* meaningless */
+#define IWL3168_NVM_VERSION            0xd01
+#define IWL3168_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL7265_NVM_VERSION            0x0a1d
 #define IWL7265_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL7265D_NVM_VERSION           0x0c11
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
+#define IWL3168_FW_PRE "iwlwifi-3168-"
+#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode"
+
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -180,6 +188,12 @@ static const struct iwl_ht_params iwl7000_ht_params = {
        .ucode_api_ok = IWL7265_UCODE_API_OK,                   \
        .ucode_api_min = IWL7265_UCODE_API_MIN
 
+#define IWL_DEVICE_3008                                                \
+       IWL_DEVICE_7000_COMMON,                                 \
+       .ucode_api_max = IWL3168_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL3168_UCODE_API_OK,                   \
+       .ucode_api_min = IWL3168_UCODE_API_MIN
+
 #define IWL_DEVICE_7005D                                       \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL7265D_UCODE_API_MAX,                \
@@ -299,11 +313,11 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
 
 const struct iwl_cfg iwl3168_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 3168",
-       .fw_name_pre = IWL7265D_FW_PRE,
-       IWL_DEVICE_7000,
+       .fw_name_pre = IWL3168_FW_PRE,
+       IWL_DEVICE_3008,
        .ht_params = &iwl7000_ht_params,
-       .nvm_ver = IWL3165_NVM_VERSION,
-       .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
+       .nvm_ver = IWL3168_NVM_VERSION,
+       .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
        .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
        .dccm_len = IWL7265_DCCM_LEN,
 };
@@ -376,5 +390,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
index ecbf4822cd697aa9dc1cf0e3a84bf5d376113bd1..4b93404f46a7f049ff2cd23c44f46e5e7e95354c 100644 (file)
@@ -138,7 +138,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .smem_offset = IWL9000_SMEM_OFFSET,                             \
        .smem_len = IWL9000_SMEM_LEN,                                   \
        .thermal_params = &iwl9000_tt_params,                           \
-       .apmg_not_supported = true
+       .apmg_not_supported = true,                                     \
+       .mq_rx_supported = true
 
 const struct iwl_cfg iwl9260_2ac_cfg = {
                .name = "Intel(R) Dual Band Wireless AC 9260",
index f99048135fb996a632675ea426de3d1da34b6224..dad5570d6cc8e33b6eb6e1914bda2e7fdcf37a33 100644 (file)
@@ -311,6 +311,7 @@ struct iwl_pwr_tx_backoff {
  * @dccm2_len: length of the second DCCM
  * @smem_offset: offset from which the SMEM begins
  * @smem_len: the length of SMEM
+ * @mq_rx_supported: multi-queue rx support
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -362,6 +363,7 @@ struct iwl_cfg {
        const u32 smem_len;
        const struct iwl_tt_params *thermal_params;
        bool apmg_not_supported;
+       bool mq_rx_supported;
 };
 
 /*
index 5cc6be927eab95d7f5c2b616bf5ddebe4380f050..4ab6682ea53ea19108434fa94c117c38462b950f 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -312,6 +314,77 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT        28
 #define FH_MEM_TB_MAX_LENGTH                   (0x00020000)
 
+/* 9000 rx series registers */
+
+#define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */
+#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define RFH_Q0_FRBDCB_WIDX 0xA08080
+#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Read index table */
+#define RFH_Q0_FRBDCB_RIDX 0xA080C0
+#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
+/* Used list table */
+#define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */
+#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define RFH_Q0_URBDCB_WIDX 0xA08180
+#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4)
+#define RFH_Q0_URBDCB_VAID 0xA081C0
+#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4)
+/* stts */
+#define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /* 64 bit address */
+#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
+
+#define RFH_Q0_ORB_WPTR_LSB 0xA08280
+#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8)
+#define RFH_RBDBUF_RBD0_LSB 0xA08300
+#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
+
+/* DMA configuration */
+#define RFH_RXF_DMA_CFG 0xA09820
+/* RB size */
+#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
+#define RFH_RXF_DMA_RB_SIZE_POS 16
+#define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_12K        (0x9 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_16K        (0xA << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_20K        (0xB << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_24K        (0xC << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_28K        (0xD << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_32K        (0xE << RFH_RXF_DMA_RB_SIZE_POS)
+/* RB Circular Buffer size: defines the table sizes in RBD units */
+#define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */
+#define RFH_RXF_DMA_RBDCB_SIZE_POS 20
+#define RFH_RXF_DMA_RBDCB_SIZE_8       (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_16      (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_32      (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_64      (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_128     (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_256     (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_512     (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_1024    (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_2048    (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */
+#define RFH_RXF_DMA_MIN_RB_SIZE_POS    24
+#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */
+#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31 */
+#define RFH_DMA_EN_ENABLE_VAL BIT(31)
+
+#define RFH_RXF_RXQ_ACTIVE 0xA0980C
+
+#define RFH_GEN_CFG    0xA09800
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
+#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0)
+#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1)
+#define DEFAULT_RXQ_NUM 8
+
+/* end of 9000 rx series registers */
+
 /* TFDB  Area - TFDs buffer table */
 #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
 #define FH_TFDIB_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0x900)
@@ -434,6 +507,10 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
  */
 #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN   (0x00000002)
 
+#define MQ_RX_TABLE_SIZE               512
+#define MQ_RX_TABLE_MASK               (MQ_RX_TABLE_SIZE - 1)
+#define MQ_RX_POOL_SIZE                        MQ_RX_TABLE_MASK
+
 #define RX_QUEUE_SIZE                         256
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
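The new multi-queue receive table above is sized as a power of two (512 descriptors, with MQ_RX_TABLE_MASK = 511), which lets ring indices wrap with a simple mask instead of a modulo. A tiny illustrative sketch of that indexing pattern, with stand-in names rather than the driver's own structures:

/* Illustrative only: power-of-two ring size, so "& mask" replaces "%". */
#define SKETCH_TABLE_SIZE 512u
#define SKETCH_TABLE_MASK (SKETCH_TABLE_SIZE - 1)

static unsigned int sketch_next_index(unsigned int idx)
{
	return (idx + 1) & SKETCH_TABLE_MASK;   /* 511 wraps back to 0 */
}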
index a5aaf685370462d97bae6b9c0935c567ad758bfa..8425e1a587d97a5ea00d914ce8f78538df1eb910 100644 (file)
@@ -293,6 +293,8 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
  * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
  *     threshold.
  * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
+ *  the firmware sends a tx reply.
  */
 enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_INVALID = 0,
@@ -309,6 +311,7 @@ enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_BA,
        FW_DBG_TRIGGER_TX_LATENCY,
        FW_DBG_TRIGGER_TDLS,
+       FW_DBG_TRIGGER_TX_STATUS,
 
        /* must be last */
        FW_DBG_TRIGGER_MAX,
index 84f8aeb926c8748f443e15ab74c01dfd1a51f77d..e2dbc67a367b9a609d7a63b1e7d26da3c7a77243 100644 (file)
@@ -297,10 +297,12 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
  *     which also implies support for the scheduler configuration command
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
  * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
  *     sources for the MCC. This TLV bit is a future replacement to
@@ -313,6 +315,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
  * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
  *     antenna the beacon should be transmitted
+ * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ *     from AP and will send it upon d0i3 exit.
  * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
@@ -330,10 +334,12 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = (__force iwl_ucode_tlv_capa_t)11,
        IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)12,
        IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = (__force iwl_ucode_tlv_capa_t)13,
+       IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG            = (__force iwl_ucode_tlv_capa_t)17,
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = (__force iwl_ucode_tlv_capa_t)18,
        IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT         = (__force iwl_ucode_tlv_capa_t)19,
        IWL_UCODE_TLV_CAPA_CSUM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)21,
        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = (__force iwl_ucode_tlv_capa_t)22,
+       IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD         = (__force iwl_ucode_tlv_capa_t)26,
        IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = (__force iwl_ucode_tlv_capa_t)28,
        IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
        IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
@@ -341,7 +347,9 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
        IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS            = (__force iwl_ucode_tlv_capa_t)65,
        IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT             = (__force iwl_ucode_tlv_capa_t)67,
+       IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT       = (__force iwl_ucode_tlv_capa_t)68,
        IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION         = (__force iwl_ucode_tlv_capa_t)71,
+       IWL_UCODE_TLV_CAPA_BEACON_STORING               = (__force iwl_ucode_tlv_capa_t)72,
        IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2               = (__force iwl_ucode_tlv_capa_t)73,
 
        NUM_IWL_UCODE_TLV_CAPA
@@ -747,6 +755,19 @@ struct iwl_fw_dbg_trigger_tdls {
        u8 reserved[4];
 } __packed;
 
+/**
+ * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response
+ *  status.
+ * @statuses: the list of statuses to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_tx_status {
+       struct tx_status {
+               u8 status;
+               u8 reserved[3];
+       } __packed statuses[16];
+       __le32 reserved[2];
+} __packed;
+
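The new trigger carries up to 16 one-byte status values (each padded to 4 bytes) plus trailing reserved words. A hypothetical sketch of how a firmware tx status could be matched against that list, using plain fixed-width types and arbitrary example values rather than the driver's real handler:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tx_status_entry {
	uint8_t status;
	uint8_t reserved[3];
};

/* treat a zero entry as "unused" - an assumption of this sketch */
static bool tx_status_matches_trigger(const struct tx_status_entry *statuses,
				      int n, uint8_t fw_status)
{
	int i;

	for (i = 0; i < n; i++)
		if (statuses[i].status && statuses[i].status == fw_status)
			return true;
	return false;
}

int main(void)
{
	struct tx_status_entry trig[16] = { { .status = 0x20 }, { .status = 0x40 } };

	printf("match: %d\n", tx_status_matches_trigger(trig, 16, 0x40)); /* 1 */
	return 0;
}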
 /**
  * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
  * @id: conf id
index fd42f63f5e84a0b7db8c3be5831e956704fe5868..b88ecc7892a903c791dac8131fcb7fa600f2d44e 100644 (file)
@@ -108,6 +108,8 @@ enum iwl_amsdu_size {
  * @power_level: power level, default = 1
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
+ * @nvm_file: specifies an external NVM file
+ * @uapsd_disable: disable U-APSD, default = 1
  * @d0i3_disable: disable d0i3, default = 1,
  * @d0i3_entry_delay: time to wait after no refs are taken before
  *     entering D0i3 (in msecs)
index 7b89bfc8c8ac3b1025b913e1dfd0bd017853dfa0..50f4cc60cf3e9800fdecfe1fe6c40db199f159fb 100644 (file)
@@ -539,7 +539,7 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                                           struct iwl_nvm_data *data,
                                           const __le16 *mac_override,
                                           const __le16 *nvm_hw,
-                                          u32 mac_addr0, u32 mac_addr1)
+                                          __le32 mac_addr0, __le32 mac_addr1)
 {
        const u8 *hw_addr;
 
@@ -583,7 +583,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
 
                if (!is_valid_ether_addr(data->hw_addr))
                        IWL_ERR_DEV(dev,
-                                   "mac address from hw section is not valid\n");
+                                   "mac address (%pM) from hw section is not valid\n",
+                                   data->hw_addr);
 
                return;
        }
@@ -597,7 +598,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
                   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-                  u32 mac_addr0, u32 mac_addr1)
+                  __le32 mac_addr0, __le32 mac_addr1)
 {
        struct iwl_nvm_data *data;
        u32 sku;
index 92466ee72806218a5825da285ddebdc2e6768d9c..4e8e0dc474d49fec9c33d9b8598c1eb050f70cc5 100644 (file)
@@ -79,7 +79,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
                   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-                  u32 mac_addr0, u32 mac_addr1);
+                  __le32 mac_addr0, __le32 mac_addr1);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
index 82fb3a97a46d3f6a165a5623d4a5be83883b092e..0ca0f13b69b0fa66e4d7ee13ed1f4c301bb2a1f6 100644 (file)
@@ -506,7 +506,7 @@ struct iwl_trans_config {
        bool sw_csum_tx;
        const struct iwl_hcmd_arr *command_groups;
        int command_groups_size;
+
        u32 sdio_adma_addr;
 };
 
@@ -618,9 +618,9 @@ struct iwl_trans_ops {
        void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
        void (*stop_device)(struct iwl_trans *trans, bool low_power);
 
-       void (*d3_suspend)(struct iwl_trans *trans, bool test);
+       void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
        int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
-                        bool test);
+                        bool test, bool reset);
 
        int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
@@ -736,6 +736,11 @@ enum iwl_plat_pm_mode {
        IWL_PLAT_PM_MODE_D0I3,
 };
 
+/* Max time to wait for trans to become idle/non-idle on d0i3
+ * enter/exit (in msecs).
+ */
+#define IWL_TRANS_IDLE_TIMEOUT 2000
+
 /**
  * struct iwl_trans - transport common data
  *
@@ -920,22 +925,23 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
        _iwl_trans_stop_device(trans, true);
 }
 
-static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
+                                       bool reset)
 {
        might_sleep();
        if (trans->ops->d3_suspend)
-               trans->ops->d3_suspend(trans, test);
+               trans->ops->d3_suspend(trans, test, reset);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
                                      enum iwl_d3_status *status,
-                                     bool test)
+                                     bool test, bool reset)
 {
        might_sleep();
        if (!trans->ops->d3_resume)
                return 0;
 
-       return trans->ops->d3_resume(trans, status, test);
+       return trans->ops->d3_resume(trans, status, test, reset);
 }
 
 static inline void iwl_trans_ref(struct iwl_trans *trans)
index b00c03fcd4473cc670cd1489c3504c767de62c65..4b560e4417ee55fa1da2539dbe7f3c1482fe8790 100644 (file)
@@ -6,7 +6,8 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +33,8 @@
  * BSD LICENSE
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -73,6 +75,7 @@
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT      (10 * USEC_PER_MSEC)
 #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT       (2 * 1024) /* defined in TU */
 #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT       (40 * 1024) /* defined in TU */
+#define IWL_MVM_P2P_UAPSD_STANDALONE           0
 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE       0
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK       1
 #define IWL_MVM_TOF_IS_RESPONDER               0
 #define IWL_MVM_SW_TX_CSUM_OFFLOAD             0
+#define IWL_MVM_COLLECT_FW_ERR_DUMP            1
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
index d3e21d95cecec52830163c4cd674d444b6940a41..346376187ef849455ae1b60da0039cc2e48fd633 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016   Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016   Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -851,7 +853,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
        wowlan_config_cmd->is_11n_connection =
                                        ap_sta->ht_cap.ht_supported;
        wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
-               ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+               ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING |
+               ENABLE_STORE_BEACON;
 
        /* Query the last used seqno and set it */
        ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -1023,14 +1026,18 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
                      struct ieee80211_sta *ap_sta)
 {
        int ret;
+       bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+                                        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
-       ret = iwl_mvm_switch_to_d3(mvm);
-       if (ret)
-               return ret;
+       if (!unified_image) {
+               ret = iwl_mvm_switch_to_d3(mvm);
+               if (ret)
+                       return ret;
 
-       ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
-       if (ret)
-               return ret;
+               ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+               if (ret)
+                       return ret;
+       }
 
        if (!iwlwifi_mod_params.sw_crypto) {
                /*
@@ -1072,10 +1079,14 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
 {
        struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
        int ret;
+       bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+                                        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
-       ret = iwl_mvm_switch_to_d3(mvm);
-       if (ret)
-               return ret;
+       if (!unified_image) {
+               ret = iwl_mvm_switch_to_d3(mvm);
+               if (ret)
+                       return ret;
+       }
 
        /* rfkill release can be either for wowlan or netdetect */
        if (wowlan->rfkill_release)
@@ -1151,6 +1162,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        };
        int ret;
        int len __maybe_unused;
+       bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+                                        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
        if (!wowlan) {
                /*
@@ -1236,7 +1249,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
 
-       iwl_trans_d3_suspend(mvm->trans, test);
+       iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
  out:
        if (ret < 0) {
                iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -1299,7 +1312,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
                mutex_unlock(&mvm->d0i3_suspend_mutex);
 
-               iwl_trans_d3_suspend(trans, false);
+               iwl_trans_d3_suspend(trans, false, false);
 
                return 0;
        }
@@ -2041,9 +2054,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 {
        struct ieee80211_vif *vif = NULL;
-       int ret;
+       int ret = 1;
        enum iwl_d3_status d3_status;
        bool keep = false;
+       bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+                                        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+       u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+                                   CMD_WAKE_UP_TRANS;
 
        mutex_lock(&mvm->mutex);
 
@@ -2052,7 +2070,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        if (IS_ERR_OR_NULL(vif))
                goto err;
 
-       ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
+       ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
        if (ret)
                goto err;
 
@@ -2095,17 +2113,28 @@ out_iterate:
                        iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
 out:
-       /* return 1 to reconfigure the device */
+       if (unified_image && !ret) {
+               ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+               if (!ret) /* D3 ended successfully - no need to reset device */
+                       return 0;
+       }
+
+       /*
+        * Reconfigure the device in one of the following cases:
+        * 1. We are not using a unified image
+        * 2. We are using a unified image but had an error while exiting D3
+        */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
-
-       /* We always return 1, which causes mac80211 to do a reconfig
-        * with IEEE80211_RECONFIG_TYPE_RESTART.  This type of
-        * reconfig calls iwl_mvm_restart_complete(), where we unref
-        * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
-        * reference here.
+       /*
+        * When switching images we return 1, which causes mac80211
+        * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
+        * This type of reconfig calls iwl_mvm_restart_complete(),
+        * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
+        * to take the reference here.
         */
        iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
        return 1;
 }
 
@@ -2122,7 +2151,7 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
        enum iwl_d3_status d3_status;
        struct iwl_trans *trans = mvm->trans;
 
-       iwl_trans_d3_resume(trans, &d3_status, false);
+       iwl_trans_d3_resume(trans, &d3_status, false, false);
 
        /*
         * make sure to clear D0I3_DEFER_WAKEUP before
index 9e0d46368cdd1c0581f218ce757ab8a258ee93e3..14004456bf550db58ff95c511e6dee3203e32fed 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1255,6 +1257,7 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = mvmvif->mvm;
+       bool prev;
        u8 value;
        int ret;
 
@@ -1265,7 +1268,9 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
                return -EINVAL;
 
        mutex_lock(&mvm->mutex);
-       iwl_mvm_update_low_latency(mvm, vif, value);
+       prev = iwl_mvm_vif_low_latency(mvmvif);
+       mvmvif->low_latency_dbgfs = value;
+       iwl_mvm_update_low_latency(mvm, vif, prev);
        mutex_unlock(&mvm->mutex);
 
        return count;
@@ -1277,11 +1282,15 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
 {
        struct ieee80211_vif *vif = file->private_data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       char buf[2];
+       char buf[30] = {};
+       int len;
 
-       buf[0] = mvmvif->low_latency ? '1' : '0';
-       buf[1] = '\n';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+       len = snprintf(buf, sizeof(buf) - 1,
+                      "traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+                      mvmvif->low_latency_traffic,
+                      mvmvif->low_latency_dbgfs,
+                      mvmvif->low_latency_vcmd);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
 static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
@@ -1363,6 +1372,59 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
 }
 
+static void iwl_dbgfs_quota_check(void *data, u8 *mac,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int *ret = data;
+
+       if (mvmvif->dbgfs_quota_min)
+               *ret = -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       u16 value;
+       int ret;
+
+       ret = kstrtou16(buf, 0, &value);
+       if (ret)
+               return ret;
+
+       if (value > 95)
+               return -EINVAL;
+
+       mutex_lock(&mvm->mutex);
+
+       mvmvif->dbgfs_quota_min = 0;
+       ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                    iwl_dbgfs_quota_check, &ret);
+       if (ret == 0) {
+               mvmvif->dbgfs_quota_min = value;
+               iwl_mvm_update_quotas(mvm, false, NULL);
+       }
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       char buf[10];
+       int len;
+
+       len = snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1386,6 +1448,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
 MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -1423,6 +1486,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                 S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
+                                S_IRUSR | S_IWUSR);
 
        if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
            mvmvif == mvm->bf_allowed_vif)
index 90500e2d107b60ada39c82adaacedc9a5051cb7e..c529e5355803f10a923b601f5dc46acbd62cfd66 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -261,17 +262,18 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
 {
        struct iwl_mvm *mvm = file->private_data;
        char buf[16];
-       int pos, temp;
+       int pos, ret;
+       s32 temp;
 
        if (!mvm->ucode_loaded)
                return -EIO;
 
        mutex_lock(&mvm->mutex);
-       temp = iwl_mvm_get_temp(mvm);
+       ret = iwl_mvm_get_temp(mvm, &temp);
        mutex_unlock(&mvm->mutex);
 
-       if (temp < 0)
-               return temp;
+       if (ret)
+               return -EIO;
 
        pos = scnprintf(buf , sizeof(buf), "%d\n", temp);
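The signature change here (mirrored in the header hunk further down) returns the error code separately and hands the temperature back through an s32 pointer, so a legitimately negative reading is no longer mistaken for an error. A generic sketch of that status-plus-out-parameter pattern, built around a hypothetical read_sensor() helper rather than the driver's code:

#include <stdio.h>

/* hypothetical helper: 0 on success, non-zero error code otherwise */
static int read_sensor(int *out_celsius)
{
	*out_celsius = -5;	/* a negative reading is valid data here */
	return 0;
}

int main(void)
{
	int temp, ret;

	ret = read_sensor(&temp);
	if (ret)
		printf("read failed: %d\n", ret);
	else
		printf("temperature: %d C\n", temp);
	return 0;
}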
 
@@ -942,6 +944,47 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
        return count;
 }
 
+static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
+                                              char *buf, size_t count,
+                                              loff_t *ppos)
+{
+       struct iwl_rss_config_cmd cmd = {
+               .flags = cpu_to_le32(IWL_RSS_ENABLE),
+               .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+                            IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+                            IWL_RSS_HASH_TYPE_IPV6_TCP |
+                            IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+       };
+       int ret, i, num_repeats, nbytes = count / 2;
+
+       ret = hex2bin(cmd.indirection_table, buf, nbytes);
+       if (ret)
+               return ret;
+
+       /*
+        * The input is the redirection table, partial or full.
+        * Repeat the pattern if needed.
+        * For example, input of 01020F will be repeated 42 times,
+        * directing RSS hash results to queues 1, 2, 15 (skipping
+        * queues 3 - 14).
+        */
+       num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes;
+       for (i = 1; i < num_repeats; i++)
+               memcpy(&cmd.indirection_table[i * nbytes],
+                      cmd.indirection_table, nbytes);
+       /* fill the remaining entries with a partial copy of the pattern */
+       memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
+              ARRAY_SIZE(cmd.indirection_table) % nbytes);
+
+       memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key));
+
+       mutex_lock(&mvm->mutex);
+       ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
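Following the comment in the function above, a short input pattern is tiled across the 128-entry indirection table and the leftover bytes are patched in at the end. A standalone sketch of that arithmetic for the "01020F" example (3 bytes, 42 full repeats, remainder 2); not the driver code itself:

#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 128

int main(void)
{
	unsigned char table[TABLE_SIZE];
	unsigned char pattern[] = { 1, 2, 15 };	/* hex2bin("01020F") */
	int nbytes = sizeof(pattern);
	int i, num_repeats = TABLE_SIZE / nbytes;	/* 42 */

	memcpy(table, pattern, nbytes);
	for (i = 1; i < num_repeats; i++)
		memcpy(&table[i * nbytes], table, nbytes);
	/* 128 % 3 == 2, so the last two entries become 1, 2 */
	memcpy(&table[i * nbytes], table, TABLE_SIZE % nbytes);

	printf("table[125..127] = %d %d %d\n",
	       table[125], table[126], table[127]);	/* 15 1 2 */
	return 0;
}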
 static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
                                          char __user *user_buf,
                                          size_t count, loff_t *ppos)
@@ -983,7 +1026,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
            trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return -EOPNOTSUPP;
 
-       ret = kstrtouint(buf, 0, &rec_mode);
+       ret = kstrtoint(buf, 0, &rec_mode);
        if (ret)
                return ret;
 
@@ -1454,6 +1497,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
 MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16);
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1498,11 +1542,15 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
        if (!debugfs_create_bool("enable_scan_iteration_notif",
                                 S_IRUSR | S_IWUSR,
                                 mvm->debugfs_dir,
                                 &mvm->scan_iter_notif_enabled))
                goto err;
+       if (!debugfs_create_bool("drop_bcn_ap_mode", S_IRUSR | S_IWUSR,
+                                mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
+               goto err;
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
index 62b9a0a96700738ad5df1c15a8d02305804fbb0d..eec52c57f71831014d550c917f5452015373d5fd 100644 (file)
@@ -251,6 +251,7 @@ enum iwl_wowlan_flags {
        ENABLE_L3_FILTERING     = BIT(1),
        ENABLE_NBNS_FILTERING   = BIT(2),
        ENABLE_DHCP_FILTERING   = BIT(3),
+       ENABLE_STORE_BEACON     = BIT(4),
 };
 
 struct iwl_wowlan_config_cmd {
index fb6d341d6f3dce33f26fe4e1abe73a531dc58d80..df939f51d9b97e75ddf854fa246662dbbc27f143 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -287,16 +287,13 @@ enum iwl_rx_mpdu_status {
        IWL_RX_MPDU_STATUS_KEY_ERROR            = BIT(4),
        IWL_RX_MPDU_STATUS_ICV_OK               = BIT(5),
        IWL_RX_MPDU_STATUS_MIC_OK               = BIT(6),
-       /* TODO - verify this is the correct value */
        IWL_RX_MPDU_RES_STATUS_TTAK_OK          = BIT(7),
        IWL_RX_MPDU_STATUS_SEC_MASK             = 0x7 << 8,
        IWL_RX_MPDU_STATUS_SEC_NONE             = 0x0 << 8,
        IWL_RX_MPDU_STATUS_SEC_WEP              = 0x1 << 8,
        IWL_RX_MPDU_STATUS_SEC_CCM              = 0x2 << 8,
        IWL_RX_MPDU_STATUS_SEC_TKIP             = 0x3 << 8,
-       /* TODO - define IWL_RX_MPDU_STATUS_SEC_EXT_ENC - this is a stub */
        IWL_RX_MPDU_STATUS_SEC_EXT_ENC          = 0x4 << 8,
-       /* TODO - define IWL_RX_MPDU_STATUS_SEC_GCM - this is a stub */
        IWL_RX_MPDU_STATUS_SEC_GCM              = 0x5 << 8,
        IWL_RX_MPDU_STATUS_DECRYPTED            = BIT(11),
        IWL_RX_MPDU_STATUS_WEP_MATCH            = BIT(12),
@@ -350,11 +347,11 @@ struct iwl_rx_mpdu_desc {
        /* DW8 */
        __le32 filter_match;
        /* DW9 */
-       __le32 gp2_on_air_rise;
-       /* DW10 */
        __le32 rate_n_flags;
+       /* DW10 */
+       u8 energy_a, energy_b, channel, reserved;
        /* DW11 */
-       u8 energy_a, energy_b, energy_c, channel;
+       __le32 gp2_on_air_rise;
        /* DW12 & DW13 */
        __le64 tsf_on_air_rise;
 } __packed;
@@ -365,4 +362,33 @@ struct iwl_frame_release {
        __le16 nssn;
 };
 
+enum iwl_rss_hash_func_en {
+       IWL_RSS_HASH_TYPE_IPV4_TCP,
+       IWL_RSS_HASH_TYPE_IPV4_UDP,
+       IWL_RSS_HASH_TYPE_IPV4_PAYLOAD,
+       IWL_RSS_HASH_TYPE_IPV6_TCP,
+       IWL_RSS_HASH_TYPE_IPV6_UDP,
+       IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+};
+
+#define IWL_RSS_HASH_KEY_CNT 10
+#define IWL_RSS_INDIRECTION_TABLE_SIZE 128
+#define IWL_RSS_ENABLE 1
+
+/**
+ * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration
+ *
+ * @flags: 1 - enable, 0 - disable
+ * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en
+ * @secret_key: 320 bit input of random key configuration from driver
+ * @indirection_table: indirection table
+ */
+struct iwl_rss_config_cmd {
+       __le32 flags;
+       u8 hash_mask;
+       u8 reserved[3];
+       __le32 secret_key[IWL_RSS_HASH_KEY_CNT];
+       u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
+} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
+
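Assuming the usual C layout of the members above, the command works out to 4 + 1 + 3 + 40 + 128 = 176 bytes on the wire. A standalone mirror of the struct with a compile-time size check, using plain fixed-width types instead of the kernel's __le32/u8 (a sketch, not the driver header):

#include <stdint.h>

struct rss_config_cmd_mirror {
	uint32_t flags;
	uint8_t  hash_mask;
	uint8_t  reserved[3];
	uint32_t secret_key[10];		/* IWL_RSS_HASH_KEY_CNT */
	uint8_t  indirection_table[128];	/* IWL_RSS_INDIRECTION_TABLE_SIZE */
} __attribute__((packed));

_Static_assert(sizeof(struct rss_config_cmd_mirror) == 176,
	       "RSS config command expected to be 176 bytes");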
 #endif /* __fw_api_rx_h__ */
index 6fca4fb1d3064d8b090e57c9ed681d33ca744ea7..90d9113948363740ae4cda336bcf19a2649c2e8c 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -253,6 +255,68 @@ struct iwl_mvm_keyinfo {
        __le64 hw_tkip_mic_tx_key;
 } __packed;
 
+#define IWL_ADD_STA_STATUS_MASK        0xFF
+#define IWL_ADD_STA_BAID_MASK  0xFF00
+
+/**
+ * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: 1: modify existing, 0: add new station
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ *     AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @mac_id_n_color: the Mac context this station belongs to
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ *     alone. 1 - modify, 0 - don't change.
+ * @station_flags: look at %iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ *     Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ *     add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ *     Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ *     add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ *     asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ *     keeps track of STA sleep state.
+ * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ *     mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd_v7 {
+       u8 add_modify;
+       u8 awake_acs;
+       __le16 tid_disable_tx;
+       __le32 mac_id_n_color;
+       u8 addr[ETH_ALEN];      /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+       __le16 reserved2;
+       u8 sta_id;
+       u8 modify_mask;
+       __le16 reserved3;
+       __le32 station_flags;
+       __le32 station_flags_msk;
+       u8 add_immediate_ba_tid;
+       u8 remove_immediate_ba_tid;
+       __le16 add_immediate_ba_ssn;
+       __le16 sleep_tx_count;
+       __le16 sleep_state_flags;
+       __le16 assoc_id;
+       __le16 beamform_flags;
+       __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
+
 /**
  * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
  * ( REPLY_ADD_STA = 0x18 )
@@ -282,6 +346,7 @@ struct iwl_mvm_keyinfo {
  *     mac-addr.
  * @beamform_flags: beam forming controls
  * @tfd_queue_msk: tfd queues used by this station
+ * @rx_ba_window: aggregation window size
  *
  * The device contains an internal table of per-station information, with info
  * on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -310,7 +375,9 @@ struct iwl_mvm_add_sta_cmd {
        __le16 assoc_id;
        __le16 beamform_flags;
        __le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_7 */
+       __le16 rx_ba_window;
+       __le16 reserved;
+} __packed; /* ADD_STA_CMD_API_S_VER_8 */
 
 /**
  * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
index 0036d18334af4ab893b0b45214e972a3836d7885..ba3f0bbddde8874db03a74cd9a0ca01df862bc38 100644 (file)
@@ -510,6 +510,9 @@ struct iwl_mvm_tx_resp {
  * @scd_ssn: the index of the last contiguously sent packet
  * @txed: number of Txed frames in this batch
  * @txed_2_done: number of Acked frames in this batch
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ *     not a copy from the LQ command. Thus, if a rate other than the first
+ *     was used for Tx-ing, the FW will set this value to 0.
  */
 struct iwl_mvm_ba_notif {
        __le32 sta_addr_lo32;
@@ -524,7 +527,8 @@ struct iwl_mvm_ba_notif {
        __le16 scd_ssn;
        u8 txed;
        u8 txed_2_done;
-       __le16 reserved1;
+       u8 reduced_txp;
+       u8 reserved1;
 } __packed;
 
 /*
index 82049bb139c26b2ae68f3d8a8ce191bde5f97de5..f332497e29d161a03a5fe16a26fca2804cb35e43 100644 (file)
@@ -213,6 +213,8 @@ enum {
 
        MFUART_LOAD_NOTIFICATION = 0xb1,
 
+       RSS_CONFIG_CMD = 0xb3,
+
        REPLY_RX_PHY_CMD = 0xc0,
        REPLY_RX_MPDU_CMD = 0xc1,
        FRAME_RELEASE = 0xc3,
@@ -280,11 +282,16 @@ enum iwl_phy_ops_subcmd_ids {
        DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };
 
+enum iwl_prot_offload_subcmd_ids {
+       STORED_BEACON_NTF = 0xFF,
+};
+
 /* command groups */
 enum {
        LEGACY_GROUP = 0x0,
        LONG_GROUP = 0x1,
        PHY_OPS_GROUP = 0x4,
+       PROT_OFFLOAD_GROUP = 0xb,
 };
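Commands in the non-legacy groups are addressed by a (group, opcode) pair. Assuming the (group << 8) | opcode packing commonly used for wide command ids (an assumption of this sketch, not quoted from the driver), the new stored-beacon notification would resolve to 0x0bff:

#include <stdio.h>

#define PROT_OFFLOAD_GROUP 0x0b
#define STORED_BEACON_NTF  0xff

int main(void)
{
	unsigned int wide_id = (PROT_OFFLOAD_GROUP << 8) | STORED_BEACON_NTF;

	printf("wide id = 0x%04x\n", wide_id);	/* 0x0bff */
	return 0;
}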
 
 /**
@@ -1851,4 +1858,28 @@ struct iwl_shared_mem_cfg {
        __le32 page_buff_size;
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
 
+#define MAX_STORED_BEACON_SIZE 600
+
+/**
+ * Stored beacon notification
+ *
+ * @system_time: system time on air rise
+ * @tsf: TSF on air rise
+ * @beacon_timestamp: beacon on air rise
+ * @phy_flags: general phy flags: band, modulation, etc.
+ * @channel: channel this beacon was received on
+ * @rates: rate in ucode internal format
+ * @byte_count: frame's byte count
+ */
+struct iwl_stored_beacon_notif {
+       __le32 system_time;
+       __le64 tsf;
+       __le32 beacon_timestamp;
+       __le16 phy_flags;
+       __le16 channel;
+       __le32 rates;
+       __le32 byte_count;
+       u8 data[MAX_STORED_BEACON_SIZE];
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
+
 #endif /* __fw_api_h__ */
index 0813f8184e10cbcb2472910847b789e7d57f454d..4856eac120f60d5eff2e9707e828e541761a76ae 100644 (file)
@@ -435,6 +435,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        bool monitor_dump_only = false;
        int i;
 
+       if (!IWL_MVM_COLLECT_FW_ERR_DUMP &&
+           !mvm->trans->dbg_dest_tlv)
+               return;
+
        lockdep_assert_held(&mvm->mutex);
 
        /* there's no point in fw dump if the bus is dead */
@@ -640,8 +644,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
        /* Dump fw's virtual image */
        if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
-               u32 i;
-
                for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
                        struct iwl_fw_error_dump_paging *paging;
                        struct page *pages =
index 4ed5180c547bb6ce8af288a6653220b5cf2c61b4..070e2af05ca25bc5545373e3738c8c30b67cd283 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -107,6 +108,24 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
+static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
+{
+       int i;
+       struct iwl_rss_config_cmd cmd = {
+               .flags = cpu_to_le32(IWL_RSS_ENABLE),
+               .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+                            IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+                            IWL_RSS_HASH_TYPE_IPV6_TCP |
+                            IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+       };
+
+       for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+               cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
+       memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key));
+
+       return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+}
+
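The default fill above walks the 128 indirection entries round-robin over the available RX queues, so RSS hash buckets spread evenly across them. A standalone sketch of that distribution for an example of four queues (not driver code):

#include <stdio.h>

#define TABLE_SIZE 128

int main(void)
{
	unsigned int num_rx_queues = 4;		/* example value */
	unsigned int counts[4] = { 0 };
	int i;

	for (i = 0; i < TABLE_SIZE; i++)
		counts[i % num_rx_queues]++;

	for (i = 0; i < 4; i++)
		printf("queue %d: %u entries\n", i, counts[i]);	/* 32 each */
	return 0;
}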
 static void iwl_free_fw_paging(struct iwl_mvm *mvm)
 {
        int i;
@@ -894,6 +913,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
+       /* Init RSS configuration */
+       if (iwl_mvm_has_new_rx_api(mvm)) {
+               ret = iwl_send_rss_cfg_cmd(mvm);
+               if (ret) {
+                       IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
+                               ret);
+                       goto error;
+               }
+       }
+
        /* init the fw <-> mac80211 STA mapping */
        for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
index bf1e5eb5dbdba3657071ae1492e52b7073e6a9cd..535134d639e0ec70498a1a418cce0344ea5656ff 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -744,7 +744,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
                 * wake-ups.
                 */
                cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
-               if (mvmvif->ap_assoc_sta_count) {
+               if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
                        cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
                        IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
                } else {
@@ -1462,3 +1462,40 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
                                                   iwl_mvm_beacon_loss_iterator,
                                                   mb);
 }
+
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
+       struct ieee80211_rx_status rx_status;
+       struct sk_buff *skb;
+       u32 size = le32_to_cpu(sb->byte_count);
+
+       if (size == 0)
+               return;
+
+       skb = alloc_skb(size, GFP_ATOMIC);
+       if (!skb) {
+               IWL_ERR(mvm, "alloc_skb failed\n");
+               return;
+       }
+
+       /* update rx_status according to the notification's metadata */
+       memset(&rx_status, 0, sizeof(rx_status));
+       rx_status.mactime = le64_to_cpu(sb->tsf);
+       rx_status.device_timestamp = le32_to_cpu(sb->system_time);
+       rx_status.band =
+               (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
+                                              rx_status.band);
+
+       /* copy the data */
+       memcpy(skb_put(skb, size), sb->data, size);
+       memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+       /* pass it as regular rx to mac80211 */
+       ieee80211_rx_napi(mvm->hw, skb, NULL);
+}
index d70a1716f3e08e8856ec30aa66376b8437ef3108..24c46c2232463d5b42d2314ce30da5aaaf13d964 100644 (file)
@@ -837,13 +837,16 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
-                                   enum ieee80211_ampdu_mlme_action action,
-                                   struct ieee80211_sta *sta, u16 tid,
-                                   u16 *ssn, u8 buf_size, bool amsdu)
+                                   struct ieee80211_ampdu_params *params)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
        bool tx_agg_ref = false;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       u8 buf_size = params->buf_size;
 
        IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
                     sta->addr, tid, action);
@@ -884,10 +887,10 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                        ret = -EINVAL;
                        break;
                }
-               ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
+               ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size);
                break;
        case IEEE80211_AMPDU_RX_STOP:
-               ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
+               ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size);
                break;
        case IEEE80211_AMPDU_TX_START:
                if (!iwl_enable_tx_ampdu(mvm->cfg)) {
index 5f3ac8cccf49d2c5a07644f894b1ba920dca8bfc..ebe37bb0ce4c4f42643bc6443d4b55246de132f3 100644 (file)
@@ -346,8 +346,9 @@ struct iwl_mvm_vif_bf_data {
  * @pm_enabled - Indicate if MAC power management is allowed
  * @monitor_active: indicates that monitor context is configured, and that the
  *     interface should get quota etc.
- * @low_latency: indicates that this interface is in low-latency mode
- *     (VMACLowLatencyMode)
+ * @low_latency_traffic: indicates low latency traffic was detected
+ * @low_latency_dbgfs: low latency mode set from debugfs
+ * @low_latency_vcmd: low latency mode set from vendor command
  * @ps_disabled: indicates that this interface requires PS to be disabled
  * @queue_params: QoS params for this MAC
  * @bcast_sta: station used for broadcast packets. Used by the following
@@ -375,7 +376,7 @@ struct iwl_mvm_vif {
        bool ap_ibss_active;
        bool pm_enabled;
        bool monitor_active;
-       bool low_latency;
+       bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd;
        bool ps_disabled;
        struct iwl_mvm_vif_bf_data bf_data;
 
@@ -432,6 +433,7 @@ struct iwl_mvm_vif {
        struct iwl_dbgfs_pm dbgfs_pm;
        struct iwl_dbgfs_bf dbgfs_bf;
        struct iwl_mac_power_cmd mac_pwr_cmd;
+       int dbgfs_quota_min;
 #endif
 
        enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
@@ -645,6 +647,7 @@ struct iwl_mvm {
        atomic_t pending_frames[IWL_MVM_STATION_COUNT];
        u32 tfd_drained[IWL_MVM_STATION_COUNT];
        u8 rx_ba_sessions;
+       u32 secret_key[IWL_RSS_HASH_KEY_CNT];
 
        /* configured by mac80211 */
        u32 rts_threshold;
@@ -856,6 +859,12 @@ struct iwl_mvm {
 
        u32 ciphers[6];
        struct iwl_mvm_tof_data tof_data;
+
+       /*
+        * Drop beacons from other APs in AP mode when there are no connected
+        * clients.
+        */
+       bool drop_bcn_ap_mode;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -1005,10 +1014,18 @@ static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
                IWL_MVM_BT_COEX_MPLUT;
 }
 
+static inline
+bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
+               IWL_MVM_P2P_UAPSD_STANDALONE;
+}
+
 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
 {
-       /* firmware flag isn't defined yet */
-       return false;
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
 }
 
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
@@ -1184,6 +1201,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
                                     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1417,8 +1436,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
         * binding, so this has no real impact. For now, just return
         * the current desired low-latency state.
         */
-
-       return mvmvif->low_latency;
+       return mvmvif->low_latency_dbgfs ||
+              mvmvif->low_latency_traffic ||
+              mvmvif->low_latency_vcmd;
 }
 
 /* hw scheduler queue config */
@@ -1481,7 +1501,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
-int iwl_mvm_get_temp(struct iwl_mvm *mvm);
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
 
 /* Location Aware Regulatory */
 struct iwl_mcc_update_resp *
index 7a3da2da6fd0bfe280415849764ca84361ea573a..c446e0da97899a45781d87e4b5bcb7341ee0c1fd 100644 (file)
@@ -300,7 +300,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        struct iwl_nvm_section *sections = mvm->nvm_sections;
        const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
        bool lar_enabled;
-       u32 mac_addr0, mac_addr1;
+       __le32 mac_addr0, mac_addr1;
 
        /* Checking for required sections */
        if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@@ -337,8 +337,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                return NULL;
 
        /* read the mac address from WFMP registers */
-       mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
-       mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);
+       mac_addr0 = cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                   WFMP_MAC_ADDR_0));
+       mac_addr1 = cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                   WFMP_MAC_ADDR_1));
 
        hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
index 89ea70deeb84410edd0e3ad07ae77abc08476107..09a94a5efb61111d150fa1616e21467a922485d9 100644 (file)
@@ -33,6 +33,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -267,6 +268,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
                   true),
        RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
        RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
+       RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+                      iwl_mvm_rx_stored_beacon_notif, false),
 
 };
 #undef RX_HANDLER
@@ -344,6 +347,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
        HCMD_NAME(MAC_PM_POWER_TABLE),
        HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
        HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+       HCMD_NAME(RSS_CONFIG_CMD),
        HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
        HCMD_NAME(REPLY_RX_PHY_CMD),
        HCMD_NAME(REPLY_RX_MPDU_CMD),
@@ -386,13 +390,20 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
        HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
 };
 
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+       HCMD_NAME(STORED_BEACON_NTF),
+};
+
 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
        [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
+       [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
 };
 
-
 /* this forward declaration can avoid to export the function */
 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
@@ -481,6 +492,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        }
        mvm->sf_state = SF_UNINIT;
        mvm->cur_ucode = IWL_UCODE_INIT;
+       mvm->drop_bcn_ap_mode = true;
 
        mutex_init(&mvm->mutex);
        mutex_init(&mvm->d0i3_suspend_mutex);
@@ -641,6 +653,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        iwl_mvm_tof_init(mvm);
 
+       /* init RSS hash key */
+       get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key));
+
        return op_mode;
 
  out_unregister:
@@ -1196,7 +1211,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
        cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
        cmd->offloading_tid = iter_data->offloading_tid;
        cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
-               ENABLE_DHCP_FILTERING;
+               ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
        /*
         * The d0i3 uCode takes care of the nonqos counters,
         * so configure only the qos seq ones.
@@ -1217,8 +1232,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
        struct iwl_wowlan_config_cmd wowlan_config_cmd = {
                .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
                                             IWL_WOWLAN_WAKEUP_BEACON_MISS |
-                                            IWL_WOWLAN_WAKEUP_LINK_CHANGE |
-                                            IWL_WOWLAN_WAKEUP_BCN_FILTERING),
+                                            IWL_WOWLAN_WAKEUP_LINK_CHANGE),
        };
        struct iwl_d3_manager_config d3_cfg_cmd = {
                .min_sleep_time = cpu_to_le32(1000),
@@ -1268,6 +1282,12 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
 
        /* configure wowlan configuration only if needed */
        if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
+               /* wake on beacons only if beacon storing isn't supported */
+               if (!fw_has_capa(&mvm->fw->ucode_capa,
+                                IWL_UCODE_TLV_CAPA_BEACON_STORING))
+                       wowlan_config_cmd.wakeup_filter |=
+                               cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+
                iwl_mvm_wowlan_config_key_params(mvm,
                                                 d0i3_iter_data.connected_vif,
                                                 true, flags);
index 9de159f1ef2dd366fbfeb67a75c1547952a26087..f313910cd0269f9e764b527ae886a49b1d9e4ef7 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -259,6 +259,26 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
                IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
 }
 
+static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       bool *is_p2p_standalone = _data;
+
+       switch (ieee80211_vif_type_p2p(vif)) {
+       case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_AP:
+               *is_p2p_standalone = false;
+               break;
+       case NL80211_IFTYPE_STATION:
+               if (vif->bss_conf.assoc)
+                       *is_p2p_standalone = false;
+               break;
+
+       default:
+               break;
+       }
+}
+
 static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif)
 {
@@ -268,9 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
                    ETH_ALEN))
                return false;
 
-       if (vif->p2p &&
-           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
-               return false;
        /*
         * Avoid using uAPSD if P2P client is associated to GO that uses
         * opportunistic power save. This is due to current FW limitation.
@@ -287,6 +304,22 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
        if (iwl_mvm_phy_ctx_count(mvm) >= 2)
                return false;
 
+       if (vif->p2p) {
+               /* Allow U-APSD only if p2p is stand alone */
+               bool is_p2p_standalone = true;
+
+               if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm))
+                       return false;
+
+               ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                       IEEE80211_IFACE_ITER_NORMAL,
+                                       iwl_mvm_p2p_standalone_iterator,
+                                       &is_p2p_standalone);
+
+               if (!is_p2p_standalone)
+                       return false;
+       }
+
        return true;
 }
 
@@ -544,7 +577,6 @@ void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
 
 struct iwl_power_vifs {
        struct iwl_mvm *mvm;
-       struct ieee80211_vif *bf_vif;
        struct ieee80211_vif *bss_vif;
        struct ieee80211_vif *p2p_vif;
        struct ieee80211_vif *ap_vif;
@@ -617,11 +649,6 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
                if (mvmvif->phy_ctxt)
                        if (mvmvif->phy_ctxt->id < MAX_PHYS)
                                power_iterator->bss_active = true;
-
-               if (mvmvif->bf_data.bf_enabled &&
-                   !WARN_ON(power_iterator->bf_vif))
-                       power_iterator->bf_vif = vif;
-
                break;
 
        default:
@@ -850,29 +877,9 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
        return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
 }
 
-static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
-                                      struct ieee80211_vif *vif,
-                                      bool enable)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_beacon_filter_cmd cmd = {
-               IWL_BF_CMD_CONFIG_DEFAULTS,
-               .bf_enable_beacon_filter = cpu_to_le32(1),
-       };
-
-       if (!mvmvif->bf_data.bf_enabled)
-               return 0;
-
-       if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
-               cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
-
-       mvmvif->bf_data.ba_enabled = enable;
-       return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
-}
-
-int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
-                                 struct ieee80211_vif *vif,
-                                 u32 flags)
+static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         u32 flags, bool d0i3)
 {
        struct iwl_beacon_filter_cmd cmd = {};
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -883,12 +890,20 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
 
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
 
-       if (!ret)
+       /* don't change bf_enabled in case of temporary d0i3 configuration */
+       if (!ret && !d0i3)
                mvmvif->bf_data.bf_enabled = false;
 
        return ret;
 }
 
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif,
+                                 u32 flags)
+{
+       return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false);
+}
+
 static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
 {
        bool disable_ps;
@@ -918,21 +933,26 @@ static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
 }
 
 static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
-                               struct iwl_power_vifs *vifs)
+                               struct ieee80211_vif *vif)
 {
-       struct iwl_mvm_vif *mvmvif;
-       bool ba_enable;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_beacon_filter_cmd cmd = {
+               IWL_BF_CMD_CONFIG_DEFAULTS,
+               .bf_enable_beacon_filter = cpu_to_le32(1),
+       };
 
-       if (!vifs->bf_vif)
+       if (!mvmvif->bf_data.bf_enabled)
                return 0;
 
-       mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif);
+       if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+               cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
 
-       ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
-                     !vifs->bf_vif->bss_conf.ps ||
-                     iwl_mvm_vif_low_latency(mvmvif));
+       mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
+                                      mvm->ps_disabled ||
+                                      !vif->bss_conf.ps ||
+                                      iwl_mvm_vif_low_latency(mvmvif));
 
-       return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable);
+       return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
 }
 
 int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
@@ -953,7 +973,10 @@ int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       return iwl_mvm_power_set_ba(mvm, &vifs);
+       if (vifs.bss_vif)
+               return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+       return 0;
 }
 
 int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
@@ -988,7 +1011,10 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
                        return ret;
        }
 
-       return iwl_mvm_power_set_ba(mvm, &vifs);
+       if (vifs.bss_vif)
+               return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+       return 0;
 }
 
 int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
@@ -1025,8 +1051,17 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
                        IWL_BF_CMD_CONFIG_D0I3,
                        .bf_enable_beacon_filter = cpu_to_le32(1),
                };
-               ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
-                                                   flags, true);
+               /*
+                * When beacon storing is supported - disable beacon filtering
+                * altogether - the latest beacon will be sent when exiting d0i3
+                */
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_BEACON_STORING))
+                       ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags,
+                                                            true);
+               else
+                       ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+                                                           flags, true);
        } else {
                if (mvmvif->bf_data.bf_enabled)
                        ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
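The d0i3 branch above is gated on a firmware capability: when the firmware can store beacons across d0i3 (and deliver the latest one on exit, per the comment), beacon filtering is disabled for the idle period instead of being reconfigured with the D0I3 defaults. A tiny sketch of a capability-bitmap test in the spirit of fw_has_capa() (the bit number and struct layout here are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical capability id and bitmap size; real ids come from fw TLVs. */
enum { CAPA_BEACON_STORING = 71, CAPA_WORDS = 4 };

struct fw_capa {
        uint32_t bits[CAPA_WORDS];      /* 32 capability bits per word */
};

static bool has_capa(const struct fw_capa *c, unsigned int id)
{
        return c->bits[id / 32] & (1u << (id % 32));
}

int main(void)
{
        struct fw_capa capa = { { 0 } };

        capa.bits[CAPA_BEACON_STORING / 32] |= 1u << (CAPA_BEACON_STORING % 32);

        if (has_capa(&capa, CAPA_BEACON_STORING))
                puts("d0i3: disable beacon filtering (fw stores beacons)");
        else
                puts("d0i3: keep beacon filtering with the d0i3 defaults");
        return 0;
}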
index 0b762b4f8fadfb26b36f103b64308239edda9c23..2141db5bff82a28cbe2b5cc1280af264a5f0cc06 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -74,6 +76,9 @@ struct iwl_mvm_quota_iterator_data {
        int n_interfaces[MAX_BINDINGS];
        int colors[MAX_BINDINGS];
        int low_latency[MAX_BINDINGS];
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       int dbgfs_min[MAX_BINDINGS];
+#endif
        int n_low_latency_bindings;
        struct ieee80211_vif *disabled_vif;
 };
@@ -129,6 +134,12 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
 
        data->n_interfaces[id]++;
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvmvif->dbgfs_quota_min)
+               data->dbgfs_min[id] = max(data->dbgfs_min[id],
+                                         mvmvif->dbgfs_quota_min);
+#endif
+
        if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
                data->n_low_latency_bindings++;
                data->low_latency[id] = true;
@@ -259,6 +270,11 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
 
                if (data.n_interfaces[i] <= 0)
                        cmd.quotas[idx].quota = cpu_to_le32(0);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               else if (data.dbgfs_min[i])
+                       cmd.quotas[idx].quota =
+                               cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
+#endif
                else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
                         data.low_latency[i])
                        /*
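The debugfs quota override works in two steps: the iterator keeps the largest per-interface minimum requested for each binding (the max() above), and the quota loop then forces that binding's share to min * QUOTA_100 / 100, i.e. the value is treated as a percentage of the full quota. A small sketch of the arithmetic with made-up binding data and a placeholder QUOTA_100:

#include <stdint.h>
#include <stdio.h>

#define QUOTA_100      128      /* placeholder for the full-quota unit */
#define MAX_BINDINGS   4

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

int main(void)
{
        /* Per-interface debugfs minima (percent), grouped by binding id. */
        struct { int binding; uint32_t min_pct; } ifaces[] = {
                { 0, 25 }, { 0, 40 }, { 1, 0 },
        };
        uint32_t dbgfs_min[MAX_BINDINGS] = { 0 };
        size_t i;

        /* Keep the largest requested minimum per binding, like the
         * iterator in the patch does with max(). */
        for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
                dbgfs_min[ifaces[i].binding] =
                        max_u32(dbgfs_min[ifaces[i].binding],
                                ifaces[i].min_pct);

        for (i = 0; i < MAX_BINDINGS; i++)
                if (dbgfs_min[i])
                        printf("binding %zu: quota forced to %u of %u\n",
                               i, dbgfs_min[i] * QUOTA_100 / 100, QUOTA_100);
        return 0;
}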
index 7bb6fd0e4391a92f0200aad6e62362fc20c48802..6e7e78a378794a34174be26f2f9dcf01ba6403b3 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -724,14 +725,28 @@ static int _rs_collect_tx_data(struct iwl_mvm *mvm,
        return 0;
 }
 
-static int rs_collect_tx_data(struct iwl_mvm *mvm,
-                             struct iwl_lq_sta *lq_sta,
-                             struct iwl_scale_tbl_info *tbl,
-                             int scale_index, int attempts, int successes,
-                             u8 reduced_txp)
+static int rs_collect_tpc_data(struct iwl_mvm *mvm,
+                              struct iwl_lq_sta *lq_sta,
+                              struct iwl_scale_tbl_info *tbl,
+                              int scale_index, int attempts, int successes,
+                              u8 reduced_txp)
+{
+       struct iwl_rate_scale_data *window = NULL;
+
+       if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+               return -EINVAL;
+
+       window = &tbl->tpc_win[reduced_txp];
+       return  _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
+                                   window);
+}
+
+static int rs_collect_tlc_data(struct iwl_mvm *mvm,
+                              struct iwl_lq_sta *lq_sta,
+                              struct iwl_scale_tbl_info *tbl,
+                              int scale_index, int attempts, int successes)
 {
        struct iwl_rate_scale_data *window = NULL;
-       int ret;
 
        if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
                return -EINVAL;
@@ -745,16 +760,6 @@ static int rs_collect_tx_data(struct iwl_mvm *mvm,
 
        /* Select window for current tx bit rate */
        window = &(tbl->win[scale_index]);
-
-       ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
-                                 window);
-       if (ret)
-               return ret;
-
-       if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
-               return -EINVAL;
-
-       window = &tbl->tpc_win[reduced_txp];
        return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
                                   window);
 }
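rs_collect_tx_data() is split into rs_collect_tpc_data() and rs_collect_tlc_data(): transmit-power-control statistics go into the per-reduction tpc_win[] windows, rate-scaling statistics into the per-rate win[] windows, and each collector bounds-checks its own index. For A-MPDUs the caller records TPC data before the ampdu_ack_len == 0 adjustment and TLC data after it, as the extended comment in the next hunk explains. A compact sketch of the split, with invented window structures:

#include <stdio.h>

#define RATE_COUNT        4
#define TPC_MAX_REDUCTION 3     /* power can be reduced by 0..3 steps */

struct window { int attempts, successes; };

struct table {
        struct window win[RATE_COUNT];                  /* per rate (TLC) */
        struct window tpc_win[TPC_MAX_REDUCTION + 1];   /* per power step */
};

static int collect(struct window *w, int attempts, int successes)
{
        w->attempts += attempts;
        w->successes += successes;
        return 0;
}

/* Per-power-reduction statistics, keyed by the reduced-txp step. */
static int collect_tpc(struct table *t, int reduced_txp, int att, int succ)
{
        if (reduced_txp < 0 || reduced_txp > TPC_MAX_REDUCTION)
                return -1;
        return collect(&t->tpc_win[reduced_txp], att, succ);
}

/* Per-rate statistics, keyed by the rate index. */
static int collect_tlc(struct table *t, int rate_idx, int att, int succ)
{
        if (rate_idx < 0 || rate_idx >= RATE_COUNT)
                return -1;
        return collect(&t->win[rate_idx], att, succ);
}

int main(void)
{
        struct table t = { 0 };

        /* An A-MPDU of 10 frames, 8 acked, sent at rate 2 with one step of
         * power reduction: each collector indexes its own window array. */
        collect_tpc(&t, 1, 10, 8);
        collect_tlc(&t, 2, 10, 8);

        printf("tpc[1]=%d/%d tlc[2]=%d/%d\n",
               t.tpc_win[1].successes, t.tpc_win[1].attempts,
               t.win[2].successes, t.win[2].attempts);
        return 0;
}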
@@ -1301,17 +1306,30 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
         * first index into rate scale table.
         */
        if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-               /* ampdu_ack_len = 0 marks no BA was received. In this case
-                * treat it as a single frame loss as we don't want the success
-                * ratio to dip too quickly because a BA wasn't received
+               rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+                                   info->status.ampdu_len,
+                                   info->status.ampdu_ack_len,
+                                   reduced_txp);
+
+               /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
+                * it as a single frame loss as we don't want the success ratio
+                * to dip too quickly because a BA wasn't received.
+                * For TPC, there's no need for this optimisation since we want
+                * to recover very quickly from a bad power reduction and,
+                * therefore we'd like the success ratio to get an immediate hit
+                * when failing to get a BA, so we'd switch back to a lower or
+                * zero power reduction. When FW transmits agg with a rate
+                * different from the initial rate, it will not use reduced txp
+                * and will send BA notification twice (one empty with reduced
+                * txp equal to the value from LQ and one with reduced txp 0).
+                * We need to update counters for each txp level accordingly.
                 */
                if (info->status.ampdu_ack_len == 0)
                        info->status.ampdu_len = 1;
 
-               rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
-                                  info->status.ampdu_len,
-                                  info->status.ampdu_ack_len,
-                                  reduced_txp);
+               rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+                                   info->status.ampdu_len,
+                                   info->status.ampdu_ack_len);
 
                /* Update success/fail counts if not searching for new mode */
                if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@ -1344,9 +1362,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                        else
                                continue;
 
-                       rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
-                                          1, i < retries ? 0 : legacy_success,
-                                          reduced_txp);
+                       rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
+                                           lq_rate.index, 1,
+                                           i < retries ? 0 : legacy_success,
+                                           reduced_txp);
+                       rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
+                                           lq_rate.index, 1,
+                                           i < retries ? 0 : legacy_success);
                }
 
                /* Update success/fail counts if not searching for new mode */
@@ -2040,7 +2062,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
        }
 
        /* try decreasing first if applicable */
-       if (weak != TPC_INVALID) {
+       if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
+           weak != TPC_INVALID) {
                if (weak_tpt == IWL_INVALID_VALUE &&
                    (strong_tpt == IWL_INVALID_VALUE ||
                     current_tpt >= strong_tpt)) {
index 0c073e02fd4cba9d07c8410c2ec1b4e99c46a8d7..615dea143d4e8df022991512dcac62b1fe48cf6a 100644 (file)
@@ -201,25 +201,22 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                                        struct iwl_rx_mpdu_desc *desc,
                                        struct ieee80211_rx_status *rx_status)
 {
-       int energy_a, energy_b, energy_c, max_energy;
+       int energy_a, energy_b, max_energy;
 
        energy_a = desc->energy_a;
        energy_a = energy_a ? -energy_a : S8_MIN;
        energy_b = desc->energy_b;
        energy_b = energy_b ? -energy_b : S8_MIN;
-       energy_c = desc->energy_c;
-       energy_c = energy_c ? -energy_c : S8_MIN;
        max_energy = max(energy_a, energy_b);
-       max_energy = max(max_energy, energy_c);
 
-       IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
-                       energy_a, energy_b, energy_c, max_energy);
+       IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
+                       energy_a, energy_b, max_energy);
 
        rx_status->signal = max_energy;
        rx_status->chains = 0; /* TODO: phy info */
        rx_status->chain_signal[0] = energy_a;
        rx_status->chain_signal[1] = energy_b;
-       rx_status->chain_signal[2] = energy_c;
+       rx_status->chain_signal[2] = S8_MIN;
 }
 
 static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
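The signal-strength helper now considers only chains A and B: each energy byte is a positive magnitude that is negated to get dBm, zero means the chain was not measured and maps to S8_MIN, the reported signal is the maximum of the chains, and the unused third chain is pinned to S8_MIN. A standalone sketch of that conversion:

#include <stdint.h>
#include <stdio.h>

#define S8_MIN (-128)

/* Firmware reports energy as a positive magnitude; 0 means "not measured". */
static int energy_to_dbm(uint8_t energy)
{
        return energy ? -(int)energy : S8_MIN;
}

int main(void)
{
        uint8_t energy_a = 43, energy_b = 0;    /* chain B not measured */
        int a = energy_to_dbm(energy_a);
        int b = energy_to_dbm(energy_b);
        int max = a > b ? a : b;

        printf("chain A %d dBm, chain B %d dBm, signal %d dBm, chain C %d\n",
               a, b, max, S8_MIN);
        return 0;
}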
@@ -294,7 +291,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
        struct ieee80211_rx_status *rx_status;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
-       struct ieee80211_hdr *hdr = (void *)(desc + 1);
+       struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
        u32 len = le16_to_cpu(desc->mpdu_len);
        u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
        struct ieee80211_sta *sta = NULL;
index 9a15642f80dd28b88dc4ae8326f465a7ec583e35..1e1ab9daaec9d330632455295af7d510cf2e9d5b 100644 (file)
@@ -930,8 +930,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
                return -ENOBUFS;
 
-       if (type == mvm->scan_type)
+       if (type == mvm->scan_type) {
+               IWL_DEBUG_SCAN(mvm,
+                              "Ignoring UMAC scan config of the same type\n");
                return 0;
+       }
 
        cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
 
@@ -1109,7 +1112,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params,
                                                                 vif));
 
-       if (type == IWL_MVM_SCAN_SCHED)
+       if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
                cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
 
        if (iwl_mvm_scan_use_ebs(mvm, vif))
@@ -1351,7 +1354,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
-               ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
+               ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
        } else {
                hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
                ret = iwl_mvm_scan_lmac(mvm, vif, &params);
index b556e33658d734974d8534b271cf5e57ea755be3..4854e79cbda84c2693867f6f9d99071345506b28 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "sta.h"
 #include "rs.h"
 
+/*
+ * New version of ADD_STA_sta command added new fields at the end of the
+ * structure, so sending the size of the relevant API's structure is enough to
+ * support both API versions.
+ */
+static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
+{
+       return iwl_mvm_has_new_rx_api(mvm) ?
+               sizeof(struct iwl_mvm_add_sta_cmd) :
+               sizeof(struct iwl_mvm_add_sta_cmd_v7);
+}
+
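Since the newer ADD_STA layout only appends fields to the end of the v7 structure, the driver keeps a single command struct and simply sends fewer bytes to firmware that speaks the older API, which is what the helper above encodes. A sketch of the idea with invented struct names and a boolean standing in for the API/capability check:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Old command layout (v7). */
struct add_sta_cmd_v7 {
        uint8_t  sta_id;
        uint8_t  tid;
        uint16_t flags;
};

/* New layout: same prefix, new fields only appended at the end. */
struct add_sta_cmd {
        uint8_t  sta_id;
        uint8_t  tid;
        uint16_t flags;
        uint16_t rx_ba_window;          /* new in the later API */
        uint16_t reserved;
};

static size_t add_sta_cmd_size(int has_new_api)
{
        return has_new_api ? sizeof(struct add_sta_cmd)
                           : sizeof(struct add_sta_cmd_v7);
}

/* Stand-in for the host-command send path: only 'len' bytes go out. */
static void send_cmd(const void *payload, size_t len)
{
        (void)payload;
        printf("sending %zu bytes\n", len);
}

int main(void)
{
        struct add_sta_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.sta_id = 3;
        cmd.rx_ba_window = 64;                  /* ignored by old firmware */

        send_cmd(&cmd, add_sta_cmd_size(0));    /* old API: v7-sized */
        send_cmd(&cmd, add_sta_cmd_size(1));    /* new API: full struct */
        return 0;
}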
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
 {
@@ -187,12 +201,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
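Every ADD_STA caller now masks the returned status with IWL_ADD_STA_STATUS_MASK before switching on it, since the status word can carry additional bits beyond the completion code. A sketch of why the mask matters (the bit layout below is invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define ADD_STA_SUCCESS      0x1
#define ADD_STA_STATUS_MASK  0xFF      /* low byte = completion code */
#define ADD_STA_EXTRA_SHIFT  8         /* hypothetical extra info bits */

int main(void)
{
        /* Firmware reply: success code plus extra data in the high bits. */
        uint32_t status = ADD_STA_SUCCESS | (5u << ADD_STA_EXTRA_SHIFT);

        /* Without the mask the extra bits break the comparison. */
        printf("raw  == SUCCESS: %s\n",
               status == ADD_STA_SUCCESS ? "yes" : "no");
        printf("mask == SUCCESS: %s\n",
               (status & ADD_STA_STATUS_MASK) == ADD_STA_SUCCESS ?
               "yes" : "no");
        return 0;
}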
@@ -357,12 +372,13 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
        cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
                               mvmsta->sta_id);
@@ -623,12 +639,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
        if (addr)
                memcpy(cmd.addr, addr, ETH_ALEN);
 
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "Internal station added.\n");
                return 0;
@@ -819,7 +836,7 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 #define IWL_MAX_RX_BA_SESSIONS 16
 
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                      int tid, u16 ssn, bool start)
+                      int tid, u16 ssn, bool start, u8 buf_size)
 {
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {};
@@ -839,6 +856,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (start) {
                cmd.add_immediate_ba_tid = (u8) tid;
                cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+               cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
        } else {
                cmd.remove_immediate_ba_tid = (u8) tid;
        }
@@ -846,12 +864,13 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                                  STA_MODIFY_REMOVE_BA_TID;
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
                               start ? "start" : "stopp");
@@ -904,12 +923,13 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                break;
        default:
@@ -1640,7 +1660,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+                                  iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
@@ -1731,7 +1752,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 
        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
                                   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
-                                  sizeof(cmd), &cmd);
+                                  iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
@@ -1766,7 +1787,8 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+                                  iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
index 39fdf5224e81741aacccec2f43f4783dd65b78fb..e3b9446ee99558236aa8175332dcd304f9bd93a5 100644 (file)
@@ -401,7 +401,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
 
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                      int tid, u16 ssn, bool start);
+                      int tid, u16 ssn, bool start, u8 buf_size);
 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
index fb76004eede4c4f29cc5835e90d1cfbe8262247b..758d05a8c6aad22748c2fb7f812038913b3e77e6 100644 (file)
@@ -194,12 +194,12 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
        return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
 }
 
-int iwl_mvm_get_temp(struct iwl_mvm *mvm)
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
 {
        struct iwl_notification_wait wait_temp_notif;
        static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
                                            DTS_MEASUREMENT_NOTIF_WIDE) };
-       int ret, temp;
+       int ret;
 
        if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
                temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
@@ -208,7 +208,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
                                   temp_notif, ARRAY_SIZE(temp_notif),
-                                  iwl_mvm_temp_notif_wait, &temp);
+                                  iwl_mvm_temp_notif_wait, temp);
 
        ret = iwl_mvm_get_temp_cmd(mvm);
        if (ret) {
@@ -219,12 +219,10 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
                                    IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
-       if (ret) {
+       if (ret)
                IWL_ERR(mvm, "Getting the temperature timed out\n");
-               return ret;
-       }
 
-       return temp;
+       return ret;
 }
 
 static void check_exit_ctkill(struct work_struct *work)
@@ -233,6 +231,7 @@ static void check_exit_ctkill(struct work_struct *work)
        struct iwl_mvm *mvm;
        u32 duration;
        s32 temp;
+       int ret;
 
        tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
        mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
@@ -250,13 +249,13 @@ static void check_exit_ctkill(struct work_struct *work)
                goto reschedule;
        }
 
-       temp = iwl_mvm_get_temp(mvm);
+       ret = iwl_mvm_get_temp(mvm, &temp);
 
        iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL);
 
        __iwl_mvm_mac_stop(mvm);
 
-       if (temp < 0)
+       if (ret)
                goto reschedule;
 
        IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
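iwl_mvm_get_temp() now returns only an error code and delivers the reading through an s32 pointer; previously the temperature itself was the return value, so the caller's old "temp < 0" check could not tell a genuinely negative reading from a failure. A sketch of the two-channel return:

#include <stdio.h>

/* Returns 0 on success and the reading via *temp, or a negative errno. */
static int get_temp(int fail, int *temp)
{
        if (fail)
                return -5;      /* e.g. the measurement never arrived */
        *temp = -12;            /* a valid, genuinely negative temperature */
        return 0;
}

int main(void)
{
        int temp, ret;

        ret = get_temp(0, &temp);
        if (ret)
                printf("error %d\n", ret);
        else
                printf("NIC temperature: %d\n", temp);
        return 0;
}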
index 8bf48a7d0f4e99e8297feace5368375b6a16a151..8a1638ec7e28f65a1dc3550ed3464c84d0eed054 100644 (file)
@@ -736,6 +736,37 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
        iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
 }
 
+static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
+                                           u32 status)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_tx_status *status_trig;
+       int i;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
+       status_trig = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
+               /* don't collect on status 0 */
+               if (!status_trig->statuses[i].status)
+                       break;
+
+               if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
+                       continue;
+
+               iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                           "Tx status %d was received",
+                                           status & TX_STATUS_MSK);
+               break;
+       }
+}
+
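The new helper walks the trigger's status list, stops at the first zero entry (unused slots, per the comment), and requests a firmware-debug collection when the masked TX status matches a configured value. A sketch of that matching loop with an invented trigger layout:

#include <stdint.h>
#include <stdio.h>

#define TX_STATUS_MSK 0xFF
#define MAX_STATUSES  4

struct tx_status_trigger {
        uint32_t statuses[MAX_STATUSES];        /* 0 terminates the list */
};

static void check_trigger(const struct tx_status_trigger *trig,
                          uint32_t status)
{
        int i;

        for (i = 0; i < MAX_STATUSES; i++) {
                if (!trig->statuses[i])         /* don't collect on 0 */
                        break;
                if (trig->statuses[i] != (status & TX_STATUS_MSK))
                        continue;
                printf("collect: Tx status %u was received\n",
                       (unsigned)(status & TX_STATUS_MSK));
                return;
        }
        printf("no trigger for status %u\n",
               (unsigned)(status & TX_STATUS_MSK));
}

int main(void)
{
        struct tx_status_trigger trig = { .statuses = { 0x3, 0x10, 0, 0 } };

        check_trigger(&trig, 0x210);    /* high bits stripped -> matches */
        check_trigger(&trig, 0x42);     /* not in the list */
        return 0;
}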
 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                                     struct iwl_rx_packet *pkt)
 {
@@ -784,6 +815,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        break;
                }
 
+               iwl_mvm_tx_status_check_trigger(mvm, status);
+
                info->status.rates[0].count = tx_resp->failure_frame + 1;
                iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
                                            info);
@@ -1029,7 +1062,6 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                mvmsta->tid_data[tid].rate_n_flags =
                        le32_to_cpu(tx_resp->initial_rate);
-               mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
                mvmsta->tid_data[tid].tx_time =
                        le16_to_cpu(tx_resp->wireless_media_time);
        }
@@ -1060,7 +1092,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
        /* TODO: not accounted if the whole A-MPDU failed */
        info->status.tx_time = tid_data->tx_time;
        info->status.status_driver_data[0] =
-               (void *)(uintptr_t)tid_data->reduced_tpc;
+               (void *)(uintptr_t)ba_notif->reduced_txp;
        info->status.status_driver_data[1] =
                (void *)(uintptr_t)tid_data->rate_n_flags;
 }
@@ -1133,6 +1165,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                           scd_flow, ba_resp_scd_ssn, ba_notif->txed,
                           ba_notif->txed_2_done);
 
+       IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+                          ba_notif->reduced_txp);
        tid_data->next_reclaimed = ba_resp_scd_ssn;
 
        iwl_mvm_check_ratid_empty(mvm, sta, tid);
index 3a989f5c20db2b1a360361be970a97266f52ad25..59453c17658086ac757e2124f46cacfc842c4500 100644 (file)
@@ -937,18 +937,16 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
 }
 
 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                              bool value)
+                              bool prev)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int res;
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (mvmvif->low_latency == value)
+       if (iwl_mvm_vif_low_latency(mvmvif) == prev)
                return 0;
 
-       mvmvif->low_latency = value;
-
        res = iwl_mvm_update_quotas(mvm, false, NULL);
        if (res)
                return res;
index 6261a68cae907d793cdbf62ceb9dda97e5031fee..753ec6785912f7f3eed9cab797d6216633582ad6 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -66,6 +67,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
 #include <linux/acpi.h>
@@ -378,7 +380,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
 
 /* 3168 Series */
+       {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)},
 
 /* 7265 Series */
@@ -475,6 +480,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
@@ -623,6 +629,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto out_free_drv;
 
+       /* if RTPM is in use, enable it in our device */
+       if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_set_autosuspend_delay(&pdev->dev,
+                                        iwlwifi_mod_params.d0i3_entry_delay);
+               pm_runtime_use_autosuspend(&pdev->dev);
+               pm_runtime_allow(&pdev->dev);
+       }
+
        return 0;
 
 out_free_drv:
@@ -689,15 +704,132 @@ static int iwl_pci_resume(struct device *device)
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
+int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret;
+
+       if (test_bit(STATUS_FW_ERROR, &trans->status))
+               return 0;
+
+       set_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+
+       /* config the fw */
+       ret = iwl_op_mode_enter_d0i3(trans->op_mode);
+       if (ret == 1) {
+               IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n");
+               clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+               return -EBUSY;
+       }
+       if (ret)
+               goto err;
+
+       ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+                                test_bit(STATUS_TRANS_IDLE, &trans->status),
+                                msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+       if (!ret) {
+               IWL_ERR(trans, "Timeout entering D0i3\n");
+               ret = -ETIMEDOUT;
+               goto err;
+       }
+
+       clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+
+       return 0;
+err:
+       clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+       iwl_trans_fw_error(trans);
+       return ret;
+}
+
+int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret;
+
+       /* sometimes a D0i3 entry is not followed through */
+       if (!test_bit(STATUS_TRANS_IDLE, &trans->status))
+               return 0;
+
+       /* config the fw */
+       ret = iwl_op_mode_exit_d0i3(trans->op_mode);
+       if (ret)
+               goto err;
+
+       /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */
+
+       ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+                                !test_bit(STATUS_TRANS_IDLE, &trans->status),
+                                msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+       if (!ret) {
+               IWL_ERR(trans, "Timeout exiting D0i3\n");
+               ret = -ETIMEDOUT;
+               goto err;
+       }
+
+       return 0;
+err:
+       clear_bit(STATUS_TRANS_IDLE, &trans->status);
+       iwl_trans_fw_error(trans);
+       return ret;
+}
+
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+static int iwl_pci_runtime_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct iwl_trans *trans = pci_get_drvdata(pdev);
+       int ret;
+
+       IWL_DEBUG_RPM(trans, "entering runtime suspend\n");
+
+       if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+               ret = iwl_pci_fw_enter_d0i3(trans);
+               if (ret < 0)
+                       return ret;
+       }
+
+       trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+
+       iwl_trans_d3_suspend(trans, false, false);
+
+       return 0;
+}
+
+static int iwl_pci_runtime_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct iwl_trans *trans = pci_get_drvdata(pdev);
+       enum iwl_d3_status d3_status;
+
+       IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n");
+
+       iwl_trans_d3_resume(trans, &d3_status, false, false);
+
+       if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+               return iwl_pci_fw_exit_d0i3(trans);
+
+       return 0;
+}
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+
+static const struct dev_pm_ops iwl_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend,
+                               iwl_pci_resume)
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+       SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend,
+                          iwl_pci_runtime_resume,
+                          NULL)
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+};
 
 #define IWL_PM_OPS     (&iwl_dev_pm_ops)
 
-#else
+#else /* CONFIG_PM_SLEEP */
 
 #define IWL_PM_OPS     NULL
 
-#endif
+#endif /* CONFIG_PM_SLEEP */
 
 static struct pci_driver iwl_pci_driver = {
        .name = DRV_NAME,
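The runtime-PM hooks above funnel into a D0i3 handshake: entry sets STATUS_TRANS_GOING_IDLE, asks the op mode to prepare, waits with a timeout for STATUS_TRANS_IDLE, and clears the transient bit (or reports a firmware error) on the way out; exit does the inverse. A single-threaded sketch of the status-bit bookkeeping only (the real code blocks on d0i3_waitq and the firmware answers asynchronously; the stub below stands in for that answer):

#include <stdbool.h>
#include <stdio.h>

#define STATUS_GOING_IDLE (1u << 0)
#define STATUS_IDLE       (1u << 1)

static unsigned int status;

/* Stand-in for the asynchronous D0I3 acknowledgement from the firmware. */
static bool wait_for_idle(bool fw_answers)
{
        if (fw_answers)
                status |= STATUS_IDLE;
        return status & STATUS_IDLE;    /* real code: wait_event_timeout() */
}

static int enter_d0i3(bool fw_answers)
{
        status |= STATUS_GOING_IDLE;

        if (!wait_for_idle(fw_answers)) {
                status &= ~STATUS_GOING_IDLE;
                return -1;              /* timed out: report the failure */
        }

        status &= ~STATUS_GOING_IDLE;   /* transition done, stay IDLE */
        return 0;
}

static int exit_d0i3(void)
{
        if (!(status & STATUS_IDLE))    /* entry was never followed through */
                return 0;
        status &= ~STATUS_IDLE;
        return 0;
}

int main(void)
{
        printf("enter: %d (status 0x%x)\n", enter_d0i3(true), status);
        printf("exit:  %d (status 0x%x)\n", exit_d0i3(), status);
        return 0;
}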
index cc3888e2700daf1cfe07ebb4e003ca53bcf6d934..2f959162d045671aaf40723a52c292f9d5e623e4 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
 #define RX_NUM_QUEUES 1
 #define RX_POST_REQ_ALLOC 2
 #define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
+#define RX_PENDING_WATERMARK 16
 
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
  * trans_pcie layer */
 
+/**
+ * struct iwl_rx_mem_buffer
+ * @page_dma: bus address of rxb page
+ * @page: driver's pointer to the rxb page
+ * @vid: index of this rxb in the global table
+ */
 struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        struct page *page;
+       u16 vid;
        struct list_head list;
 };
 
@@ -90,8 +97,12 @@ struct isr_statistics {
 
 /**
  * struct iwl_rxq - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @id: queue index
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
+ *     Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
+ * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
@@ -103,32 +114,34 @@ struct isr_statistics {
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
+ * @queue: actual rx queue. Not used for multi-rx queue.
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
-       __le32 *bd;
+       int id;
+       void *bd;
        dma_addr_t bd_dma;
+       __le32 *used_bd;
+       dma_addr_t used_bd_dma;
        u32 read;
        u32 write;
        u32 free_count;
        u32 used_count;
        u32 write_actual;
+       u32 queue_size;
        struct list_head rx_free;
        struct list_head rx_used;
        bool need_update;
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+       struct napi_struct napi;
        struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 };
 
 /**
  * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
  * @req_pending: number of requests the allocator has not processed yet
  * @req_ready: number of requests honored and ready for claiming
  * @rbd_allocated: RBDs with pages allocated and ready to be handled to
@@ -140,7 +153,6 @@ struct iwl_rxq {
  * @rx_alloc: work struct for background calls
  */
 struct iwl_rb_allocator {
-       struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
        atomic_t req_pending;
        atomic_t req_ready;
        struct list_head rbd_allocated;
@@ -280,6 +292,7 @@ struct iwl_txq {
        bool ampdu;
        bool block;
        unsigned long wd_timeout;
+       struct sk_buff_head overflow_q;
 };
 
 static inline dma_addr_t
@@ -297,6 +310,8 @@ struct iwl_tso_hdr_page {
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
+ * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
+ * @global_table: table mapping received VID from hw to rxb
  * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
@@ -323,13 +338,14 @@ struct iwl_tso_hdr_page {
  * @fw_mon_size: size of the buffer for the firmware monitor
  */
 struct iwl_trans_pcie {
-       struct iwl_rxq rxq;
+       struct iwl_rxq *rxq;
+       struct iwl_rx_mem_buffer rx_pool[MQ_RX_POOL_SIZE];
+       struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
        struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
        struct net_device napi_dev;
-       struct napi_struct napi;
 
        struct __percpu iwl_tso_hdr_page *tso_hdr_page;
 
@@ -359,6 +375,7 @@ struct iwl_trans_pcie {
        bool ucode_write_complete;
        wait_queue_head_t ucode_write_waitq;
        wait_queue_head_t wait_command_queue;
+       wait_queue_head_t d0i3_waitq;
 
        u8 cmd_queue;
        u8 cmd_fifo;
@@ -579,4 +596,7 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
 }
 #endif
 
+int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
+int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
+
 #endif /* __iwl_trans_int_pcie_h__ */
index ccafbd8cf4b3b224649b3fbc87da69207884743c..51314e56209d3c07972dcba4b6b41aa231cd3ad7 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  */
 static int iwl_rxq_space(const struct iwl_rxq *rxq)
 {
-       /* Make sure RX_QUEUE_SIZE is a power of 2 */
-       BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+       /* Make sure rx queue size is a power of 2 */
+       WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
 
        /*
         * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
@@ -149,7 +150,7 @@ static int iwl_rxq_space(const struct iwl_rxq *rxq)
         * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
         * defined for negative dividends.
         */
-       return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
+       return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
 }
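iwl_rxq_space() keeps the usual power-of-two ring formula but now checks the per-queue size at run time, since the legacy and multi-queue rings differ in length. One slot is always left unused so a full ring is not confused with an empty one, and the unsigned subtraction wraps correctly across the ring boundary. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Free slots in a power-of-two ring, always leaving one slot unused so a
 * full ring is not confused with an empty one. */
static uint32_t ring_space(uint32_t read, uint32_t write, uint32_t size)
{
        return (read - write - 1) & (size - 1);
}

int main(void)
{
        uint32_t size = 256;

        printf("empty: %u\n", ring_space(0, 0, size));          /* 255 */
        printf("one queued: %u\n", ring_space(0, 1, size));     /* 254 */
        printf("wrapped: %u\n", ring_space(10, 250, size));     /* 15  */
        return 0;
}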
 
 /*
@@ -160,6 +161,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
        return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
+static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
+{
+       iwl_write_prph(trans, ofs, val & 0xffffffff);
+       iwl_write_prph(trans, ofs + 4, val >> 32);
+}
+
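iwl_pcie_write_prph_64() programs a 64-bit value through the existing 32-bit periphery write: low word at the given offset, high word at offset + 4 (internal.h notes that the new descriptor addresses are 64 bit on 9000-series devices). A trivial check of the split, with an array standing in for the registers:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for iwl_write_prph(): record the 32-bit writes. */
static void write_prph(uint32_t regs[], uint64_t ofs, uint32_t val)
{
        regs[ofs / 4] = val;
}

static void write_prph_64(uint32_t regs[], uint64_t ofs, uint64_t val)
{
        write_prph(regs, ofs, (uint32_t)(val & 0xffffffff));
        write_prph(regs, ofs + 4, (uint32_t)(val >> 32));
}

int main(void)
{
        uint32_t regs[4] = { 0 };

        write_prph_64(regs, 0, 0x123456789abcdef0ULL);
        printf("low 0x%08x high 0x%08x\n", regs[0], regs[1]);
        return 0;
}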
 /*
  * iwl_pcie_rx_stop - stops the Rx DMA
  */
@@ -173,10 +180,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
 /*
  * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
  */
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
+                                   struct iwl_rxq *rxq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 reg;
 
        lockdep_assert_held(&rxq->lock);
@@ -201,24 +207,73 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
        }
 
        rxq->write_actual = round_down(rxq->write, 8);
-       iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+       if (trans->cfg->mq_rx_supported)
+               iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
+                              rxq->write_actual);
+       else
+               iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }
 
 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       int i;
 
-       spin_lock(&rxq->lock);
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+               if (!rxq->need_update)
+                       continue;
+               spin_lock(&rxq->lock);
+               iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+               rxq->need_update = false;
+               spin_unlock(&rxq->lock);
+       }
+}
+
+static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
+                                   struct iwl_rxq *rxq)
+{
+       struct iwl_rx_mem_buffer *rxb;
+
+       /*
+        * If the device isn't enabled - no need to try to add buffers...
+        * This can happen when we stop the device and still have an interrupt
+        * pending. We stop the APM before we sync the interrupts because we
+        * have to (see comment there). On the other hand, since the APM is
+        * stopped, we cannot access the HW (in particular not prph).
+        * So don't try to restock if the APM has been already stopped.
+        */
+       if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+               return;
 
-       if (!rxq->need_update)
-               goto exit_unlock;
+       spin_lock(&rxq->lock);
+       while (rxq->free_count) {
+               __le64 *bd = (__le64 *)rxq->bd;
 
-       iwl_pcie_rxq_inc_wr_ptr(trans);
-       rxq->need_update = false;
+               /* Get next free Rx buffer, remove from free list */
+               rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+                                      list);
+               list_del(&rxb->list);
 
- exit_unlock:
+               /* 12 first bits are expected to be empty */
+               WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+               /* Point to Rx buffer via next RBD in circular buffer */
+               bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+               rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+               rxq->free_count--;
+       }
        spin_unlock(&rxq->lock);
+
+       /*
+        * If we've added more space for the firmware to place data, tell it.
+        * Increment device's write pointer in multiples of 8.
+        */
+       if (rxq->write_actual != (rxq->write & ~0x7)) {
+               spin_lock(&rxq->lock);
+               iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+               spin_unlock(&rxq->lock);
+       }
 }
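In the multi-queue restock path each free RBD is a 64-bit word: the receive pages are page-aligned, so the low 12 bits of the DMA address are expected to be zero (hence the WARN_ON) and are reused to carry the buffer's index (vid) into the global table, which the hardware hands back through the used-RBD ring. A sketch of the pack/unpack, assuming 4 KiB pages and a vid that fits in 12 bits:

#include <stdint.h>
#include <stdio.h>

#define VID_MASK 0xFFFULL       /* low 12 bits: pages are 4 KiB aligned */

static uint64_t pack_rbd(uint64_t page_dma, uint16_t vid)
{
        /* The driver WARNs if the address uses the low 12 bits. */
        if (page_dma & VID_MASK)
                return 0;
        return page_dma | vid;
}

int main(void)
{
        uint64_t dma = 0x00000001f3a42000ULL;   /* 4 KiB-aligned example */
        uint64_t rbd = pack_rbd(dma, 0x2b);

        printf("rbd  %#llx\n", (unsigned long long)rbd);
        printf("dma  %#llx vid %#llx\n",
               (unsigned long long)(rbd & ~VID_MASK),
               (unsigned long long)(rbd & VID_MASK));
        return 0;
}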
 
 /*
@@ -232,10 +287,8 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
 
        /*
@@ -251,6 +304,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 
        spin_lock(&rxq->lock);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
+               __le32 *bd = (__le32 *)rxq->bd;
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);
@@ -261,7 +315,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
                list_del(&rxb->list);
 
                /* Point to Rx buffer via next RBD in circular buffer */
-               rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
+               bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
@@ -272,7 +326,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock(&rxq->lock);
-               iwl_pcie_rxq_inc_wr_ptr(trans);
+               iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                spin_unlock(&rxq->lock);
        }
 }
@@ -285,13 +339,9 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
                                           gfp_t priority)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct page *page;
        gfp_t gfp_mask = priority;
 
-       if (rxq->free_count > RX_LOW_WATERMARK)
-               gfp_mask |= __GFP_NOWARN;
-
        if (trans_pcie->rx_page_order > 0)
                gfp_mask |= __GFP_COMP;
 
@@ -301,16 +351,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
                if (net_ratelimit())
                        IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
                                       trans_pcie->rx_page_order);
-               /* Issue an error if the hardware has consumed more than half
-                * of its free buffer list and we don't have enough
-                * pre-allocated buffers.
+               /*
+                * Issue an error if we don't have enough pre-allocated
+                 * buffers.
 `               */
-               if (rxq->free_count <= RX_LOW_WATERMARK &&
-                   iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
-                   net_ratelimit())
+               if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
                        IWL_CRIT(trans,
-                                "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
-                                rxq->free_count);
+                                "Failed to alloc_pages\n");
                return NULL;
        }
        return page;
@@ -325,10 +372,10 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+                                  struct iwl_rxq *rxq)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
 
@@ -372,10 +419,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
-               /* dma address must be no more than 36 bits */
-               BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-               /* and also 256 byte aligned! */
-               BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
                spin_lock(&rxq->lock);
 
@@ -386,40 +429,23 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
        }
 }
 
-static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
+static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i;
 
-       lockdep_assert_held(&rxq->lock);
-
-       for (i = 0; i < RX_QUEUE_SIZE; i++) {
-               if (!rxq->pool[i].page)
+       for (i = 0; i < MQ_RX_POOL_SIZE; i++) {
+               if (!trans_pcie->rx_pool[i].page)
                        continue;
-               dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+               dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
                               PAGE_SIZE << trans_pcie->rx_page_order,
                               DMA_FROM_DEVICE);
-               __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
-               rxq->pool[i].page = NULL;
+               __free_pages(trans_pcie->rx_pool[i].page,
+                            trans_pcie->rx_page_order);
+               trans_pcie->rx_pool[i].page = NULL;
        }
 }
 
-/*
- * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
- *
- * When moving to rx_free an page is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
- */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
-       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
-       iwl_pcie_rxq_restock(trans);
-}
-
 /*
  * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
  *
@@ -444,6 +470,11 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
        while (pending) {
                int i;
                struct list_head local_allocated;
+               gfp_t gfp_mask = GFP_KERNEL;
+
+               /* Do not post a warning if there are only a few requests */
+               if (pending < RX_PENDING_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
 
                INIT_LIST_HEAD(&local_allocated);
 
@@ -463,7 +494,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
                        BUG_ON(rxb->page);
 
                        /* Alloc a new receive buffer */
-                       page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+                       page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
                        if (!page)
                                continue;
                        rxb->page = page;
@@ -477,10 +508,6 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
                                __free_pages(page, trans_pcie->rx_page_order);
                                continue;
                        }
-                       /* dma address must be no more than 36 bits */
-                       BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-                       /* and also 256 byte aligned! */
-                       BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
                        /* move the allocated entry to the out list */
                        list_move(&rxb->list, &local_allocated);
@@ -561,38 +588,83 @@ static void iwl_pcie_rx_allocator_work(struct work_struct *data)
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
+       int i;
+       int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
+                                                     sizeof(__le32);
+
+       if (WARN_ON(trans_pcie->rxq))
+               return -EINVAL;
 
-       memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+       trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+                                 GFP_KERNEL);
+       if (!trans_pcie->rxq)
+               return -EINVAL;
 
-       spin_lock_init(&rxq->lock);
        spin_lock_init(&rba->lock);
 
-       if (WARN_ON(rxq->bd || rxq->rb_stts))
-               return -EINVAL;
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
-       /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
-       rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
-                                     &rxq->bd_dma, GFP_KERNEL);
-       if (!rxq->bd)
-               goto err_bd;
+               spin_lock_init(&rxq->lock);
+               if (trans->cfg->mq_rx_supported)
+                       rxq->queue_size = MQ_RX_TABLE_SIZE;
+               else
+                       rxq->queue_size = RX_QUEUE_SIZE;
 
-       /*Allocate the driver's pointer to receive buffer status */
-       rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
-                                          &rxq->rb_stts_dma, GFP_KERNEL);
-       if (!rxq->rb_stts)
-               goto err_rb_stts;
+               /*
+                * Allocate the circular buffer of Read Buffer Descriptors
+                * (RBDs)
+                */
+               rxq->bd = dma_zalloc_coherent(dev,
+                                            free_size * rxq->queue_size,
+                                            &rxq->bd_dma, GFP_KERNEL);
+               if (!rxq->bd)
+                       goto err;
+
+               if (trans->cfg->mq_rx_supported) {
+                       rxq->used_bd = dma_zalloc_coherent(dev,
+                                                          sizeof(__le32) *
+                                                          rxq->queue_size,
+                                                          &rxq->used_bd_dma,
+                                                          GFP_KERNEL);
+                       if (!rxq->used_bd)
+                               goto err;
+               }
 
+               /* Allocate the driver's pointer to receive buffer status */
+               rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+                                                  &rxq->rb_stts_dma,
+                                                  GFP_KERNEL);
+               if (!rxq->rb_stts)
+                       goto err;
+       }
        return 0;
 
-err_rb_stts:
-       dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
-                         rxq->bd, rxq->bd_dma);
-       rxq->bd_dma = 0;
-       rxq->bd = NULL;
-err_bd:
+err:
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+               if (rxq->bd)
+                       dma_free_coherent(dev, free_size * rxq->queue_size,
+                                         rxq->bd, rxq->bd_dma);
+               rxq->bd_dma = 0;
+               rxq->bd = NULL;
+
+               if (rxq->rb_stts)
+                       dma_free_coherent(trans->dev,
+                                         sizeof(struct iwl_rb_status),
+                                         rxq->rb_stts, rxq->rb_stts_dma);
+
+               if (rxq->used_bd)
+                       dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
+                                         rxq->used_bd, rxq->used_bd_dma);
+               rxq->used_bd_dma = 0;
+               rxq->used_bd = NULL;
+       }
+       kfree(trans_pcie->rxq);
+
        return -ENOMEM;
 }
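
The allocate-then-unwind shape that iwl_pcie_rx_alloc() follows above (allocate per-queue buffers and, on any failure, free whatever already succeeded) can be sketched outside the kernel with plain calloc()/free() standing in for dma_zalloc_coherent()/dma_free_coherent(); the sizes are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct rxq {
	void *bd;	/* descriptor ring, per queue */
	void *rb_stts;	/* status area, per queue */
};

/* Allocate nq queues; on any failure free everything already allocated. */
static struct rxq *alloc_queues(int nq, size_t bd_sz, size_t stts_sz)
{
	struct rxq *q = calloc(nq, sizeof(*q));
	int i;

	if (!q)
		return NULL;

	for (i = 0; i < nq; i++) {
		q[i].bd = calloc(1, bd_sz);
		q[i].rb_stts = calloc(1, stts_sz);
		if (!q[i].bd || !q[i].rb_stts)
			goto err;
	}
	return q;

err:
	for (i = 0; i < nq; i++) {
		free(q[i].bd);		/* free(NULL) is a no-op */
		free(q[i].rb_stts);
	}
	free(q);
	return NULL;
}

int main(void)
{
	struct rxq *q = alloc_queues(4, 512 * sizeof(long long), 64);

	printf("allocation %s\n", q ? "succeeded" : "failed");
	return 0;
}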
 
@@ -659,65 +731,103 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
                iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
 }
 
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       u32 rb_size, enabled = 0;
        int i;
 
-       lockdep_assert_held(&rxq->lock);
-
-       INIT_LIST_HEAD(&rxq->rx_free);
-       INIT_LIST_HEAD(&rxq->rx_used);
-       rxq->free_count = 0;
-       rxq->used_count = 0;
+       switch (trans_pcie->rx_buf_size) {
+       case IWL_AMSDU_4K:
+               rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+               break;
+       case IWL_AMSDU_8K:
+               rb_size = RFH_RXF_DMA_RB_SIZE_8K;
+               break;
+       case IWL_AMSDU_12K:
+               rb_size = RFH_RXF_DMA_RB_SIZE_12K;
+               break;
+       default:
+               WARN_ON(1);
+               rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+       }
 
-       for (i = 0; i < RX_QUEUE_SIZE; i++)
-               list_add(&rxq->pool[i].list, &rxq->rx_used);
-}
+       /* Stop Rx DMA */
+       iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+       /* disable free and used rx queue operation */
+       iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
+
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               /* Tell device where to find RBD free table in DRAM */
+               iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
+                                      (u64)(trans_pcie->rxq[i].bd_dma));
+               /* Tell device where to find RBD used table in DRAM */
+               iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
+                                      (u64)(trans_pcie->rxq[i].used_bd_dma));
+               /* Tell device where in DRAM to update its Rx status */
+               iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
+                                      trans_pcie->rxq[i].rb_stts_dma);
+               /* Reset device index tables */
+               iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+               iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+               iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
+
+               enabled |= BIT(i) | BIT(i + 16);
+       }
 
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
-       int i;
+       /* restock default queue */
+       iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);
 
-       lockdep_assert_held(&rba->lock);
+       /*
+        * Enable Rx DMA
+        * Single frame mode
+        * Rx buffer size 4k, 8k or 12k
+        * Min RB size 4 or 8
+        * 512 RBDs
+        */
+       iwl_write_prph(trans, RFH_RXF_DMA_CFG,
+                      RFH_DMA_EN_ENABLE_VAL |
+                      rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
+                      RFH_RXF_DMA_MIN_RB_4_8 |
+                      RFH_RXF_DMA_RBDCB_SIZE_512);
 
-       INIT_LIST_HEAD(&rba->rbd_allocated);
-       INIT_LIST_HEAD(&rba->rbd_empty);
+       iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
+                                         RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+       iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
 
-       for (i = 0; i < RX_POOL_SIZE; i++)
-               list_add(&rba->pool[i].list, &rba->rbd_empty);
+       /* Set interrupt coalescing timer to default (2048 usecs) */
+       iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
 }
 
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i;
+       lockdep_assert_held(&rxq->lock);
 
-       lockdep_assert_held(&rba->lock);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+       rxq->free_count = 0;
+       rxq->used_count = 0;
+}
 
-       for (i = 0; i < RX_POOL_SIZE; i++) {
-               if (!rba->pool[i].page)
-                       continue;
-               dma_unmap_page(trans->dev, rba->pool[i].page_dma,
-                              PAGE_SIZE << trans_pcie->rx_page_order,
-                              DMA_FROM_DEVICE);
-               __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
-               rba->pool[i].page = NULL;
-       }
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+       WARN_ON(1);
+       return 0;
 }
 
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rxq *def_rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i, err;
+       int i, err, num_rbds, allocator_pool_size;
 
-       if (!rxq->bd) {
+       if (!trans_pcie->rxq) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }
+       def_rxq = trans_pcie->rxq;
        if (!rba->alloc_wq)
                rba->alloc_wq = alloc_workqueue("rb_allocator",
                                                WQ_HIGHPRI | WQ_UNBOUND, 1);
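
The enable mask built in iwl_pcie_rx_mq_hw_init() above sets one bit per queue for the free-RBD table and one for the used-RBD table (bit i and bit i + 16). A minimal reproduction, with the queue count chosen arbitrarily:

#include <stdio.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	unsigned int enabled = 0;
	int i, num_rx_queues = 4;

	for (i = 0; i < num_rx_queues; i++)
		enabled |= BIT(i) | BIT(i + 16);

	/* for 4 queues this yields 0x000f000f */
	printf("RFH_RXF_RXQ_ACTIVE <- 0x%08x\n", enabled);
	return 0;
}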
@@ -726,34 +836,68 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        spin_lock(&rba->lock);
        atomic_set(&rba->req_pending, 0);
        atomic_set(&rba->req_ready, 0);
-       /* free all first - we might be reconfigured for a different size */
-       iwl_pcie_rx_free_rba(trans);
-       iwl_pcie_rx_init_rba(rba);
+       INIT_LIST_HEAD(&rba->rbd_allocated);
+       INIT_LIST_HEAD(&rba->rbd_empty);
        spin_unlock(&rba->lock);
 
-       spin_lock(&rxq->lock);
-
        /* free all first - we might be reconfigured for a different size */
-       iwl_pcie_rxq_free_rbs(trans);
-       iwl_pcie_rx_init_rxb_lists(rxq);
+       iwl_pcie_free_rbs_pool(trans);
 
        for (i = 0; i < RX_QUEUE_SIZE; i++)
-               rxq->queue[i] = NULL;
+               def_rxq->queue[i] = NULL;
 
-       /* Set us so that we have processed and used all buffers, but have
-        * not restocked the Rx queue with fresh buffers */
-       rxq->read = rxq->write = 0;
-       rxq->write_actual = 0;
-       memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
-       spin_unlock(&rxq->lock);
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
-       iwl_pcie_rx_replenish(trans);
+               rxq->id = i;
 
-       iwl_pcie_rx_hw_init(trans, rxq);
+               spin_lock(&rxq->lock);
+               /*
+                * Set the read/write pointers to reflect that we have processed
+                * and used all buffers, but have not restocked the Rx queue
+                * with fresh buffers
+                */
+               rxq->read = 0;
+               rxq->write = 0;
+               rxq->write_actual = 0;
+               memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 
-       spin_lock(&rxq->lock);
-       iwl_pcie_rxq_inc_wr_ptr(trans);
-       spin_unlock(&rxq->lock);
+               iwl_pcie_rx_init_rxb_lists(rxq);
+
+               if (!rxq->napi.poll)
+                       netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
+                                      iwl_pcie_dummy_napi_poll, 64);
+
+               spin_unlock(&rxq->lock);
+       }
+
+       /* move the pool to the default queue and allocator ownerships */
+       num_rbds = trans->cfg->mq_rx_supported ?
+                    MQ_RX_POOL_SIZE : RX_QUEUE_SIZE;
+       allocator_pool_size = trans->num_rx_queues *
+               (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
+       for (i = 0; i < num_rbds; i++) {
+               struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
+
+               if (i < allocator_pool_size)
+                       list_add(&rxb->list, &rba->rbd_empty);
+               else
+                       list_add(&rxb->list, &def_rxq->rx_used);
+               trans_pcie->global_table[i] = rxb;
+               rxb->vid = (u16)i;
+       }
+
+       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
+       if (trans->cfg->mq_rx_supported) {
+               iwl_pcie_rx_mq_hw_init(trans);
+       } else {
+               iwl_pcie_rxq_restock(trans, def_rxq);
+               iwl_pcie_rx_hw_init(trans, def_rxq);
+       }
+
+       spin_lock(&def_rxq->lock);
+       iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
+       spin_unlock(&def_rxq->lock);
 
        return 0;
 }
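
The pool split in iwl_pcie_rx_init() above hands the first allocator_pool_size buffers to the allocator, the rest to the default queue, and records every buffer in a vid-indexed global table so it can be looked up later from the used-RBD entries. Modelled in miniature (pool size and split point are stand-ins):

#include <stdio.h>

#define POOL_SIZE 16

struct rx_buf {
	unsigned short vid;
	int owner;	/* 0 = allocator, 1 = default queue */
};

int main(void)
{
	static struct rx_buf pool[POOL_SIZE];
	struct rx_buf *global_table[POOL_SIZE];
	int allocator_pool_size = 6;	/* stand-in for num_queues * (CLAIM - POST) */
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		pool[i].vid = (unsigned short)i;
		pool[i].owner = (i < allocator_pool_size) ? 0 : 1;
		global_table[i] = &pool[i];	/* vid -> buffer lookup */
	}

	printf("buffer vid %u belongs to %s\n", global_table[10]->vid,
	       global_table[10]->owner ? "the default queue" : "the allocator");
	return 0;
}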
@@ -761,12 +905,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
+                                             sizeof(__le32);
+       int i;
 
-       /*if rxq->bd is NULL, it means that nothing has been allocated,
-        * exit now */
-       if (!rxq->bd) {
+       /*
+        * if rxq is NULL, it means that nothing has been allocated,
+        * exit now
+        */
+       if (!trans_pcie->rxq) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }
@@ -777,27 +925,37 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
                rba->alloc_wq = NULL;
        }
 
-       spin_lock(&rba->lock);
-       iwl_pcie_rx_free_rba(trans);
-       spin_unlock(&rba->lock);
-
-       spin_lock(&rxq->lock);
-       iwl_pcie_rxq_free_rbs(trans);
-       spin_unlock(&rxq->lock);
-
-       dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
-                         rxq->bd, rxq->bd_dma);
-       rxq->bd_dma = 0;
-       rxq->bd = NULL;
-
-       if (rxq->rb_stts)
-               dma_free_coherent(trans->dev,
-                                 sizeof(struct iwl_rb_status),
-                                 rxq->rb_stts, rxq->rb_stts_dma);
-       else
-               IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
-       rxq->rb_stts_dma = 0;
-       rxq->rb_stts = NULL;
+       iwl_pcie_free_rbs_pool(trans);
+
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+               if (rxq->bd)
+                       dma_free_coherent(trans->dev,
+                                         free_size * rxq->queue_size,
+                                         rxq->bd, rxq->bd_dma);
+               rxq->bd_dma = 0;
+               rxq->bd = NULL;
+
+               if (rxq->rb_stts)
+                       dma_free_coherent(trans->dev,
+                                         sizeof(struct iwl_rb_status),
+                                         rxq->rb_stts, rxq->rb_stts_dma);
+               else
+                       IWL_DEBUG_INFO(trans,
+                                      "Free rxq->rb_stts which is NULL\n");
+
+               if (rxq->used_bd)
+                       dma_free_coherent(trans->dev,
+                                         sizeof(__le32) * rxq->queue_size,
+                                         rxq->used_bd, rxq->used_bd_dma);
+               rxq->used_bd_dma = 0;
+               rxq->used_bd = NULL;
+
+               if (rxq->napi.poll)
+                       netif_napi_del(&rxq->napi);
+       }
+       kfree(trans_pcie->rxq);
 }
 
 /*
@@ -841,11 +999,11 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
 }
 
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+                               struct iwl_rxq *rxq,
                                struct iwl_rx_mem_buffer *rxb,
                                bool emergency)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        bool page_stolen = false;
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -911,7 +1069,12 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
+               if (rxq->id == 0)
+                       iwl_op_mode_rx(trans->op_mode, &rxq->napi,
+                                      &rxcb);
+               else
+                       iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
+                                          &rxcb, rxq->id);
 
                if (reclaim) {
                        kzfree(txq->entries[cmd_index].free_buf);
@@ -975,7 +1138,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rxq *rxq = &trans_pcie->rxq[0];
        u32 r, i, j, count = 0;
        bool emergency = false;
 
@@ -993,16 +1156,26 @@ restart:
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
-               if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+               if (unlikely(rxq->used_count == rxq->queue_size / 2))
                        emergency = true;
 
-               rxb = rxq->queue[i];
-               rxq->queue[i] = NULL;
+               if (trans->cfg->mq_rx_supported) {
+                       /*
+                        * used_bd is a 32-bit field, but only 12 bits are
+                        * used to retrieve the vid
+                        */
+                       u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]);
+
+                       rxb = trans_pcie->global_table[vid];
+               } else {
+                       rxb = rxq->queue[i];
+                       rxq->queue[i] = NULL;
+               }
 
                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
-               iwl_pcie_rx_handle_rb(trans, rxb, emergency);
+               iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
 
-               i = (i + 1) & RX_QUEUE_MASK;
+               i = (i + 1) & (rxq->queue_size - 1);
 
                /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
                 * try to claim the pre-allocated buffers from the allocator */
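
The index update above relies on queue_size being a power of two, so the ring position wraps with a mask instead of a modulo. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int queue_size = 512;		/* must be a power of two */
	unsigned int i = queue_size - 1;	/* last slot in the ring */

	i = (i + 1) & (queue_size - 1);		/* wraps back to 0 */
	printf("next index = %u\n", i);
	return 0;
}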
@@ -1040,10 +1213,10 @@ restart:
                        count++;
                        if (count == 8) {
                                count = 0;
-                               if (rxq->used_count < RX_QUEUE_SIZE / 3)
+                               if (rxq->used_count < rxq->queue_size / 3)
                                        emergency = false;
                                spin_unlock(&rxq->lock);
-                               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+                               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
                                spin_lock(&rxq->lock);
                        }
                }
@@ -1055,7 +1228,10 @@ restart:
                if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
                        rxq->read = i;
                        spin_unlock(&rxq->lock);
-                       iwl_pcie_rxq_restock(trans);
+                       if (trans->cfg->mq_rx_supported)
+                               iwl_pcie_rxq_mq_restock(trans, rxq);
+                       else
+                               iwl_pcie_rxq_restock(trans, rxq);
                        goto restart;
                }
        }
@@ -1077,10 +1253,10 @@ restart:
         * will be restocked by the next call of iwl_pcie_rxq_restock.
         */
        if (unlikely(emergency && count))
-               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
 
-       if (trans_pcie->napi.poll)
-               napi_gro_flush(&trans_pcie->napi, false);
+       if (rxq->napi.poll)
+               napi_gro_flush(&rxq->napi, false);
 }
 
 /*
index d60a467a983c6f220384b701953eb42d2cc4c1ae..b796952da644ca0a32ad4bf1cf5851b984fdcf88 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 #include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
 
 #include "iwl-drv.h"
 #include "iwl-trans.h"
@@ -1218,11 +1219,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
                _iwl_trans_pcie_stop_device(trans, true);
 }
 
-static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
+                                     bool reset)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+       if (!reset) {
                /* Enable persistence mode to avoid reset */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
@@ -1246,7 +1248,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
-       if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) {
+       if (reset) {
                /*
                 * reset TX queues -- some of their registers reset during S3
                 * so if we don't reset everything here the D3 image would try
@@ -1260,7 +1262,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 
 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                                    enum iwl_d3_status *status,
-                                   bool test)
+                                   bool test,  bool reset)
 {
        u32 val;
        int ret;
@@ -1295,7 +1297,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
        iwl_pcie_set_pwr(trans, false);
 
-       if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+       if (!reset) {
                iwl_clear_bit(trans, CSR_GP_CNTRL,
                              CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        } else {
@@ -1353,6 +1355,10 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
        /* ... rfkill can call stop_device and set it false if needed */
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
+       /* Make sure we sync here, because we'll need full access later */
+       if (low_power)
+               pm_runtime_resume(trans->dev);
+
        return 0;
 }
 
@@ -1422,12 +1428,6 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
 }
 
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
-{
-       WARN_ON(1);
-       return 0;
-}
-
 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                                     const struct iwl_trans_config *trans_cfg)
 {
@@ -1464,11 +1464,8 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
         * As this function may be called again in some corner cases don't
         * do anything if NAPI was already initialized.
         */
-       if (!trans_pcie->napi.poll) {
+       if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
                init_dummy_netdev(&trans_pcie->napi_dev);
-               netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
-                              iwl_pcie_dummy_napi_poll, 64);
-       }
 }
 
 void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1476,6 +1473,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;
 
+       /* TODO: check if this is really needed */
+       pm_runtime_disable(trans->dev);
+
        synchronize_irq(trans_pcie->pci_dev->irq);
 
        iwl_pcie_tx_free(trans);
@@ -1489,9 +1489,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        pci_release_regions(trans_pcie->pci_dev);
        pci_disable_device(trans_pcie->pci_dev);
 
-       if (trans_pcie->napi.poll)
-               netif_napi_del(&trans_pcie->napi);
-
        iwl_pcie_free_fw_monitor(trans);
 
        for_each_possible_cpu(i) {
@@ -1831,6 +1828,7 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans)
        spin_lock_irqsave(&trans_pcie->ref_lock, flags);
        IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
        trans_pcie->ref_count++;
+       pm_runtime_get(&trans_pcie->pci_dev->dev);
        spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
 }
 
@@ -1849,6 +1847,10 @@ void iwl_trans_pcie_unref(struct iwl_trans *trans)
                return;
        }
        trans_pcie->ref_count--;
+
+       pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
+       pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
+
        spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
 }
 
@@ -2001,29 +2003,48 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 {
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
-       char buf[256];
-       int pos = 0;
-       const size_t bufsz = sizeof(buf);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
-                                               rxq->read);
-       pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
-                                               rxq->write);
-       pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
-                                               rxq->write_actual);
-       pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
-                                               rxq->need_update);
-       pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
-                                               rxq->free_count);
-       if (rxq->rb_stts) {
-               pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
-                        le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
-       } else {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                       "closed_rb_num: Not Allocated\n");
+       char *buf;
+       int pos = 0, i, ret;
+       size_t bufsz;
+
+       bufsz = sizeof(char) * 121 * trans->num_rx_queues;
+
+       if (!trans_pcie->rxq)
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+               pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
+                                i);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
+                                rxq->read);
+               pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
+                                rxq->write);
+               pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
+                                rxq->write_actual);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
+                                rxq->need_update);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
+                                rxq->free_count);
+               if (rxq->rb_stts) {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                                        "\tclosed_rb_num: %u\n",
+                                        le16_to_cpu(rxq->rb_stts->closed_rb_num) &
+                                        0x0FFF);
+               } else {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                                        "\tclosed_rb_num: Not Allocated\n");
+               }
        }
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+
+       return ret;
 }
 
 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
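
The debugfs rewrite above sizes a heap buffer per queue (121 bytes each) and accumulates the report with scnprintf(). The same accumulation pattern in userspace, with snprintf() standing in; note the kernel's scnprintf() clamps its return value to the space actually used, which snprintf() does not:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int num_queues = 3, i, pos = 0;
	size_t bufsz = 121 * num_queues;	/* per-queue line budget, as in the patch */
	char *buf = calloc(1, bufsz);

	if (!buf)
		return 1;

	for (i = 0; i < num_queues && pos < (int)bufsz; i++)
		pos += snprintf(buf + pos, bufsz - pos,
				"queue#: %2d\n\tread: %u\n\twrite: %u\n",
				i, 0u, 0u);

	fputs(buf, stdout);
	free(buf);
	return 0;
}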
@@ -2188,7 +2209,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
+       struct iwl_rxq *rxq = &trans_pcie->rxq[0];
        u32 i, r, j, rb_len = 0;
 
        spin_lock(&rxq->lock);
@@ -2383,7 +2405,8 @@ static struct iwl_trans_dump_data
        u32 len, num_rbs;
        u32 monitor_len;
        int i, ptr;
-       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
+       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+                       !trans->cfg->mq_rx_supported;
 
        /* transport dump header */
        len = sizeof(*dump_data);
@@ -2438,11 +2461,12 @@ static struct iwl_trans_dump_data
        len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
 
        if (dump_rbs) {
+               /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
+               struct iwl_rxq *rxq = &trans_pcie->rxq[0];
                /* RBs */
-               num_rbs = le16_to_cpu(ACCESS_ONCE(
-                                     trans_pcie->rxq.rb_stts->closed_rb_num))
+               num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
                                      & 0x0FFF;
-               num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+               num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
                len += num_rbs * (sizeof(*data) +
                                  sizeof(struct iwl_fw_error_dump_rb) +
                                  (PAGE_SIZE << trans_pcie->rx_page_order));
@@ -2493,6 +2517,22 @@ static struct iwl_trans_dump_data
        return dump_data;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+               return iwl_pci_fw_enter_d0i3(trans);
+
+       return 0;
+}
+
+static void iwl_trans_pcie_resume(struct iwl_trans *trans)
+{
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+               iwl_pci_fw_exit_d0i3(trans);
+}
+#endif /* CONFIG_PM_SLEEP */
+
 static const struct iwl_trans_ops trans_ops_pcie = {
        .start_hw = iwl_trans_pcie_start_hw,
        .op_mode_leave = iwl_trans_pcie_op_mode_leave,
@@ -2503,6 +2543,11 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .d3_suspend = iwl_trans_pcie_d3_suspend,
        .d3_resume = iwl_trans_pcie_d3_resume,
 
+#ifdef CONFIG_PM_SLEEP
+       .suspend = iwl_trans_pcie_suspend,
+       .resume = iwl_trans_pcie_resume,
+#endif /* CONFIG_PM_SLEEP */
+
        .send_cmd = iwl_trans_pcie_send_hcmd,
 
        .tx = iwl_trans_pcie_tx,
@@ -2541,7 +2586,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        u16 pci_cmd;
-       int ret;
+       int ret, addr_size;
 
        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
                                &pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2579,11 +2624,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       PCIE_LINK_STATE_CLKPM);
        }
 
+       if (cfg->mq_rx_supported)
+               addr_size = 64;
+       else
+               addr_size = 36;
+
        pci_set_master(pdev);
 
-       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
        if (!ret)
-               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+               ret = pci_set_consistent_dma_mask(pdev,
+                                                 DMA_BIT_MASK(addr_size));
        if (ret) {
                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (!ret)
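
The mask selection above asks for the widest addressing the device supports (64 bit for the multi-queue devices, 36 bit otherwise) and falls back to 32 bit if the platform refuses. The try-then-fall-back shape, sketched with plain integers and an invented platform limit:

#include <stdio.h>

#define DMA_BIT_MASK(n)	((n) == 64 ? ~0ull : ((1ull << (n)) - 1))

/* Pretend the platform only supports up to 'supported' address bits. */
static int set_dma_mask(unsigned long long mask, int supported)
{
	return mask <= DMA_BIT_MASK(supported) ? 0 : -1;
}

int main(void)
{
	int addr_size = 64;	/* the mq_rx_supported case */
	int platform_bits = 32;	/* invented limit for the example */
	int ret = set_dma_mask(DMA_BIT_MASK(addr_size), platform_bits);

	if (ret)	/* fall back to 32-bit addressing */
		ret = set_dma_mask(DMA_BIT_MASK(32), platform_bits);
	printf("final ret = %d\n", ret);
	return 0;
}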
@@ -2686,6 +2737,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
 
+       init_waitqueue_head(&trans_pcie->d0i3_waitq);
+
        ret = iwl_pcie_alloc_ict(trans);
        if (ret)
                goto out_pci_disable_msi;
@@ -2700,6 +2753,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        trans_pcie->inta_mask = CSR_INI_SET_MASK;
 
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+       trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+#else
+       trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+
        return trans;
 
 out_free_ict:
index 5262028b55055e4ca9243e8d27fee38bd9769f0e..837a7d536874a8ff9651ed10099384411a8bf084 100644 (file)
@@ -1,7 +1,8 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -33,7 +34,6 @@
 #include <linux/sched.h>
 #include <net/ip6_checksum.h>
 #include <net/tso.h>
-#include <net/ip6_checksum.h>
 
 #include "iwl-debug.h"
 #include "iwl-csr.h"
@@ -571,6 +571,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
                return ret;
 
        spin_lock_init(&txq->lock);
+       __skb_queue_head_init(&txq->overflow_q);
 
        /*
         * Tell nic where to find circular buffer of Tx Frame Descriptors for
@@ -621,6 +622,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
        }
        txq->active = false;
+
+       while (!skb_queue_empty(&txq->overflow_q)) {
+               struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+               iwl_op_mode_free_skb(trans->op_mode, skb);
+       }
+
        spin_unlock_bh(&txq->lock);
 
        /* just in case - this queue may have been stopped */
@@ -1052,8 +1060,41 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
        iwl_pcie_txq_progress(txq);
 
-       if (iwl_queue_space(&txq->q) > txq->q.low_mark)
-               iwl_wake_queue(trans, txq);
+       if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+           test_bit(txq_id, trans_pcie->queue_stopped)) {
+               struct sk_buff_head skbs;
+
+               __skb_queue_head_init(&skbs);
+               skb_queue_splice_init(&txq->overflow_q, &skbs);
+
+               /*
+                * This is tricky: we are in the reclaim path, which is not
+                * re-entrant, so no one else will access the txq data from
+                * that path. We have also stopped tx, so no new frames can
+                * arrive. Bottom line: it is safe to unlock here and re-lock
+                * later.
+                */
+               spin_unlock_bh(&txq->lock);
+
+               while (!skb_queue_empty(&skbs)) {
+                       struct sk_buff *skb = __skb_dequeue(&skbs);
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+                       u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
+                       struct iwl_device_cmd *dev_cmd =
+                               info->driver_data[dev_cmd_idx];
+
+                       /*
+                        * Note that we can very well be overflowing again.
+                        * In that case, iwl_queue_space will be small again
+                        * and we won't wake mac80211's queue.
+                        */
+                       iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id);
+               }
+               spin_lock_bh(&txq->lock);
+
+               if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+                       iwl_wake_queue(trans, txq);
+       }
 
        if (q->read_ptr == q->write_ptr) {
                IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
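
The reclaim path above splices the overflow queue out while holding the lock, drops the lock to retransmit the deferred frames, then retakes it before deciding whether to wake the queue. A minimal model of the splice-and-drain step, with a plain array standing in for the skb queue and the locking reduced to comments:

#include <stdio.h>
#include <string.h>

#define QMAX 8

struct simple_q {
	int items[QMAX];
	int n;
};

/* Move everything from src into dst and leave src empty. */
static void splice_init(struct simple_q *dst, struct simple_q *src)
{
	memcpy(dst->items, src->items, src->n * sizeof(int));
	dst->n = src->n;
	src->n = 0;
}

int main(void)
{
	struct simple_q overflow = { .items = { 1, 2, 3 }, .n = 3 };
	struct simple_q local = { .n = 0 };
	int i;

	/* lock(); */
	splice_init(&local, &overflow);
	/* unlock();  -- safe: the reclaim path is not re-entrant */

	for (i = 0; i < local.n; i++)
		printf("retransmit frame %d\n", local.items[i]);

	/* lock(); ... wake the queue if there is room ...; unlock(); */
	return 0;
}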
@@ -1686,6 +1727,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                wake_up(&trans_pcie->wait_command_queue);
        }
 
+       if (meta->flags & CMD_MAKE_TRANS_IDLE) {
+               IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
+                              iwl_get_cmd_string(trans, cmd->hdr.cmd));
+               set_bit(STATUS_TRANS_IDLE, &trans->status);
+               wake_up(&trans_pcie->d0i3_waitq);
+       }
+
+       if (meta->flags & CMD_WAKE_UP_TRANS) {
+               IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
+                              iwl_get_cmd_string(trans, cmd->hdr.cmd));
+               clear_bit(STATUS_TRANS_IDLE, &trans->status);
+               wake_up(&trans_pcie->d0i3_waitq);
+       }
+
        meta->flags = 0;
 
        spin_unlock_bh(&txq->lock);
@@ -2161,6 +2216,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
                csum = skb_checksum(skb, offs, skb->len - offs, 0);
                *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
 
        if (skb_is_nonlinear(skb) &&
@@ -2177,6 +2234,22 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        spin_lock(&txq->lock);
 
+       if (iwl_queue_space(q) < q->high_mark) {
+               iwl_stop_queue(trans, txq);
+
+               /* don't put the packet on the ring if there is no room */
+               if (unlikely(iwl_queue_space(q) < 3)) {
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+                       info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] =
+                               dev_cmd;
+                       __skb_queue_tail(&txq->overflow_q, skb);
+
+                       spin_unlock(&txq->lock);
+                       return 0;
+               }
+       }
+
        /* In AGG mode, the index in the ring must correspond to the WiFi
         * sequence number. This is a HW requirements to help the SCD to parse
         * the BA.
@@ -2281,12 +2354,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually.
         */
-       if (iwl_queue_space(q) < q->high_mark) {
-               if (wait_write_ptr)
-                       iwl_pcie_txq_inc_wr_ptr(trans, txq);
-               else
-                       iwl_stop_queue(trans, txq);
-       }
        spin_unlock(&txq->lock);
        return 0;
 out_err:
index 6df3ee561d5214725c3c881895a6056861df74e1..515aa3f993f3dd8d1a65936472bfa6b00f4420de 100644 (file)
@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
        spin_lock_bh(&local->baplock);
 
        res = hfa384x_setup_bap(dev, BAP0, rid, 0);
-       if (!res)
-               res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+       if (res)
+               goto unlock;
+
+       res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+       if (res)
+               goto unlock;
 
        if (le16_to_cpu(rec.len) == 0) {
                /* RID not available */
                res = -ENODATA;
+               goto unlock;
        }
 
        rlen = (le16_to_cpu(rec.len) - 1) * 2;
-       if (!res && exact_len && rlen != len) {
+       if (exact_len && rlen != len) {
                printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
                       "rid=0x%04x, len=%d (expected %d)\n",
                       dev->name, rid, rlen, len);
                res = -ENODATA;
        }
 
-       if (!res)
-               res = hfa384x_from_bap(dev, BAP0, buf, len);
+       res = hfa384x_from_bap(dev, BAP0, buf, len);
 
+unlock:
        spin_unlock_bh(&local->baplock);
        mutex_unlock(&local->rid_bap_mtx);
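
The hostap change above replaces chained "if (!res)" tests with early exits to a single unlock label, so each step only runs when the previous one succeeded. The shape of that refactor, with placeholder steps and the locking reduced to comments:

#include <stdio.h>

static int step_setup(void)	{ return 0; }
static int step_read(void)	{ return 0; }

static int do_work(void)
{
	int res;

	/* lock(); */
	res = step_setup();
	if (res)
		goto unlock;

	res = step_read();
	if (res)
		goto unlock;

unlock:
	/* unlock(); */
	return res;
}

int main(void)
{
	printf("do_work() = %d\n", do_work());
	return 0;
}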
 
index c32889a1e39cf53d4e2d1ab3a4bfc2b8b492af1e..a723a85f5635716aae4afa86cc1d45657f899af0 100644 (file)
@@ -991,7 +991,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
        }
 
-       if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2))
+       if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+                   ETH_ALEN, data->addresses[1].addr))
                goto nla_put_failure;
 
        /* We get the skb->data */
@@ -1333,10 +1334,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
        data->tx_bytes += skb->len;
        ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
 
-       if (ack && skb->len >= 16) {
-               struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       if (ack && skb->len >= 16)
                mac80211_hwsim_monitor_ack(channel, hdr->addr2);
-       }
 
        ieee80211_tx_info_clear_status(txi);
 
@@ -1845,10 +1844,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
 
 static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
-                                      enum ieee80211_ampdu_mlme_action action,
-                                      struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                                      u8 buf_size, bool amsdu)
+                                      struct ieee80211_ampdu_params *params)
 {
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
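
mac80211 now hands the AMPDU arguments to the driver as a single params struct, which the handler above unpacks into locals. The same packing in miniature, with the field set trimmed to what the hunk uses:

#include <stdio.h>

enum ampdu_action {
	AMPDU_TX_START,
	AMPDU_TX_STOP,
};

struct ampdu_params {
	enum ampdu_action action;
	unsigned short tid;
	/* sta, ssn, buf_size and amsdu would live here as well */
};

static int ampdu_action_handler(const struct ampdu_params *params)
{
	switch (params->action) {
	case AMPDU_TX_START:
		printf("start TX aggregation, tid %u\n", params->tid);
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	struct ampdu_params params = { .action = AMPDU_TX_START, .tid = 5 };

	return ampdu_action_handler(&params);
}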
@@ -2736,7 +2737,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry(data, &hwsim_radios, list) {
-               if (mac80211_hwsim_addr_match(data, addr)) {
+               if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
                        _found = true;
                        break;
                }
index 86955c416b30ed20ec48a5013d42df1e5257f89b..2eea76a340b7b44087d160304faf0a9767882d12 100644 (file)
@@ -2039,6 +2039,43 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
 
 
 
+int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+                      bool enabled, int timeout)
+{
+       struct lbs_private *priv = wiphy_priv(wiphy);
+
+       if  (!(priv->fwcapinfo & FW_CAPINFO_PS)) {
+               if (!enabled)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
+       /* the firmware does not cope well with long latency when power saving
+        * is enabled, so do not enable it if the host has only polling and no
+        * interrupts (like some sdio hosts which can only
+        * poll for sdio irqs)
+        */
+       if  (priv->is_polling) {
+               if (!enabled)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
+       if (!enabled) {
+               priv->psmode = LBS802_11POWERMODECAM;
+               if (priv->psstate != PS_STATE_FULL_POWER)
+                       lbs_set_ps_mode(priv,
+                                       PS_MODE_ACTION_EXIT_PS,
+                                       true);
+               return 0;
+       }
+       if (priv->psmode != LBS802_11POWERMODECAM)
+               return 0;
+       priv->psmode = LBS802_11POWERMODEMAX_PSP;
+       if (priv->connect_status == LBS_CONNECTED)
+               lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, true);
+       return 0;
+}
 
 /*
  * Initialization
@@ -2057,6 +2094,7 @@ static struct cfg80211_ops lbs_cfg80211_ops = {
        .change_virtual_intf = lbs_change_intf,
        .join_ibss = lbs_join_ibss,
        .leave_ibss = lbs_leave_ibss,
+       .set_power_mgmt = lbs_set_power_mgmt,
 };
 
 
index 0387a5b380c80f5eeafd05c53c3ec12fc5b892f5..4ddd0e5a6b85c68102483b7f42e27cab096a2a9f 100644 (file)
@@ -957,7 +957,7 @@ static void lbs_queue_cmd(struct lbs_private *priv,
 
        /* Exit_PS command needs to be queued in the header always. */
        if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) {
-               struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf;
+               struct cmd_ds_802_11_ps_mode *psm = (void *)cmdnode->cmdbuf;
 
                if (psm->action == cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
                        if (priv->psstate != PS_STATE_FULL_POWER)
@@ -1387,7 +1387,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
                                 * PS command. Ignore it if it is not Exit_PS.
                                 * otherwise send it down immediately.
                                 */
-                               struct cmd_ds_802_11_ps_mode *psm = (void *)&cmd[1];
+                               struct cmd_ds_802_11_ps_mode *psm = (void *)cmd;
 
                                lbs_deb_host(
                                       "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n",
@@ -1428,40 +1428,14 @@ int lbs_execute_next_command(struct lbs_private *priv)
                 * check if in power save mode, if yes, put the device back
                 * to PS mode
                 */
-#ifdef TODO
-               /*
-                * This was the old code for libertas+wext. Someone that
-                * understands this beast should re-code it in a sane way.
-                *
-                * I actually don't understand why this is related to WPA
-                * and to connection status, shouldn't powering should be
-                * independ of such things?
-                */
                if ((priv->psmode != LBS802_11POWERMODECAM) &&
                    (priv->psstate == PS_STATE_FULL_POWER) &&
-                   ((priv->connect_status == LBS_CONNECTED) ||
-                   lbs_mesh_connected(priv))) {
-                       if (priv->secinfo.WPAenabled ||
-                           priv->secinfo.WPA2enabled) {
-                               /* check for valid WPA group keys */
-                               if (priv->wpa_mcast_key.len ||
-                                   priv->wpa_unicast_key.len) {
-                                       lbs_deb_host(
-                                              "EXEC_NEXT_CMD: WPA enabled and GTK_SET"
-                                              " go back to PS_SLEEP");
-                                       lbs_set_ps_mode(priv,
-                                                       PS_MODE_ACTION_ENTER_PS,
-                                                       false);
-                               }
-                       } else {
-                               lbs_deb_host(
-                                      "EXEC_NEXT_CMD: cmdpendingq empty, "
-                                      "go back to PS_SLEEP");
-                               lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS,
-                                               false);
-                       }
+                   (priv->connect_status == LBS_CONNECTED)) {
+                       lbs_deb_host(
+                               "EXEC_NEXT_CMD: cmdpendingq empty, go back to PS_SLEEP");
+                       lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS,
+                                       false);
                }
-#endif
        }
 
        ret = 0;
index e5442e8956f7ac3b1182058864816dc700548fe5..c95bf6dc9522eae5bdd4824e4c157ddc15dbc562 100644 (file)
@@ -123,7 +123,10 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
        priv->cmd_timed_out = 0;
 
        if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) {
-               struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1];
+               /* struct cmd_ds_802_11_ps_mode also contains
+                * the header
+                */
+               struct cmd_ds_802_11_ps_mode *psmode = (void *)resp;
                u16 action = le16_to_cpu(psmode->action);
 
                lbs_deb_host(
@@ -254,6 +257,10 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
                               "EVENT: in FULL POWER mode, ignoring PS_SLEEP\n");
                        break;
                }
+               if (!list_empty(&priv->cmdpendingq)) {
+                       lbs_deb_cmd("EVENT: commands in queue, do not sleep\n");
+                       break;
+               }
                priv->psstate = PS_STATE_PRE_SLEEP;
 
                lbs_ps_confirm_sleep(priv);
index 6bd1608992b00bec61b596d1c770674df8e11024..edf710bc5e77dedcfcc03fd77054ccd93b88bf90 100644 (file)
@@ -99,6 +99,7 @@ struct lbs_private {
        /* Hardware access */
        void *card;
        bool iface_running;
+       u8 is_polling; /* host has to poll the card irq */
        u8 fw_ready;
        u8 surpriseremoved;
        u8 setup_fw_on_resume;
index 68fd3a9779bdb99a06fad6735445ecda9dca7842..13eae9ff8c354384a6c8a172f73b7d9da2bbddee 100644 (file)
@@ -1267,7 +1267,7 @@ static int if_sdio_probe(struct sdio_func *func,
        priv->reset_card = if_sdio_reset_card;
        priv->power_save = if_sdio_power_save;
        priv->power_restore = if_sdio_power_restore;
-
+       priv->is_polling = !(func->card->host->caps & MMC_CAP_SDIO_IRQ);
        ret = if_sdio_power_on(card);
        if (ret)
                goto err_activate_card;
index dff08a2896a38f644894749c006787e26b1f3ea6..aba0c9995b14b143f716d4a83c0a695f0cc628eb 100644 (file)
@@ -267,6 +267,7 @@ static int if_usb_probe(struct usb_interface *intf,
        priv->enter_deep_sleep = NULL;
        priv->exit_deep_sleep = NULL;
        priv->reset_deep_sleep_wakeup = NULL;
+       priv->is_polling = false;
 #ifdef CONFIG_OLPC
        if (machine_is_olpc())
                priv->reset_card = if_usb_reset_olpc_card;
index 8079560f496581600cb658c4fa09d8e5d1faed81..b35b8bcce24cc0a34081fab73928c6d44b2fd91e 100644 (file)
@@ -1060,7 +1060,12 @@ void lbs_remove_card(struct lbs_private *priv)
 
        if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
                priv->psmode = LBS802_11POWERMODECAM;
-               lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true);
+               /* no need to wake up if already woken up;
+                * on suspend, this exit-PS command is not processed
+                * and the driver hangs
+                */
+               if (priv->psstate != PS_STATE_FULL_POWER)
+                       lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true);
        }
 
        if (priv->is_deep_sleep) {
index 2f0f9b5609d0139a301f7e57866f212cc1daefe1..24e649b1eb2453444967439166f2b9dd8b21e9b6 100644 (file)
@@ -237,4 +237,14 @@ device_dump
 
        cat fw_dump
 
+verext
+       This command is used to get the extended firmware version string
+       using different configuration parameters.
+
+       Usage:
+               echo "[version_str_sel]" > verext
+               cat verext
+
+               [version_str_sel]: the firmware supports several extended
+                                  version string cases, including 0/1/10/20/21/99
 ===============================================================================
index e7adef72c05fcfd197f9aadb4eef24e5ef5f9511..f2dce81ba36ec834253791d662780640e03576ea 100644 (file)
@@ -1962,6 +1962,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
+       if (!mwifiex_stop_bg_scan(priv))
+               cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
@@ -2217,6 +2220,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                    "info: Trying to associate to %s and bssid %pM\n",
                    (char *)sme->ssid, sme->bssid);
 
+       if (!mwifiex_stop_bg_scan(priv))
+               cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
        ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
                                     priv->bss_mode, sme->channel, sme, 0);
        if (!ret) {
@@ -2420,6 +2426,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
                return -EBUSY;
        }
 
+       if (!mwifiex_stop_bg_scan(priv))
+               cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
        user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
        if (!user_scan_cfg)
                return -ENOMEM;
@@ -2487,6 +2496,125 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
        return 0;
 }
 
+/* CFG802.11 operation handler for sched_scan_start.
+ *
+ * This function issues a bgscan config request to the firmware based upon
+ * the user-specified sched_scan configuration. On successful completion,
+ * the firmware will generate a BGSCAN_REPORT event; the driver should then
+ * issue a bgscan query command to fetch the sched_scan results.
+ */
+static int
+mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                                 struct net_device *dev,
+                                 struct cfg80211_sched_scan_request *request)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+       int i, offset;
+       struct ieee80211_channel *chan;
+       struct mwifiex_bg_scan_cfg *bgscan_cfg;
+       struct ieee_types_header *ie;
+
+       if (!request || (!request->n_ssids && !request->n_match_sets)) {
+               wiphy_err(wiphy, "%s : Invalid Sched_scan parameters",
+                         __func__);
+               return -EINVAL;
+       }
+
+       wiphy_info(wiphy, "sched_scan start : n_ssids=%d n_match_sets=%d ",
+                  request->n_ssids, request->n_match_sets);
+       wiphy_info(wiphy, "n_channels=%d interval=%d ie_len=%d\n",
+                  request->n_channels, request->scan_plans->interval,
+                  (int)request->ie_len);
+
+       bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL);
+       if (!bgscan_cfg)
+               return -ENOMEM;
+
+       if (priv->scan_request || priv->scan_aborting)
+               bgscan_cfg->start_later = true;
+
+       bgscan_cfg->num_ssids = request->n_match_sets;
+       bgscan_cfg->ssid_list = request->match_sets;
+
+       if (request->ie && request->ie_len) {
+               offset = 0;
+               for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
+                       if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
+                               continue;
+                       priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_BGSCAN;
+                       ie = (struct ieee_types_header *)(request->ie + offset);
+                       memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len);
+                       offset += sizeof(*ie) + ie->len;
+
+                       if (offset >= request->ie_len)
+                               break;
+               }
+       }
+
+       for (i = 0; i < min_t(u32, request->n_channels,
+                             MWIFIEX_BG_SCAN_CHAN_MAX); i++) {
+               chan = request->channels[i];
+               bgscan_cfg->chan_list[i].chan_number = chan->hw_value;
+               bgscan_cfg->chan_list[i].radio_type = chan->band;
+
+               if ((chan->flags & IEEE80211_CHAN_NO_IR) || !request->n_ssids)
+                       bgscan_cfg->chan_list[i].scan_type =
+                                               MWIFIEX_SCAN_TYPE_PASSIVE;
+               else
+                       bgscan_cfg->chan_list[i].scan_type =
+                                               MWIFIEX_SCAN_TYPE_ACTIVE;
+
+               bgscan_cfg->chan_list[i].scan_time = 0;
+       }
+
+       bgscan_cfg->chan_per_scan = min_t(u32, request->n_channels,
+                                         MWIFIEX_BG_SCAN_CHAN_MAX);
+
+       /* Use at least 15 seconds per scan cycle */
+       bgscan_cfg->scan_interval = (request->scan_plans->interval >
+                                    MWIFIEX_BGSCAN_INTERVAL) ?
+                               request->scan_plans->interval :
+                               MWIFIEX_BGSCAN_INTERVAL;
+
+       bgscan_cfg->repeat_count = MWIFIEX_BGSCAN_REPEAT_COUNT;
+       bgscan_cfg->report_condition = MWIFIEX_BGSCAN_SSID_MATCH |
+                               MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE;
+       bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA;
+       bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET;
+       bgscan_cfg->enable = true;
+       if (request->min_rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
+               bgscan_cfg->report_condition |= MWIFIEX_BGSCAN_SSID_RSSI_MATCH;
+               bgscan_cfg->rssi_threshold = request->min_rssi_thold;
+       }
+
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG,
+                            HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) {
+               kfree(bgscan_cfg);
+               return -EFAULT;
+       }
+
+       priv->sched_scanning = true;
+
+       kfree(bgscan_cfg);
+       return 0;
+}
+
+/* CFG802.11 operation handler for sched_scan_stop.
+ *
+ * This function issues a bgscan config command to disable the
+ * previous bgscan configuration in the firmware.
+ */
+static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy,
+                                           struct net_device *dev)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       wiphy_info(wiphy, "sched scan stop!");
+       mwifiex_stop_bg_scan(priv);
+
+       return 0;
+}
+
 static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info,
                                   struct mwifiex_private *priv)
 {
@@ -2848,6 +2976,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
        mwifiex_dev_debugfs_remove(priv);
 #endif
 
+       if (priv->sched_scanning)
+               priv->sched_scanning = false;
+
        mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
        skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
@@ -3044,10 +3175,12 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
                                sizeof(byte_seq));
                mef_entry->filter[filt_num].filt_type = TYPE_EQ;
 
-               if (first_pat)
+               if (first_pat) {
                        first_pat = false;
-               else
+                       mwifiex_dbg(priv->adapter, INFO, "Wake on patterns\n");
+               } else {
                        mef_entry->filter[filt_num].filt_action = TYPE_AND;
+               }
 
                filt_num++;
        }
@@ -3073,6 +3206,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
                mef_entry->filter[filt_num].offset = 56;
                mef_entry->filter[filt_num].filt_type = TYPE_EQ;
                mef_entry->filter[filt_num].filt_action = TYPE_OR;
+               mwifiex_dbg(priv->adapter, INFO, "Wake on magic packet\n");
        }
        return ret;
 }
@@ -3143,7 +3277,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 
        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
 
-       if (!priv->media_connected) {
+       if (!priv->media_connected && !wowlan->nd_config) {
                mwifiex_dbg(adapter, ERROR,
                            "Can not configure WOWLAN in disconnected state\n");
                return 0;
@@ -3155,19 +3289,30 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
                return ret;
        }
 
+       memset(&hs_cfg, 0, sizeof(hs_cfg));
+       hs_cfg.conditions = le32_to_cpu(adapter->hs_cfg.conditions);
+
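+       /* For wake on net detect, arm a firmware background scan with the
+        * sched-scan configuration supplied by cfg80211 before suspending.
+        */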
+       if (wowlan->nd_config) {
+               mwifiex_dbg(adapter, INFO, "Wake on net detect\n");
+               hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT;
+               mwifiex_cfg80211_sched_scan_start(wiphy, priv->netdev,
+                                                 wowlan->nd_config);
+       }
+
        if (wowlan->disconnect) {
-               memset(&hs_cfg, 0, sizeof(hs_cfg));
-               hs_cfg.is_invoke_hostcmd = false;
-               hs_cfg.conditions = HS_CFG_COND_MAC_EVENT;
-               hs_cfg.gpio = adapter->hs_cfg.gpio;
-               hs_cfg.gap = adapter->hs_cfg.gap;
-               ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
-                                           MWIFIEX_SYNC_CMD, &hs_cfg);
-               if (ret) {
-                       mwifiex_dbg(adapter, ERROR,
-                                   "Failed to set HS params\n");
-                       return ret;
-               }
+               hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT;
+               mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n");
+       }
+
+       hs_cfg.is_invoke_hostcmd = false;
+       hs_cfg.gpio = adapter->hs_cfg.gpio;
+       hs_cfg.gap = adapter->hs_cfg.gap;
+       ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
+                                   MWIFIEX_SYNC_CMD, &hs_cfg);
+       if (ret) {
+               mwifiex_dbg(adapter, ERROR,
+                           "Failed to set HS params\n");
+               return ret;
        }
 
        return ret;
@@ -3175,6 +3320,64 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 
 static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
 {
+       struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+       struct mwifiex_private *priv =
+               mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+       struct mwifiex_ds_wakeup_reason wakeup_reason;
+       struct cfg80211_wowlan_wakeup wakeup_report;
+       int i;
+
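+       /* Query the firmware for the host-sleep wakeup reason and translate
+        * it into a cfg80211 wakeup report.
+        */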
+       mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD,
+                                 &wakeup_reason);
+       memset(&wakeup_report, 0, sizeof(struct cfg80211_wowlan_wakeup));
+
+       wakeup_report.pattern_idx = -1;
+
+       switch (wakeup_reason.hs_wakeup_reason) {
+       case NO_HSWAKEUP_REASON:
+               break;
+       case BCAST_DATA_MATCHED:
+               break;
+       case MCAST_DATA_MATCHED:
+               break;
+       case UCAST_DATA_MATCHED:
+               break;
+       case MASKTABLE_EVENT_MATCHED:
+               break;
+       case NON_MASKABLE_EVENT_MATCHED:
+               if (wiphy->wowlan_config->disconnect)
+                       wakeup_report.disconnect = true;
+               if (wiphy->wowlan_config->nd_config)
+                       wakeup_report.net_detect = adapter->nd_info;
+               break;
+       case NON_MASKABLE_CONDITION_MATCHED:
+               break;
+       case MAGIC_PATTERN_MATCHED:
+               if (wiphy->wowlan_config->magic_pkt)
+                       wakeup_report.magic_pkt = true;
+               if (wiphy->wowlan_config->n_patterns)
+                       wakeup_report.pattern_idx = 1;
+               break;
+       case CONTROL_FRAME_MATCHED:
+               break;
+       case MANAGEMENT_FRAME_MATCHED:
+               break;
+       default:
+               break;
+       }
+
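+       /* Only reasons up to MAGIC_PATTERN_MATCHED (1..7) are reported to
+        * cfg80211; control and management frame wakeups are not.
+        */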
+       if ((wakeup_reason.hs_wakeup_reason > 0) &&
+           (wakeup_reason.hs_wakeup_reason <= 7))
+               cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report,
+                                             GFP_KERNEL);
+
+       if (adapter->nd_info) {
+               for (i = 0; i < adapter->nd_info->n_matches; i++)
+                       kfree(adapter->nd_info->matches[i]);
+               kfree(adapter->nd_info);
+               adapter->nd_info = NULL;
+       }
+
        return 0;
 }
 
@@ -3590,8 +3793,8 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
                freq = ieee80211_channel_to_frequency(curr_bss->channel, band);
                chan = ieee80211_get_channel(wiphy, freq);
 
-               if (curr_bss->bcn_ht_oper) {
-                       second_chan_offset = curr_bss->bcn_ht_oper->ht_param &
+               if (priv->ht_param_present) {
+                       second_chan_offset = priv->assoc_resp_ht_param &
                                        IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
                        chan_type = mwifiex_sec_chan_offset_to_chan_type
                                                        (second_chan_offset);
@@ -3701,6 +3904,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
        .set_antenna = mwifiex_cfg80211_set_antenna,
        .del_station = mwifiex_cfg80211_del_station,
+       .sched_scan_start = mwifiex_cfg80211_sched_scan_start,
+       .sched_scan_stop = mwifiex_cfg80211_sched_scan_stop,
 #ifdef CONFIG_PM
        .suspend = mwifiex_cfg80211_suspend,
        .resume = mwifiex_cfg80211_resume,
@@ -3720,11 +3925,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
 
 #ifdef CONFIG_PM
 static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
-       .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+       .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+               WIPHY_WOWLAN_NET_DETECT,
        .n_patterns = MWIFIEX_MEF_MAX_FILTERS,
        .pattern_min_len = 1,
        .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
        .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+       .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS,
 };
 #endif
 
@@ -3829,6 +4036,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
                        WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
                        WIPHY_FLAG_AP_UAPSD |
+                       WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
                        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
                        WIPHY_FLAG_HAS_CHANNEL_SWITCH |
                        WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -3847,6 +4055,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                                    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
                                    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 
+       wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
+       wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
+       wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH;
+
        wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
        wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
 
index cb25aa7e90db19d54df9b556d677c3a460093d77..a12adee776c6c6ac76c2f24d3bc44b88a1f5a6af 100644 (file)
@@ -1657,3 +1657,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
 
        return 0;
 }
+
+/* This function handles the command response of the HS wakeup reason
+ * command.
+ */
+int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv,
+                             struct host_cmd_ds_command *resp,
+                             struct host_cmd_ds_wakeup_reason *wakeup_reason)
+{
+       wakeup_reason->wakeup_reason =
+               resp->params.hs_wakeup_reason.wakeup_reason;
+
+       return 0;
+}
index 0b9c580af988cd502e4ab4b05c351ffc030935ab..df28836a1d1100672050bf878a4dba70f40b87c6 100644 (file)
@@ -95,8 +95,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
 
        mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1);
 
-       if (!priv->version_str[0])
-               mwifiex_get_ver_ext(priv);
+       mwifiex_get_ver_ext(priv, 0);
 
        p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
        p += sprintf(p, "driver_version = %s", fmt);
@@ -583,6 +582,52 @@ done:
        return ret;
 }
 
+/* debugfs verext file write handler.
+ * This function is called when the 'verext' file is opened for writing.
+ */
+static ssize_t
+mwifiex_verext_write(struct file *file, const char __user *ubuf,
+                    size_t count, loff_t *ppos)
+{
+       int ret;
+       u32 versionstrsel;
+       struct mwifiex_private *priv = (void *)file->private_data;
+       char buf[16];
+
+       memset(buf, 0, sizeof(buf));
+
+       if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+               return -EFAULT;
+
+       ret = kstrtou32(buf, 10, &versionstrsel);
+       if (ret)
+               return ret;
+
+       priv->versionstrsel = versionstrsel;
+
+       return count;
+}
+
+/* Proc verext file read handler.
+ * This function is called when the 'verext' file is opened for reading.
+ * It can be used to read the driver's extended version string.
+ */
+static ssize_t
+mwifiex_verext_read(struct file *file, char __user *ubuf,
+                   size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv =
+               (struct mwifiex_private *)file->private_data;
+       char buf[256];
+       int ret;
+
+       mwifiex_get_ver_ext(priv, priv->versionstrsel);
+       ret = snprintf(buf, sizeof(buf), "version string: %s\n",
+                      priv->version_str);
+
+       return simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+}
+
 /* Proc memrw file write handler.
  * This function is called when the 'memrw' file is opened for writing
  * This function can be used to write to a memory location.
@@ -940,6 +985,7 @@ MWIFIEX_DFS_FILE_OPS(histogram);
 MWIFIEX_DFS_FILE_OPS(debug_mask);
 MWIFIEX_DFS_FILE_OPS(timeshare_coex);
 MWIFIEX_DFS_FILE_WRITE_OPS(reset);
+MWIFIEX_DFS_FILE_OPS(verext);
 
 /*
  * This function creates the debug FS directory structure and the files.
@@ -968,6 +1014,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
        MWIFIEX_DFS_ADD_FILE(debug_mask);
        MWIFIEX_DFS_ADD_FILE(timeshare_coex);
        MWIFIEX_DFS_ADD_FILE(reset);
+       MWIFIEX_DFS_ADD_FILE(verext);
 }
 
 /*
index d9c15cd36f1277f8a9a4e87a08a5b5fbc4ea9e97..bec300b9c2ea51bf478d6060b19b952360a36f31 100644 (file)
 #define BLOCK_NUMBER_OFFSET            15
 #define SDIO_HEADER_OFFSET             28
 
+#define MWIFIEX_SIZE_4K 0x4000
+
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
        MWIFIEX_BSS_TYPE_UAP = 1,
@@ -270,4 +272,26 @@ struct mwifiex_11h_intf_state {
        bool is_11h_enabled;
        bool is_11h_active;
 } __packed;
+
+#define MWIFIEX_FW_DUMP_IDX            0xff
+#define MWIFIEX_FW_DUMP_MAX_MEMSIZE     0x160000
+#define MWIFIEX_DRV_INFO_IDX           20
+#define FW_DUMP_MAX_NAME_LEN           8
+#define FW_DUMP_HOST_READY      0xEE
+#define FW_DUMP_DONE                   0xFF
+#define FW_DUMP_READ_DONE              0xFE
+
+struct memory_type_mapping {
+       u8 mem_name[FW_DUMP_MAX_NAME_LEN];
+       u8 *mem_ptr;
+       u32 mem_size;
+       u8 done_flag;
+};
+
+enum rdwr_status {
+       RDWR_STATUS_SUCCESS = 0,
+       RDWR_STATUS_FAILURE = 1,
+       RDWR_STATUS_DONE = 2
+};
+
 #endif /* !_MWIFIEX_DECL_H_ */
index ced7af2be29a3259eed41f33b0ea7ecea327e047..c134cf8652910b0381421f9e7c6ed746401ac719 100644 (file)
@@ -96,7 +96,7 @@ enum KEY_TYPE_ID {
 #define WAPI_KEY_LEN                   (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
 
 #define MAX_POLL_TRIES                 100
-#define MAX_FIRMWARE_POLL_TRIES                        100
+#define MAX_FIRMWARE_POLL_TRIES                        150
 
 #define FIRMWARE_READY_SDIO                            0xfedc
 #define FIRMWARE_READY_PCIE                            0xfedcba00
@@ -144,6 +144,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_WILDCARDSSID       (PROPRIETARY_TLV_BASE_ID + 18)
 #define TLV_TYPE_TSFTIMESTAMP       (PROPRIETARY_TLV_BASE_ID + 19)
 #define TLV_TYPE_RSSI_HIGH          (PROPRIETARY_TLV_BASE_ID + 22)
+#define TLV_TYPE_BGSCAN_START_LATER (PROPRIETARY_TLV_BASE_ID + 30)
 #define TLV_TYPE_AUTH_TYPE          (PROPRIETARY_TLV_BASE_ID + 31)
 #define TLV_TYPE_STA_MAC_ADDR       (PROPRIETARY_TLV_BASE_ID + 32)
 #define TLV_TYPE_BSSID              (PROPRIETARY_TLV_BASE_ID + 35)
@@ -177,6 +178,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_TX_PAUSE           (PROPRIETARY_TLV_BASE_ID + 148)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_REPEAT_COUNT       (PROPRIETARY_TLV_BASE_ID + 176)
 #define TLV_TYPE_MULTI_CHAN_INFO    (PROPRIETARY_TLV_BASE_ID + 183)
 #define TLV_TYPE_MC_GROUP_INFO      (PROPRIETARY_TLV_BASE_ID + 184)
 #define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
@@ -331,6 +333,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_802_11_MAC_ADDRESS                0x004D
 #define HostCmd_CMD_802_11D_DOMAIN_INFO               0x005b
 #define HostCmd_CMD_802_11_KEY_MATERIAL               0x005e
+#define HostCmd_CMD_802_11_BG_SCAN_CONFIG             0x006b
 #define HostCmd_CMD_802_11_BG_SCAN_QUERY              0x006c
 #define HostCmd_CMD_WMM_GET_STATUS                    0x0071
 #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT            0x0075
@@ -370,6 +373,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                         0x0112
+#define HostCmd_CMD_HS_WAKEUP_REASON                  0x0116
 #define HostCmd_CMD_TDLS_CONFIG                       0x0100
 #define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
@@ -523,6 +527,7 @@ enum P2P_MODES {
 #define EVENT_CHANNEL_REPORT_RDY        0x00000054
 #define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
+#define EVENT_BG_SCAN_STOPPED           0x00000065
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 #define EVENT_MULTI_CHAN_INFO           0x0000006a
 #define EVENT_TX_STATUS_REPORT         0x00000074
@@ -539,6 +544,8 @@ enum P2P_MODES {
 
 #define MWIFIEX_MAX_PATTERN_LEN                40
 #define MWIFIEX_MAX_OFFSET_LEN         100
+#define MWIFIEX_MAX_ND_MATCH_SETS      10
+
 #define STACK_NBYTES                   100
 #define TYPE_DNUM                      1
 #define TYPE_BYTESEQ                   2
@@ -601,6 +608,20 @@ struct mwifiex_ie_types_data {
 #define MWIFIEX_RXPD_FLAGS_TDLS_PACKET      0x01
 #define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS    0x20
 
+enum HS_WAKEUP_REASON {
+       NO_HSWAKEUP_REASON = 0,
+       BCAST_DATA_MATCHED,
+       MCAST_DATA_MATCHED,
+       UCAST_DATA_MATCHED,
+       MASKTABLE_EVENT_MATCHED,
+       NON_MASKABLE_EVENT_MATCHED,
+       NON_MASKABLE_CONDITION_MATCHED,
+       MAGIC_PATTERN_MATCHED,
+       CONTROL_FRAME_MATCHED,
+       MANAGEMENT_FRAME_MATCHED,
+       RESERVED
+};
+
 struct txpd {
        u8 bss_type;
        u8 bss_num;
@@ -733,6 +754,21 @@ struct mwifiex_ie_types_num_probes {
        __le16 num_probes;
 } __packed;
 
+struct mwifiex_ie_types_repeat_count {
+       struct mwifiex_ie_types_header header;
+       __le16 repeat_count;
+} __packed;
+
+struct mwifiex_ie_types_min_rssi_threshold {
+       struct mwifiex_ie_types_header header;
+       __le16 rssi_threshold;
+} __packed;
+
+struct mwifiex_ie_types_bgscan_start_later {
+       struct mwifiex_ie_types_header header;
+       __le16 start_later;
+} __packed;
+
 struct mwifiex_ie_types_scan_chan_gap {
        struct mwifiex_ie_types_header header;
        /* time gap in TUs to be used between two consecutive channels scan */
@@ -1027,7 +1063,7 @@ struct ieee_types_assoc_rsp {
        __le16 cap_info_bitmap;
        __le16 status_code;
        __le16 a_id;
-       u8 ie_buffer[1];
+       u8 ie_buffer[0];
 } __packed;
 
 struct host_cmd_ds_802_11_associate_rsp {
@@ -1425,6 +1461,36 @@ struct mwifiex_user_scan_cfg {
        u16 scan_chan_gap;
 } __packed;
 
+#define MWIFIEX_BG_SCAN_CHAN_MAX 38
+#define MWIFIEX_BSS_MODE_INFRA 1
+#define MWIFIEX_BGSCAN_ACT_GET     0x0000
+#define MWIFIEX_BGSCAN_ACT_SET     0x0001
+#define MWIFIEX_BGSCAN_ACT_SET_ALL 0xff01
+/** ssid match */
+#define MWIFIEX_BGSCAN_SSID_MATCH          0x0001
+/** ssid match and RSSI exceeded */
+#define MWIFIEX_BGSCAN_SSID_RSSI_MATCH     0x0004
+/** wait for all channel scans to complete before reporting scan results */
+#define MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE  0x80000000
+
+struct mwifiex_bg_scan_cfg {
+       u16 action;
+       u8 enable;
+       u8 bss_type;
+       u8 chan_per_scan;
+       u32 scan_interval;
+       u32 report_condition;
+       u8 num_probes;
+       u8 rssi_threshold;
+       u8 snr_threshold;
+       u16 repeat_count;
+       u16 start_later;
+       struct cfg80211_match_set *ssid_list;
+       u8 num_ssids;
+       struct mwifiex_user_scan_chan chan_list[MWIFIEX_BG_SCAN_CHAN_MAX];
+       u16 scan_chan_gap;
+} __packed;
+
 struct ie_body {
        u8 grp_key_oui[4];
        u8 ptk_cnt[2];
@@ -1470,6 +1536,20 @@ struct mwifiex_ie_types_bss_scan_info {
        __le64 tsf;
 } __packed;
 
+struct host_cmd_ds_802_11_bg_scan_config {
+       __le16 action;
+       u8 enable;
+       u8 bss_type;
+       u8 chan_per_scan;
+       u8 reserved;
+       __le16 reserved1;
+       __le32 scan_interval;
+       __le32 reserved2;
+       __le32 report_condition;
+       __le16 reserved3;
+       u8 tlv[0];
+} __packed;
+
 struct host_cmd_ds_802_11_bg_scan_query {
        u8 flush;
 } __packed;
@@ -2099,6 +2179,10 @@ struct host_cmd_ds_robust_coex {
        __le16 reserved;
 } __packed;
 
+struct host_cmd_ds_wakeup_reason {
+       u16  wakeup_reason;
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -2124,6 +2208,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_802_11_scan scan;
                struct host_cmd_ds_802_11_scan_ext ext_scan;
                struct host_cmd_ds_802_11_scan_rsp scan_resp;
+               struct host_cmd_ds_802_11_bg_scan_config bg_scan_config;
                struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
                struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
                struct host_cmd_ds_802_11_associate associate;
@@ -2170,6 +2255,7 @@ struct host_cmd_ds_command {
                struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
                struct host_cmd_ds_multi_chan_policy mc_policy;
                struct host_cmd_ds_robust_coex coex;
+               struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
        } params;
 } __packed;
 
index 6f7876ec31b7a6af705593139318df63bfd77a84..517653b3adabd8481edc73cd8eb08046e44f8484 100644 (file)
@@ -741,8 +741,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
        u32 poll_num = 1;
 
        if (adapter->if_ops.check_fw_status) {
-               adapter->winner = 0;
-
                /* check if firmware is already running */
                ret = adapter->if_ops.check_fw_status(adapter, poll_num);
                if (!ret) {
@@ -750,13 +748,23 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
                                    "WLAN FW already running! Skip FW dnld\n");
                        return 0;
                }
+       }
+
+       /* check if we are the winner for downloading FW */
+       if (adapter->if_ops.check_winner_status) {
+               adapter->winner = 0;
+               ret = adapter->if_ops.check_winner_status(adapter);
 
                poll_num = MAX_FIRMWARE_POLL_TRIES;
+               if (ret) {
+                       mwifiex_dbg(adapter, MSG,
+                                   "WLAN read winner status failed!\n");
+                       return ret;
+               }
 
-               /* check if we are the winner for downloading FW */
                if (!adapter->winner) {
                        mwifiex_dbg(adapter, MSG,
-                                   "FW already running! Skip FW dnld\n");
+                                   "WLAN is not the winner! Skip FW dnld\n");
                        goto poll_fw;
                }
        }
index 4f0174c649461ed3ac219419cb676921f31a66e6..14cfa37deb00f9640a7f1816a166eb659a363b8f 100644 (file)
@@ -271,6 +271,10 @@ struct mwifiex_ds_hs_cfg {
        u32 gap;
 };
 
+struct mwifiex_ds_wakeup_reason {
+       u16  hs_wakeup_reason;
+};
+
 #define DEEP_SLEEP_ON  1
 #define DEEP_SLEEP_OFF 0
 #define DEEP_SLEEP_IDLE_TIME   100
@@ -414,6 +418,7 @@ struct mwifiex_ds_mef_cfg {
 #define MWIFIEX_VSIE_MASK_SCAN     0x01
 #define MWIFIEX_VSIE_MASK_ASSOC    0x02
 #define MWIFIEX_VSIE_MASK_ADHOC    0x04
+#define MWIFIEX_VSIE_MASK_BGSCAN   0x08
 
 enum {
        MWIFIEX_FUNC_INIT = 1,
index cc09a81dbf6a8eac3f4b4ac8ee84839bcbafe811..62211fca91b7456ff71fae76be6e0ffe9a01753f 100644 (file)
@@ -644,6 +644,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
        struct mwifiex_bssdescriptor *bss_desc;
        bool enable_data = true;
        u16 cap_info, status_code, aid;
+       const u8 *ie_ptr;
+       struct ieee80211_ht_operation *assoc_resp_ht_oper;
 
        assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
 
@@ -733,6 +735,19 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
                        = ((bss_desc->wmm_ie.qos_info_bitmap &
                                IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
 
+       /* Store the bandwidth information from assoc response */
+       ie_ptr = cfg80211_find_ie(WLAN_EID_HT_OPERATION, assoc_rsp->ie_buffer,
+                                 priv->assoc_rsp_size
+                                 - sizeof(struct ieee_types_assoc_rsp));
+       if (ie_ptr) {
+               assoc_resp_ht_oper = (struct ieee80211_ht_operation *)(ie_ptr
+                                       + sizeof(struct ieee_types_header));
+               priv->assoc_resp_ht_param = assoc_resp_ht_oper->ht_param;
+               priv->ht_param_present = true;
+       } else {
+               priv->ht_param_present = false;
+       }
+
        mwifiex_dbg(priv->adapter, INFO,
                    "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
                    priv->curr_pkt_filter);
index 79c16de8743e1945262f49aa82ec18f9022a2d2f..3cfa94677a8e2384bcbcc7ed188baf02814b84c9 100644 (file)
@@ -132,6 +132,13 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
                }
        }
 
+       if (adapter->nd_info) {
+               for (i = 0; i < adapter->nd_info->n_matches; i++)
+                       kfree(adapter->nd_info->matches[i]);
+               kfree(adapter->nd_info);
+               adapter->nd_info = NULL;
+       }
+
        vfree(adapter->chan_stats);
        kfree(adapter);
        return 0;
@@ -746,6 +753,13 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
 
        mwifiex_queue_main_work(priv->adapter);
 
+       if (priv->sched_scanning) {
+               mwifiex_dbg(priv->adapter, INFO,
+                           "aborting bgscan on ndo_stop\n");
+               mwifiex_stop_bg_scan(priv);
+               cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+       }
+
        return 0;
 }
 
index 2f7f478ce04b6d9ea179022bb52ff92c97255026..aea7aee46cf7834f6299148eac5d0ef3b30a2f19 100644 (file)
@@ -198,6 +198,11 @@ do {                                                               \
                               buf, len, false);                \
 } while (0)
 
+/** Minimum BGSCAN interval: 15 seconds */
+#define MWIFIEX_BGSCAN_INTERVAL 15000
+/** default repeat count */
+#define MWIFIEX_BGSCAN_REPEAT_COUNT 6
+
 struct mwifiex_dbg {
        u32 num_cmd_host_to_card_failure;
        u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -293,6 +298,7 @@ struct mwifiex_tid_tbl {
 #define WMM_HIGHEST_PRIORITY           7
 #define HIGH_PRIO_TID                          7
 #define LOW_PRIO_TID                           0
+#define MWIFIEX_WMM_DRV_DELAY_MAX 510
 
 struct mwifiex_wmm_desc {
        struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
@@ -483,26 +489,6 @@ struct mwifiex_roc_cfg {
        struct ieee80211_channel chan;
 };
 
-#define MWIFIEX_FW_DUMP_IDX            0xff
-#define MWIFIEX_DRV_INFO_IDX           20
-#define FW_DUMP_MAX_NAME_LEN           8
-#define FW_DUMP_HOST_READY             0xEE
-#define FW_DUMP_DONE                   0xFF
-#define FW_DUMP_READ_DONE              0xFE
-
-struct memory_type_mapping {
-       u8 mem_name[FW_DUMP_MAX_NAME_LEN];
-       u8 *mem_ptr;
-       u32 mem_size;
-       u8 done_flag;
-};
-
-enum rdwr_status {
-       RDWR_STATUS_SUCCESS = 0,
-       RDWR_STATUS_FAILURE = 1,
-       RDWR_STATUS_DONE = 2
-};
-
 enum mwifiex_iface_work_flags {
        MWIFIEX_IFACE_WORK_DEVICE_DUMP,
        MWIFIEX_IFACE_WORK_CARD_RESET,
@@ -616,6 +602,7 @@ struct mwifiex_private {
        spinlock_t curr_bcn_buf_lock;
        struct wireless_dev wdev;
        struct mwifiex_chan_freq_power cfp;
+       u32 versionstrsel;
        char version_str[128];
 #ifdef CONFIG_DEBUG_FS
        struct dentry *dfs_dev_dir;
@@ -640,6 +627,7 @@ struct mwifiex_private {
        u32 mgmt_frame_mask;
        struct mwifiex_roc_cfg roc_cfg;
        bool scan_aborting;
+       u8 sched_scanning;
        u8 csa_chan;
        unsigned long csa_expire_time;
        u8 del_list_idx;
@@ -667,6 +655,8 @@ struct mwifiex_private {
        struct mwifiex_ds_mem_rw mem_rw;
        struct sk_buff_head bypass_txq;
        struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
+       u8 assoc_resp_ht_param;
+       bool ht_param_present;
 };
 
 
@@ -791,6 +781,7 @@ struct mwifiex_if_ops {
        int (*init_if) (struct mwifiex_adapter *);
        void (*cleanup_if) (struct mwifiex_adapter *);
        int (*check_fw_status) (struct mwifiex_adapter *, u32);
+       int (*check_winner_status)(struct mwifiex_adapter *);
        int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
        int (*register_dev) (struct mwifiex_adapter *);
        void (*unregister_dev) (struct mwifiex_adapter *);
@@ -994,6 +985,7 @@ struct mwifiex_adapter {
        u8 active_scan_triggered;
        bool usb_mc_status;
        bool usb_mc_setup;
+       struct cfg80211_wowlan_nd_info *nd_info;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1196,6 +1188,10 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
                                struct host_cmd_ds_command *resp);
 int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
                                         void *buf);
+int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
+                                     struct host_cmd_ds_command *cmd,
+                                     void *data_buf);
+int mwifiex_stop_bg_scan(struct mwifiex_private *priv);
 
 /*
  * This function checks if the queuing is RA based or not.
@@ -1417,7 +1413,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
 
 int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
 
-int mwifiex_get_ver_ext(struct mwifiex_private *priv);
+int mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel);
 
 int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
                               struct ieee80211_channel *chan,
@@ -1586,6 +1582,12 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
 void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
 void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
+int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
+                             int cmd_type,
+                             struct mwifiex_ds_wakeup_reason *wakeup_reason);
+int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv,
+                             struct host_cmd_ds_command *resp,
+                             struct host_cmd_ds_wakeup_reason *wakeup_reason);
 void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
 void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
 int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
index 6d0dc40e20e5c535f127e69743e046162fb71138..cc072142411aad8a175d3f790e7bc4a8900c21fa 100644 (file)
@@ -37,17 +37,6 @@ static struct mwifiex_if_ops pcie_ops;
 
 static struct semaphore add_remove_card_sem;
 
-static struct memory_type_mapping mem_type_mapping_tbl[] = {
-       {"ITCM", NULL, 0, 0xF0},
-       {"DTCM", NULL, 0, 0xF1},
-       {"SQRAM", NULL, 0, 0xF2},
-       {"IRAM", NULL, 0, 0xF3},
-       {"APU", NULL, 0, 0xF4},
-       {"CIU", NULL, 0, 0xF5},
-       {"ICU", NULL, 0, 0xF6},
-       {"MAC", NULL, 0, 0xF7},
-};
-
 static int
 mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                       size_t size, int flags)
@@ -206,6 +195,8 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
                card->pcie.blksz_fw_dl = data->blksz_fw_dl;
                card->pcie.tx_buf_size = data->tx_buf_size;
                card->pcie.can_dump_fw = data->can_dump_fw;
+               card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl;
+               card->pcie.num_mem_types = data->num_mem_types;
                card->pcie.can_ext_scan = data->can_ext_scan;
        }
 
@@ -323,6 +314,8 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
        struct pcie_service_card *card = adapter->card;
 
        *data = ioread32(card->pci_mmap1 + reg);
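+       /* An all-ones readback usually means the device has dropped off the
+        * bus (e.g. surprise removal), so flag it to the caller.
+        */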
+       if (*data == 0xffffffff)
+               return 0xffffffff;
 
        return 0;
 }
@@ -2007,14 +2000,12 @@ done:
 
 /*
  * This function checks the firmware status in card.
- *
- * The winner interface is also determined by this function.
  */
 static int
 mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
 {
        int ret = 0;
-       u32 firmware_stat, winner_status;
+       u32 firmware_stat;
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        u32 tries;
@@ -2054,19 +2045,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
                }
        }
 
-       if (ret) {
-               if (mwifiex_read_reg(adapter, reg->fw_status,
-                                    &winner_status))
-                       ret = -1;
-               else if (!winner_status) {
-                       mwifiex_dbg(adapter, INFO,
-                                   "PCI-E is the winner\n");
-                       adapter->winner = 1;
-               } else {
-                       mwifiex_dbg(adapter, ERROR,
-                                   "PCI-E is not the winner <%#x,%d>, exit dnld\n",
-                                   ret, adapter->winner);
-               }
+       return ret;
+}
+
+/* This function checks if WLAN is the winner. */
+static int
+mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
+{
+       u32 winner = 0;
+       int ret = 0;
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) {
+               ret = -1;
+       } else if (!winner) {
+               mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n");
+               adapter->winner = 1;
+       } else {
+               mwifiex_dbg(adapter, ERROR,
+                           "PCI-E is not the winner <%#x,%d>, exit dnld\n",
+                           ret, adapter->winner);
        }
 
        return ret;
@@ -2075,20 +2075,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
 /*
  * This function reads the interrupt status from card.
  */
-static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter,
+                                    int msg_id)
 {
        u32 pcie_ireg;
        unsigned long flags;
+       struct pcie_service_card *card = adapter->card;
 
        if (!mwifiex_pcie_ok_to_access_hw(adapter))
                return;
 
-       if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) {
-               mwifiex_dbg(adapter, ERROR, "Read register failed\n");
-               return;
-       }
+       if (card->msix_enable && msg_id >= 0) {
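+               /* Each MSI-X vector maps to a single interrupt cause, so the
+                * status bit follows directly from the message id.
+                */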
+               pcie_ireg = BIT(msg_id);
+       } else {
+               if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
+                                    &pcie_ireg)) {
+                       mwifiex_dbg(adapter, ERROR, "Read register failed\n");
+                       return;
+               }
+
+               if ((pcie_ireg == 0xFFFFFFFF) || !pcie_ireg)
+                       return;
 
-       if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
 
                mwifiex_pcie_disable_host_int(adapter);
 
@@ -2099,21 +2107,24 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
                                    "Write register failed\n");
                        return;
                }
-               spin_lock_irqsave(&adapter->int_lock, flags);
-               adapter->int_status |= pcie_ireg;
-               spin_unlock_irqrestore(&adapter->int_lock, flags);
-
-               if (!adapter->pps_uapsd_mode &&
-                   adapter->ps_state == PS_STATE_SLEEP &&
-                   mwifiex_pcie_ok_to_access_hw(adapter)) {
-                               /* Potentially for PCIe we could get other
-                                * interrupts like shared. Don't change power
-                                * state until cookie is set */
-                               adapter->ps_state = PS_STATE_AWAKE;
-                               adapter->pm_wakeup_fw_try = false;
-                               del_timer(&adapter->wakeup_timer);
-               }
        }
+
+       if (!adapter->pps_uapsd_mode &&
+           adapter->ps_state == PS_STATE_SLEEP &&
+           mwifiex_pcie_ok_to_access_hw(adapter)) {
+               /* Potentially for PCIe we could get other
+                * interrupts like shared. Don't change power
+                * state until cookie is set
+                */
+               adapter->ps_state = PS_STATE_AWAKE;
+               adapter->pm_wakeup_fw_try = false;
+               del_timer(&adapter->wakeup_timer);
+       }
+
+       spin_lock_irqsave(&adapter->int_lock, flags);
+       adapter->int_status |= pcie_ireg;
+       spin_unlock_irqrestore(&adapter->int_lock, flags);
+       mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg);
 }
 
 /*
@@ -2124,7 +2135,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
  */
 static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
 {
-       struct pci_dev *pdev = (struct pci_dev *)context;
+       struct mwifiex_msix_context *ctx = context;
+       struct pci_dev *pdev = ctx->dev;
        struct pcie_service_card *card;
        struct mwifiex_adapter *adapter;
 
@@ -2144,7 +2156,11 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
        if (adapter->surprise_removed)
                goto exit;
 
-       mwifiex_interrupt_status(adapter);
+       if (card->msix_enable)
+               mwifiex_interrupt_status(adapter, ctx->msg_id);
+       else
+               mwifiex_interrupt_status(adapter, -1);
+
        mwifiex_queue_main_work(adapter);
 
 exit:
@@ -2164,7 +2180,7 @@ exit:
  * In case of Rx packets received, the packets are uploaded from card to
  * host and processed accordingly.
  */
-static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
+static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
 {
        int ret;
        u32 pcie_ireg;
@@ -2244,6 +2260,69 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
        return 0;
 }
 
+static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter)
+{
+       int ret;
+       u32 pcie_ireg;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->int_lock, flags);
+       /* Clear out unused interrupts */
+       pcie_ireg = adapter->int_status;
+       adapter->int_status = 0;
+       spin_unlock_irqrestore(&adapter->int_lock, flags);
+
+       if (pcie_ireg & HOST_INTR_DNLD_DONE) {
+               mwifiex_dbg(adapter, INTR,
+                           "info: TX DNLD Done\n");
+               ret = mwifiex_pcie_send_data_complete(adapter);
+               if (ret)
+                       return ret;
+       }
+       if (pcie_ireg & HOST_INTR_UPLD_RDY) {
+               mwifiex_dbg(adapter, INTR,
+                           "info: Rx DATA\n");
+               ret = mwifiex_pcie_process_recv_data(adapter);
+               if (ret)
+                       return ret;
+       }
+       if (pcie_ireg & HOST_INTR_EVENT_RDY) {
+               mwifiex_dbg(adapter, INTR,
+                           "info: Rx EVENT\n");
+               ret = mwifiex_pcie_process_event_ready(adapter);
+               if (ret)
+                       return ret;
+       }
+
+       if (pcie_ireg & HOST_INTR_CMD_DONE) {
+               if (adapter->cmd_sent) {
+                       mwifiex_dbg(adapter, INTR,
+                                   "info: CMD sent Interrupt\n");
+                       adapter->cmd_sent = false;
+               }
+               /* Handle command response */
+               ret = mwifiex_pcie_process_cmd_complete(adapter);
+               if (ret)
+                       return ret;
+       }
+
+       mwifiex_dbg(adapter, INTR,
+                   "info: cmd_sent=%d data_sent=%d\n",
+                   adapter->cmd_sent, adapter->data_sent);
+
+       return 0;
+}
+
+static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+
+       if (card->msix_enable)
+               return mwifiex_process_msix_int(adapter);
+       else
+               return mwifiex_process_pcie_int(adapter);
+}
+
 /*
  * This function downloads data from driver to card.
  *
@@ -2278,10 +2357,15 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
 {
        int ret, tries;
        u8 ctrl_data;
+       u32 fw_status;
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-       ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY);
+       if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status))
+               return RDWR_STATUS_FAILURE;
+
+       ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
+                               reg->fw_dump_host_ready);
        if (ret) {
                mwifiex_dbg(adapter, ERROR,
                            "PCIE write err\n");
@@ -2294,11 +2378,11 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
                        return RDWR_STATUS_SUCCESS;
                if (doneflag && ctrl_data == doneflag)
                        return RDWR_STATUS_DONE;
-               if (ctrl_data != FW_DUMP_HOST_READY) {
+               if (ctrl_data != reg->fw_dump_host_ready) {
                        mwifiex_dbg(adapter, WARN,
                                    "The ctrl reg was changed, re-try again!\n");
                        ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
-                                               FW_DUMP_HOST_READY);
+                                               reg->fw_dump_host_ready);
                        if (ret) {
                                mwifiex_dbg(adapter, ERROR,
                                            "PCIE write err\n");
@@ -2318,7 +2402,8 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
        unsigned int reg, reg_start, reg_end;
-       u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
+       u8 *dbg_ptr, *end_ptr, *tmp_ptr, fw_dump_num, dump_num;
+       u8 idx, i, read_reg, doneflag = 0;
        enum rdwr_status stat;
        u32 memory_size;
        int ret;
@@ -2326,8 +2411,9 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
        if (!card->pcie.can_dump_fw)
                return;
 
-       for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
-               struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+       for (idx = 0; idx < adapter->num_mem_types; idx++) {
+               struct memory_type_mapping *entry =
+                               &adapter->mem_type_mapping_tbl[idx];
 
                if (entry->mem_ptr) {
                        vfree(entry->mem_ptr);
@@ -2336,7 +2422,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
                entry->mem_size = 0;
        }
 
-       mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n");
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
 
        /* Read the number of the memories which will dump */
        stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
@@ -2344,28 +2430,38 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
                return;
 
        reg = creg->fw_dump_start;
-       mwifiex_read_reg_byte(adapter, reg, &dump_num);
+       mwifiex_read_reg_byte(adapter, reg, &fw_dump_num);
+
+       /* W8997 chipset firmware dump is stored in a single region */
+       if (fw_dump_num == 0)
+               dump_num = 1;
+       else
+               dump_num = fw_dump_num;
 
        /* Read the length of every memory which will dump */
        for (idx = 0; idx < dump_num; idx++) {
-               struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
-
-               stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
-               if (stat == RDWR_STATUS_FAILURE)
-                       return;
-
+               struct memory_type_mapping *entry =
+                               &adapter->mem_type_mapping_tbl[idx];
                memory_size = 0;
-               reg = creg->fw_dump_start;
-               for (i = 0; i < 4; i++) {
-                       mwifiex_read_reg_byte(adapter, reg, &read_reg);
-                       memory_size |= (read_reg << (i * 8));
+               if (fw_dump_num != 0) {
+                       stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
+                       if (stat == RDWR_STATUS_FAILURE)
+                               return;
+
+                       reg = creg->fw_dump_start;
+                       for (i = 0; i < 4; i++) {
+                               mwifiex_read_reg_byte(adapter, reg, &read_reg);
+                               memory_size |= (read_reg << (i * 8));
                        reg++;
+                       }
+               } else {
+                       memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE;
                }
 
                if (memory_size == 0) {
                        mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n");
                        ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl,
-                                               FW_DUMP_READ_DONE);
+                                               creg->fw_dump_read_done);
                        if (ret) {
                                mwifiex_dbg(adapter, ERROR, "PCIE write err\n");
                                return;
@@ -2400,11 +2496,21 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
                                mwifiex_read_reg_byte(adapter, reg, dbg_ptr);
                                if (dbg_ptr < end_ptr) {
                                        dbg_ptr++;
-                               } else {
-                                       mwifiex_dbg(adapter, ERROR,
-                                                   "Allocated buf not enough\n");
-                                       return;
+                                       continue;
                                }
+                               mwifiex_dbg(adapter, ERROR,
+                                           "pre-allocated buf not enough\n");
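+                               /* Grow the dump buffer by 4 KB and preserve
+                                * the data copied so far.
+                                */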
+                               tmp_ptr =
+                                       vzalloc(memory_size + MWIFIEX_SIZE_4K);
+                               if (!tmp_ptr)
+                                       return;
+                               memcpy(tmp_ptr, entry->mem_ptr, memory_size);
+                               vfree(entry->mem_ptr);
+                               entry->mem_ptr = tmp_ptr;
+                               tmp_ptr = NULL;
+                               dbg_ptr = entry->mem_ptr + memory_size;
+                               memory_size += MWIFIEX_SIZE_4K;
+                               end_ptr = entry->mem_ptr + memory_size;
                        }
 
                        if (stat != RDWR_STATUS_DONE)
@@ -2416,7 +2522,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
                        break;
                } while (true);
        }
-       mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n");
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
 }
 
 static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
@@ -2595,10 +2701,43 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
 
 static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
 {
-       int ret;
+       int ret, i, j;
        struct pcie_service_card *card = adapter->card;
        struct pci_dev *pdev = card->dev;
 
+       if (card->pcie.reg->msix_support) {
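+               /* Try MSI-X first; on failure the code below falls back to
+                * MSI and finally to a shared legacy interrupt.
+                */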
+               for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+                       card->msix_entries[i].entry = i;
+               ret = pci_enable_msix_exact(pdev, card->msix_entries,
+                                           MWIFIEX_NUM_MSIX_VECTORS);
+               if (!ret) {
+                       for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) {
+                               card->msix_ctx[i].dev = pdev;
+                               card->msix_ctx[i].msg_id = i;
+
+                               ret = request_irq(card->msix_entries[i].vector,
+                                                 mwifiex_pcie_interrupt, 0,
+                                                 "MWIFIEX_PCIE_MSIX",
+                                                 &card->msix_ctx[i]);
+                               if (ret)
+                                       break;
+                       }
+
+                       if (ret) {
+                               mwifiex_dbg(adapter, INFO, "request_irq fail: %d\n",
+                                           ret);
+                               for (j = 0; j < i; j++)
+                                       free_irq(card->msix_entries[j].vector,
+                                                &card->msix_ctx[j]);
+                               pci_disable_msix(pdev);
+                       } else {
+                               mwifiex_dbg(adapter, MSG, "MSIx enabled!");
+                               card->msix_enable = 1;
+                               return 0;
+                       }
+               }
+       }
+
        if (pci_enable_msi(pdev) != 0)
                pci_disable_msi(pdev);
        else
@@ -2606,8 +2745,10 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
 
        mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable);
 
+       card->share_irq_ctx.dev = pdev;
+       card->share_irq_ctx.msg_id = -1;
        ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
-                         "MRVL_PCIE", pdev);
+                         "MRVL_PCIE", &card->share_irq_ctx);
        if (ret) {
                pr_err("request_irq failed: ret=%d\n", ret);
                adapter->card = NULL;
@@ -2635,8 +2776,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
                return -1;
 
        adapter->tx_buf_size = card->pcie.tx_buf_size;
-       adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
-       adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+       adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
+       adapter->num_mem_types = card->pcie.num_mem_types;
        strcpy(adapter->fw_name, card->pcie.firmware);
        adapter->ext_scan = card->pcie.can_ext_scan;
 
@@ -2653,11 +2794,28 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg;
+       struct pci_dev *pdev = card->dev;
+       int i;
 
        if (card) {
-               mwifiex_dbg(adapter, INFO,
-                           "%s(): calling free_irq()\n", __func__);
-               free_irq(card->dev->irq, card->dev);
+               if (card->msix_enable) {
+                       for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+                               synchronize_irq(card->msix_entries[i].vector);
+
+                       for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+                               free_irq(card->msix_entries[i].vector,
+                                        &card->msix_ctx[i]);
+
+                       card->msix_enable = 0;
+                       pci_disable_msix(pdev);
+               } else {
+                       mwifiex_dbg(adapter, INFO,
+                                   "%s(): calling free_irq()\n", __func__);
+                       free_irq(card->dev->irq, &card->share_irq_ctx);
+
+                       if (card->msi_enable)
+                               pci_disable_msi(pdev);
+               }
 
                reg = card->pcie.reg;
                if (reg->sleep_cookie)
@@ -2675,6 +2833,7 @@ static struct mwifiex_if_ops pcie_ops = {
        .init_if =                      mwifiex_pcie_init,
        .cleanup_if =                   mwifiex_pcie_cleanup,
        .check_fw_status =              mwifiex_check_fw_status,
+       .check_winner_status =          mwifiex_check_winner_status,
        .prog_fw =                      mwifiex_prog_fw_w_helper,
        .register_dev =                 mwifiex_register_dev,
        .unregister_dev =               mwifiex_unregister_dev,
index 6fc28737b576c64b1ac52d0199611cb81c8bad2b..29e58ce877e349649c747080bce913f1063362c8 100644 (file)
@@ -26,6 +26,7 @@
 #include    <linux/pcieport_if.h>
 #include    <linux/interrupt.h>
 
+#include    "decl.h"
 #include    "main.h"
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
@@ -135,6 +136,9 @@ struct mwifiex_pcie_card_reg {
        u16 fw_dump_ctrl;
        u16 fw_dump_start;
        u16 fw_dump_end;
+       u8 fw_dump_host_ready;
+       u8 fw_dump_read_done;
+       u8 msix_support;
 };
 
 static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
@@ -166,6 +170,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
        .ring_tx_start_ptr = 0,
        .pfu_enabled = 0,
        .sleep_cookie = 1,
+       .msix_support = 0,
 };
 
 static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
@@ -200,6 +205,9 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
        .fw_dump_ctrl = 0xcf4,
        .fw_dump_start = 0xcf8,
        .fw_dump_end = 0xcff,
+       .fw_dump_host_ready = 0xee,
+       .fw_dump_read_done = 0xfe,
+       .msix_support = 0,
 };
 
 static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
@@ -231,6 +239,27 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
        .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
        .pfu_enabled = 1,
        .sleep_cookie = 0,
+       .fw_dump_ctrl = 0xcf4,
+       .fw_dump_start = 0xcf8,
+       .fw_dump_end = 0xcff,
+       .fw_dump_host_ready = 0xcc,
+       .fw_dump_read_done = 0xdd,
+       .msix_support = 1,
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
+       {"ITCM", NULL, 0, 0xF0},
+       {"DTCM", NULL, 0, 0xF1},
+       {"SQRAM", NULL, 0, 0xF2},
+       {"IRAM", NULL, 0, 0xF3},
+       {"APU", NULL, 0, 0xF4},
+       {"CIU", NULL, 0, 0xF5},
+       {"ICU", NULL, 0, 0xF6},
+       {"MAC", NULL, 0, 0xF7},
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
+       {"DUMP", NULL, 0, 0xDD},
 };
 
 struct mwifiex_pcie_device {
@@ -239,6 +268,8 @@ struct mwifiex_pcie_device {
        u16 blksz_fw_dl;
        u16 tx_buf_size;
        bool can_dump_fw;
+       struct memory_type_mapping *mem_type_mapping_tbl;
+       u8 num_mem_types;
        bool can_ext_scan;
 };
 
@@ -257,6 +288,8 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
        .can_dump_fw = true,
+       .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897,
+       .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897),
        .can_ext_scan = true,
 };
 
@@ -265,7 +298,9 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
        .reg            = &mwifiex_reg_8997,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
-       .can_dump_fw = false,
+       .can_dump_fw = true,
+       .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997,
+       .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997),
        .can_ext_scan = true,
 };
 
@@ -290,6 +325,13 @@ struct mwifiex_pfu_buf_desc {
        u32 reserved;
 } __packed;
 
+#define MWIFIEX_NUM_MSIX_VECTORS   4
+
+struct mwifiex_msix_context {
+       struct pci_dev *dev;
+       u16 msg_id;
+};
+
 struct pcie_service_card {
        struct pci_dev *dev;
        struct mwifiex_adapter *adapter;
@@ -327,6 +369,12 @@ struct pcie_service_card {
        void __iomem *pci_mmap;
        void __iomem *pci_mmap1;
        int msi_enable;
+       int msix_enable;
+#ifdef CONFIG_PCI
+       struct msix_entry msix_entries[MWIFIEX_NUM_MSIX_VECTORS];
+#endif
+       struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS];
+       struct mwifiex_msix_context share_irq_ctx;
 };
 
 static inline int
index c20017ced5667c796abec645ae5cc8ca620b530f..489f7a911a83f225561a2ddf902c637457b6b960 100644 (file)
@@ -547,6 +547,61 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
        return chan_idx;
 }
 
+/* This function creates a channel list TLV for bgscan config, based
+ * on region/band information.
+ */
+static int
+mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv,
+                                  const struct mwifiex_bg_scan_cfg
+                                               *bgscan_cfg_in,
+                                  struct mwifiex_chan_scan_param_set
+                                               *scan_chan_list)
+{
+       enum ieee80211_band band;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *ch;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       int chan_idx = 0, i;
+
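+       /* Walk every enabled channel in all supported bands; pick passive or
+        * active scan timing from the channel flags.
+        */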
+       for (band = 0; (band < IEEE80211_NUM_BANDS); band++) {
+               if (!priv->wdev.wiphy->bands[band])
+                       continue;
+
+               sband = priv->wdev.wiphy->bands[band];
+
+               for (i = 0; (i < sband->n_channels) ; i++) {
+                       ch = &sband->channels[i];
+                       if (ch->flags & IEEE80211_CHAN_DISABLED)
+                               continue;
+                       scan_chan_list[chan_idx].radio_type = band;
+
+                       if (bgscan_cfg_in->chan_list[0].scan_time)
+                               scan_chan_list[chan_idx].max_scan_time =
+                                       cpu_to_le16((u16)bgscan_cfg_in->
+                                       chan_list[0].scan_time);
+                       else if (ch->flags & IEEE80211_CHAN_NO_IR)
+                               scan_chan_list[chan_idx].max_scan_time =
+                                       cpu_to_le16(adapter->passive_scan_time);
+                       else
+                               scan_chan_list[chan_idx].max_scan_time =
+                                       cpu_to_le16(adapter->
+                                                   specific_scan_time);
+
+                       if (ch->flags & IEEE80211_CHAN_NO_IR)
+                               scan_chan_list[chan_idx].chan_scan_mode_bitmap
+                                       |= MWIFIEX_PASSIVE_SCAN;
+                       else
+                               scan_chan_list[chan_idx].chan_scan_mode_bitmap
+                                       &= ~MWIFIEX_PASSIVE_SCAN;
+
+                       scan_chan_list[chan_idx].chan_number =
+                                                       (u32)ch->hw_value;
+                       chan_idx++;
+               }
+       }
+       return chan_idx;
+}
+
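The dwell-time choice above follows a fixed precedence; a small worked illustration (the millisecond values are invented, the field names come from the function above):

/* Assume bgscan_cfg_in->chan_list[0].scan_time == 0,
 * adapter->passive_scan_time == 110 and adapter->specific_scan_time == 30:
 *   - a channel flagged IEEE80211_CHAN_NO_IR gets max_scan_time = 110 and
 *     MWIFIEX_PASSIVE_SCAN set in its chan_scan_mode_bitmap;
 *   - every other enabled channel gets max_scan_time = 30 with the
 *     passive bit cleared.
 * A non-zero chan_list[0].scan_time would override both defaults.
 */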
 /* This function appends rate TLV to scan config command. */
 static int
 mwifiex_append_rate_tlv(struct mwifiex_private *priv,
@@ -2037,6 +2092,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
        u8 is_bgscan_resp;
        __le64 fw_tsf = 0;
        u8 *radio_type;
+       struct cfg80211_wowlan_nd_match *pmatch;
+       struct cfg80211_sched_scan_request *nd_config = NULL;
 
        is_bgscan_resp = (le16_to_cpu(resp->command)
                          == HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -2099,6 +2156,21 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                             (struct mwifiex_ie_types_data **)
                                             &chan_band_tlv);
 
+#ifdef CONFIG_PM
+       if (priv->wdev.wiphy->wowlan_config)
+               nd_config = priv->wdev.wiphy->wowlan_config->nd_config;
+#endif
+
+       if (nd_config) {
+               adapter->nd_info =
+                       kzalloc(sizeof(struct cfg80211_wowlan_nd_match) +
+                               sizeof(struct cfg80211_wowlan_nd_match *) *
+                               scan_rsp->number_of_sets, GFP_ATOMIC);
+
+               if (adapter->nd_info)
+                       adapter->nd_info->n_matches = scan_rsp->number_of_sets;
+       }
+
        for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
                /*
                 * If the TSF TLV was appended to the scan results, save this
@@ -2117,6 +2189,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                        radio_type = NULL;
                }
 
+               if (chan_band_tlv && adapter->nd_info) {
+                       adapter->nd_info->matches[idx] =
+                               kzalloc(sizeof(*pmatch) +
+                               sizeof(u32), GFP_ATOMIC);
+
+                       pmatch = adapter->nd_info->matches[idx];
+
+                       if (pmatch) {
+                               memset(pmatch, 0, sizeof(*pmatch));
+                               if (chan_band_tlv) {
+                                       pmatch->n_channels = 1;
+                                       pmatch->channels[0] =
+                                               chan_band->chan_number;
+                               }
+                       }
+               }
+
                ret = mwifiex_parse_single_response_buf(priv, &bss_info,
                                                        &bytes_left,
                                                        le64_to_cpu(fw_tsf),
@@ -2155,6 +2244,227 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
        return 0;
 }
 
+/* This function prepares a background scan config command to be sent
+ * to the firmware.
+ */
+int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
+                                     struct host_cmd_ds_command *cmd,
+                                     void *data_buf)
+{
+       struct host_cmd_ds_802_11_bg_scan_config *bgscan_config =
+                                       &cmd->params.bg_scan_config;
+       struct mwifiex_bg_scan_cfg *bgscan_cfg_in = data_buf;
+       u8 *tlv_pos = bgscan_config->tlv;
+       u8 num_probes;
+       u32 ssid_len, chan_idx, scan_type, scan_dur, chan_num;
+       int i;
+       struct mwifiex_ie_types_num_probes *num_probes_tlv;
+       struct mwifiex_ie_types_repeat_count *repeat_count_tlv;
+       struct mwifiex_ie_types_min_rssi_threshold *rssi_threshold_tlv;
+       struct mwifiex_ie_types_bgscan_start_later *start_later_tlv;
+       struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
+       struct mwifiex_ie_types_chan_list_param_set *chan_list_tlv;
+       struct mwifiex_chan_scan_param_set *temp_chan;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_CONFIG);
+       cmd->size = cpu_to_le16(sizeof(*bgscan_config) + S_DS_GEN);
+
+       bgscan_config->action = cpu_to_le16(bgscan_cfg_in->action);
+       bgscan_config->enable = bgscan_cfg_in->enable;
+       bgscan_config->bss_type = bgscan_cfg_in->bss_type;
+       bgscan_config->scan_interval =
+               cpu_to_le32(bgscan_cfg_in->scan_interval);
+       bgscan_config->report_condition =
+               cpu_to_le32(bgscan_cfg_in->report_condition);
+
+       /*  stop sched scan  */
+       if (!bgscan_config->enable)
+               return 0;
+
+       bgscan_config->chan_per_scan = bgscan_cfg_in->chan_per_scan;
+
+       num_probes = (bgscan_cfg_in->num_probes ? bgscan_cfg_in->
+                     num_probes : priv->adapter->scan_probes);
+
+       if (num_probes) {
+               num_probes_tlv = (struct mwifiex_ie_types_num_probes *)tlv_pos;
+               num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
+               num_probes_tlv->header.len =
+                       cpu_to_le16(sizeof(num_probes_tlv->num_probes));
+               num_probes_tlv->num_probes = cpu_to_le16((u16)num_probes);
+
+               tlv_pos += sizeof(num_probes_tlv->header) +
+                       le16_to_cpu(num_probes_tlv->header.len);
+       }
+
+       if (bgscan_cfg_in->repeat_count) {
+               repeat_count_tlv =
+                       (struct mwifiex_ie_types_repeat_count *)tlv_pos;
+               repeat_count_tlv->header.type =
+                       cpu_to_le16(TLV_TYPE_REPEAT_COUNT);
+               repeat_count_tlv->header.len =
+                       cpu_to_le16(sizeof(repeat_count_tlv->repeat_count));
+               repeat_count_tlv->repeat_count =
+                       cpu_to_le16(bgscan_cfg_in->repeat_count);
+
+               tlv_pos += sizeof(repeat_count_tlv->header) +
+                       le16_to_cpu(repeat_count_tlv->header.len);
+       }
+
+       if (bgscan_cfg_in->rssi_threshold) {
+               rssi_threshold_tlv =
+                       (struct mwifiex_ie_types_min_rssi_threshold *)tlv_pos;
+               rssi_threshold_tlv->header.type =
+                       cpu_to_le16(TLV_TYPE_RSSI_LOW);
+               rssi_threshold_tlv->header.len =
+                       cpu_to_le16(sizeof(rssi_threshold_tlv->rssi_threshold));
+               rssi_threshold_tlv->rssi_threshold =
+                       cpu_to_le16(bgscan_cfg_in->rssi_threshold);
+
+               tlv_pos += sizeof(rssi_threshold_tlv->header) +
+                       le16_to_cpu(rssi_threshold_tlv->header.len);
+       }
+
+       for (i = 0; i < bgscan_cfg_in->num_ssids; i++) {
+               ssid_len = bgscan_cfg_in->ssid_list[i].ssid.ssid_len;
+
+               wildcard_ssid_tlv =
+                       (struct mwifiex_ie_types_wildcard_ssid_params *)tlv_pos;
+               wildcard_ssid_tlv->header.type =
+                               cpu_to_le16(TLV_TYPE_WILDCARDSSID);
+               wildcard_ssid_tlv->header.len = cpu_to_le16(
+                               (u16)(ssid_len + sizeof(wildcard_ssid_tlv->
+                                                        max_ssid_length)));
+
+               /* max_ssid_length = 0 tells the firmware to perform a
+                * specific scan for the SSID filled in, whereas
+                * max_ssid_length = IEEE80211_MAX_SSID_LEN requests a
+                * wildcard scan.
+                */
+               if (ssid_len)
+                       wildcard_ssid_tlv->max_ssid_length = 0;
+               else
+                       wildcard_ssid_tlv->max_ssid_length =
+                                               IEEE80211_MAX_SSID_LEN;
+
+               memcpy(wildcard_ssid_tlv->ssid,
+                      bgscan_cfg_in->ssid_list[i].ssid.ssid, ssid_len);
+
+               tlv_pos += (sizeof(wildcard_ssid_tlv->header)
+                               + le16_to_cpu(wildcard_ssid_tlv->header.len));
+       }
+
+       chan_list_tlv = (struct mwifiex_ie_types_chan_list_param_set *)tlv_pos;
+
+       if (bgscan_cfg_in->chan_list[0].chan_number) {
+               dev_dbg(priv->adapter->dev, "info: bgscan: Using supplied channel list\n");
+
+               chan_list_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
+
+               for (chan_idx = 0;
+                    chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX &&
+                    bgscan_cfg_in->chan_list[chan_idx].chan_number;
+                    chan_idx++) {
+                       temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
+
+                       /* Increment the TLV header length by size appended */
+                       le16_add_cpu(&chan_list_tlv->header.len,
+                                    sizeof(chan_list_tlv->chan_scan_param));
+
+                       temp_chan->chan_number =
+                               bgscan_cfg_in->chan_list[chan_idx].chan_number;
+                       temp_chan->radio_type =
+                               bgscan_cfg_in->chan_list[chan_idx].radio_type;
+
+                       scan_type =
+                               bgscan_cfg_in->chan_list[chan_idx].scan_type;
+
+                       if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+                               temp_chan->chan_scan_mode_bitmap
+                                       |= MWIFIEX_PASSIVE_SCAN;
+                       else
+                               temp_chan->chan_scan_mode_bitmap
+                                       &= ~MWIFIEX_PASSIVE_SCAN;
+
+                       if (bgscan_cfg_in->chan_list[chan_idx].scan_time) {
+                               scan_dur = (u16)bgscan_cfg_in->
+                                       chan_list[chan_idx].scan_time;
+                       } else {
+                               scan_dur = (scan_type ==
+                                           MWIFIEX_SCAN_TYPE_PASSIVE) ?
+                                           priv->adapter->passive_scan_time :
+                                           priv->adapter->specific_scan_time;
+                       }
+
+                       temp_chan->min_scan_time = cpu_to_le16(scan_dur);
+                       temp_chan->max_scan_time = cpu_to_le16(scan_dur);
+               }
+       } else {
+               dev_dbg(priv->adapter->dev,
+                       "info: bgscan: Creating full region channel list\n");
+               chan_num =
+                       mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in,
+                                                          chan_list_tlv->
+                                                          chan_scan_param);
+               le16_add_cpu(&chan_list_tlv->header.len,
+                            chan_num *
+                            sizeof(chan_list_tlv->chan_scan_param[0]));
+       }
+
+       tlv_pos += (sizeof(chan_list_tlv->header)
+                       + le16_to_cpu(chan_list_tlv->header.len));
+
+       if (bgscan_cfg_in->start_later) {
+               start_later_tlv =
+                       (struct mwifiex_ie_types_bgscan_start_later *)tlv_pos;
+               start_later_tlv->header.type =
+                       cpu_to_le16(TLV_TYPE_BGSCAN_START_LATER);
+               start_later_tlv->header.len =
+                       cpu_to_le16(sizeof(start_later_tlv->start_later));
+               start_later_tlv->start_later =
+                       cpu_to_le16(bgscan_cfg_in->start_later);
+
+               tlv_pos += sizeof(start_later_tlv->header) +
+                       le16_to_cpu(start_later_tlv->header.len);
+       }
+
+       /* Append vendor specific IE TLV */
+       mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos);
+
+       le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv);
+
+       return 0;
+}
+
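Each optional element of the command above is appended with the same cast, fill, and advance idiom; a minimal self-contained sketch of that idiom follows (the struct and function names here are invented for illustration, not part of the driver):

/* Hedged sketch of the TLV-append pattern used throughout
 * mwifiex_cmd_802_11_bg_scan_config(); example_tlv and
 * append_example_tlv are made-up names.
 */
struct example_tlv {
        __le16 type;
        __le16 len;
        __le16 value;
} __packed;

static u8 *append_example_tlv(u8 *tlv_pos, u16 type, u16 value)
{
        struct example_tlv *tlv = (struct example_tlv *)tlv_pos;

        tlv->type = cpu_to_le16(type);
        tlv->len = cpu_to_le16(sizeof(tlv->value));
        tlv->value = cpu_to_le16(value);

        /* Advance past the fixed header plus the payload just written,
         * exactly as the tlv_pos updates above do.
         */
        return tlv_pos + sizeof(tlv->type) + sizeof(tlv->len) +
               le16_to_cpu(tlv->len);
}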
+int mwifiex_stop_bg_scan(struct mwifiex_private *priv)
+{
+       struct mwifiex_bg_scan_cfg *bgscan_cfg;
+
+       if (!priv->sched_scanning) {
+               dev_dbg(priv->adapter->dev, "bgscan already stopped!\n");
+               return 0;
+       }
+
+       bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL);
+       if (!bgscan_cfg)
+               return -ENOMEM;
+
+       bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA;
+       bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET;
+       bgscan_cfg->enable = false;
+
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG,
+                            HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) {
+               kfree(bgscan_cfg);
+               return -EFAULT;
+       }
+
+       kfree(bgscan_cfg);
+       priv->sched_scanning = false;
+
+       return 0;
+}
+
 static void
 mwifiex_update_chan_statistics(struct mwifiex_private *priv,
                               struct mwifiex_ietypes_chanstats *tlv_stat)
index 4c8cae682c89c727de4518d3b78d42da388d0ac6..abf15dbdfe08e8e92d97e292c66d8a26e30026ff 100644 (file)
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
 
        /* Disable Host Sleep */
        mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
-                         MWIFIEX_ASYNC_CMD);
+                         MWIFIEX_SYNC_CMD);
 
        return 0;
 }
@@ -1039,19 +1039,14 @@ done:
 
 /*
  * This function checks the firmware status in card.
- *
- * The winner interface is also determined by this function.
  */
 static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
                                   u32 poll_num)
 {
-       struct sdio_mmc_card *card = adapter->card;
        int ret = 0;
        u16 firmware_stat;
        u32 tries;
-       u8 winner_status;
 
-       /* Wait for firmware initialization event */
        for (tries = 0; tries < poll_num; tries++) {
                ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
                if (ret)
@@ -1065,16 +1060,25 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
                }
        }
 
-       if (ret) {
-               if (mwifiex_read_reg
-                   (adapter, card->reg->status_reg_0, &winner_status))
-                       winner_status = 0;
+       return ret;
+}
+
+/* This function checks if WLAN is the winner.
+ */
+static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
+{
+       int ret = 0;
+       u8 winner = 0;
+       struct sdio_mmc_card *card = adapter->card;
+
+       if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner))
+               return -1;
+
+       if (winner)
+               adapter->winner = 0;
+       else
+               adapter->winner = 1;
 
-               if (winner_status)
-                       adapter->winner = 0;
-               else
-                       adapter->winner = 1;
-       }
        return ret;
 }
 
@@ -2620,6 +2624,7 @@ static struct mwifiex_if_ops sdio_ops = {
        .init_if = mwifiex_init_sdio,
        .cleanup_if = mwifiex_cleanup_sdio,
        .check_fw_status = mwifiex_check_fw_status,
+       .check_winner_status = mwifiex_check_winner_status,
        .prog_fw = mwifiex_prog_fw_w_helper,
        .register_dev = mwifiex_register_dev,
        .unregister_dev = mwifiex_unregister_dev,
index e486867a4c67520fc2a8a8520368009975fd8878..30f152601c5774e266219066acee03f2cb3b21a8 100644 (file)
@@ -1813,6 +1813,22 @@ static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd,
        return 0;
 }
 
+/* This function prepares a command to get the HS wakeup reason.
+ *
+ * Preparation includes -
+ *      - Setting command ID, action and proper size
+ *      - Ensuring correct endian-ness
+ */
+static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv,
+                                        struct host_cmd_ds_command *cmd)
+{
+       cmd->command = cpu_to_le16(HostCmd_CMD_HS_WAKEUP_REASON);
+       cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_wakeup_reason) +
+                               S_DS_GEN);
+
+       return 0;
+}
+
 /*
  * This function prepares the commands before sending them to the firmware.
  *
@@ -1873,6 +1889,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_802_11_SCAN:
                ret = mwifiex_cmd_802_11_scan(cmd_ptr, data_buf);
                break;
+       case HostCmd_CMD_802_11_BG_SCAN_CONFIG:
+               ret = mwifiex_cmd_802_11_bg_scan_config(priv, cmd_ptr,
+                                                       data_buf);
+               break;
        case HostCmd_CMD_802_11_BG_SCAN_QUERY:
                ret = mwifiex_cmd_802_11_bg_scan_query(cmd_ptr);
                break;
@@ -2063,6 +2083,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
                                                   data_buf);
                break;
+       case HostCmd_CMD_HS_WAKEUP_REASON:
+               ret = mwifiex_cmd_get_wakeup_reason(priv, cmd_ptr);
+               break;
        case HostCmd_CMD_MC_POLICY:
                ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
                                                data_buf);
index 9ac7aa2431b41533ab68f8495693c36c41f40801..d96523e10eb46ef051a1b40a35f408d42dc0404d 100644 (file)
@@ -1076,9 +1076,12 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_802_11_BG_SCAN_QUERY:
                ret = mwifiex_ret_802_11_scan(priv, resp);
+               cfg80211_sched_scan_results(priv->wdev.wiphy);
                mwifiex_dbg(adapter, CMD,
                            "info: CMD_RESP: BG_SCAN result is ready!\n");
                break;
+       case HostCmd_CMD_802_11_BG_SCAN_CONFIG:
+               break;
        case HostCmd_CMD_TXPWR_CFG:
                ret = mwifiex_ret_tx_power_cfg(priv, resp);
                break;
@@ -1233,6 +1236,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
                ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
                break;
+       case HostCmd_CMD_HS_WAKEUP_REASON:
+               ret = mwifiex_ret_wakeup_reason(priv, resp, data_buf);
+               break;
        case HostCmd_CMD_TDLS_CONFIG:
                break;
        case HostCmd_CMD_ROBUST_COEX:
index ff3ee9dfbbd54f51dbd42f8ff5478d78bc63013b..070bce401151a522983a3ce20fe6055ae4113612 100644 (file)
@@ -92,6 +92,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        priv->is_data_rate_auto = true;
        priv->data_rate = 0;
 
+       priv->assoc_resp_ht_param = 0;
+       priv->ht_param_present = false;
+
        if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
             GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && priv->hist_data)
                mwifiex_hist_data_reset(priv);
@@ -607,11 +610,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_PS_AWAKE:
                mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
-               if (!adapter->pps_uapsd_mode && priv->port_open &&
+               if (!adapter->pps_uapsd_mode &&
+                   (priv->port_open ||
+                    (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
                    priv->media_connected && adapter->sleep_period.period) {
-                               adapter->pps_uapsd_mode = true;
-                               mwifiex_dbg(adapter, EVENT,
-                                           "event: PPS/UAPSD mode activated\n");
+                       adapter->pps_uapsd_mode = true;
+                       mwifiex_dbg(adapter, EVENT,
+                                   "event: PPS/UAPSD mode activated\n");
                }
                adapter->tx_lock_flag = false;
                if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -686,6 +691,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                       HostCmd_ACT_GEN_GET, 0, NULL, false);
                break;
 
+       case EVENT_BG_SCAN_STOPPED:
+               dev_dbg(adapter->dev, "event: BGS_STOPPED\n");
+               cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+               if (priv->sched_scanning)
+                       priv->sched_scanning = false;
+               break;
+
        case EVENT_PORT_RELEASE:
                mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
                priv->port_open = true;
index 6a4fc5d183cfec34780d8ac0d6dcd2f30e0b7f0c..5cbee58f8781386d945b70073d385665ea8798e5 100644 (file)
@@ -504,6 +504,20 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
                }
        }
 
+       priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+       if (priv && priv->sched_scanning) {
+#ifdef CONFIG_PM
+               if (!priv->wdev.wiphy->wowlan_config->nd_config) {
+#endif
+                       mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
+                       mwifiex_stop_bg_scan(priv);
+                       cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+#ifdef CONFIG_PM
+               }
+#endif
+       }
+
        if (adapter->hs_activated) {
                mwifiex_dbg(adapter, CMD,
                            "cmd: HS Already activated\n");
@@ -1114,11 +1128,12 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
  * with requisite parameters and calls the IOCTL handler.
  */
 int
-mwifiex_get_ver_ext(struct mwifiex_private *priv)
+mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel)
 {
        struct mwifiex_ver_ext ver_ext;
 
        memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
+       ver_ext.version_str_sel = version_str_sel;
        if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
                             HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
                return -1;
@@ -1450,3 +1465,19 @@ mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
 
        return 0;
 }
+
+/* This function gets the Host Sleep wakeup reason.
+ */
+int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
+                             int cmd_type,
+                             struct mwifiex_ds_wakeup_reason *wakeup_reason)
+{
+       int status = 0;
+
+       status = mwifiex_send_cmd(priv, HostCmd_CMD_HS_WAKEUP_REASON,
+                                 HostCmd_ACT_GEN_GET, 0, wakeup_reason,
+                                 cmd_type == MWIFIEX_SYNC_CMD);
+
+       return status;
+}
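A hedged caller sketch for the helper above; the wakeup_reason member name is an assumption, while the action and sync flag mirror how the command is dispatched elsewhere in this diff:

/* Hypothetical call site, not part of this patch: query the host-sleep
 * wakeup reason synchronously after resume.  The hs_wakeup_reason
 * member name is assumed.
 */
struct mwifiex_ds_wakeup_reason wakeup_reason;

memset(&wakeup_reason, 0, sizeof(wakeup_reason));
if (!mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET,
                               MWIFIEX_SYNC_CMD, &wakeup_reason))
        mwifiex_dbg(priv->adapter, INFO, "host sleep wakeup reason: %d\n",
                    wakeup_reason.hs_wakeup_reason);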
index acccd6734e3b332cb172789e9b210c077465371f..0eb246502e1d119b03d83874cba3030ac4849092 100644 (file)
@@ -438,6 +438,7 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                mwifiex_set_ba_params(priv);
                mwifiex_reset_11n_rx_seq_num(priv);
 
+               priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -475,7 +476,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
                priv = adapter->priv[i];
                if (!priv)
                        continue;
-               if (!priv->port_open)
+               if (!priv->port_open &&
+                   (priv->bss_mode != NL80211_IFTYPE_ADHOC))
                        continue;
                if (adapter->if_ops.is_port_ready &&
                    !adapter->if_ops.is_port_ready(priv))
@@ -1099,7 +1101,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 
                        priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
 
-                       if (!priv_tmp->port_open ||
+                       if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
+                            !priv_tmp->port_open) ||
                            (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
                                continue;
 
index 30e3aaae32e2288ed1f7541b6da7a42eb6a8729a..088429d0a634d8c4372d45e2dc1cd3b7d2fabf1e 100644 (file)
@@ -5421,11 +5421,13 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
 
 static int
 mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  enum ieee80211_ampdu_mlme_action action,
-                  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                  u8 buf_size, bool amsdu)
+                  struct ieee80211_ampdu_params *params)
 {
-
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       u8 buf_size = params->buf_size;
        int i, rc = 0;
        struct mwl8k_priv *priv = hw->priv;
        struct mwl8k_ampdu_stream *stream;
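Several drivers below repeat this same conversion, so the shape of the new parameter block is worth spelling out once; the field list here is inferred from the accessors used in these hunks plus the old signature, and the authoritative definition (which may carry extra members) lives in mac80211:

/* Inferred layout only -- member order and any additional fields are
 * assumptions, not taken from include/net/mac80211.h.
 */
struct ieee80211_ampdu_params {
        struct ieee80211_sta *sta;
        enum ieee80211_ampdu_mlme_action action;
        u16 tid;
        u16 ssn;
        u8 buf_size;
        bool amsdu;
};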
index f715eee398510df829a61ba0f431610d1ab32baa..e70dd95239117f7e0ef8a2d668b6798727a06b46 100644 (file)
@@ -334,11 +334,13 @@ static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 
 static int
 mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                 enum ieee80211_ampdu_mlme_action action,
-                 struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
-                 bool amsdu)
+                 struct ieee80211_ampdu_params *params)
 {
        struct mt7601u_dev *dev = hw->priv;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
        struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
 
        WARN_ON(msta->wcid.idx > GROUP_WCID(0));
index 9a3966cd6fbedb0e41e1f2c741405b10aee92fdc..155f343981fe29679d278b0621d3cf7ce64b2a77 100644 (file)
@@ -273,8 +273,10 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
        rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
index 1a6740b4d396211cd3c00290cd9abb06128c55bf..2553cdd7406623cde30a99531e4d86a74ae4836e 100644 (file)
@@ -274,8 +274,10 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
index d26018f30b7dfa9e8381ceb609d5eddd10ff47a8..2d64611de300146d98bcc254171f961e3af26831 100644 (file)
@@ -437,8 +437,10 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
index 9733b31a780d380fd2bfd550efe155943b0fdb34..7fa0128de7e310be2978b35a97746bc011acb324 100644 (file)
@@ -1490,7 +1490,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_FCSFAIL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
                           !(filter_flags & FIF_PLCPFAIL));
-       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
@@ -7935,10 +7936,11 @@ u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 EXPORT_SYMBOL_GPL(rt2800_get_tsf);
 
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                       u8 buf_size, bool amsdu)
+                       struct ieee80211_ampdu_params *params)
 {
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
        struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv;
        int ret = 0;
 
index 440790b92b19e2927fd9b9d7446cbd3c8eef4f30..83f1a44fb9b481cb5f2a8dda9e9914b8113e91bb 100644 (file)
@@ -218,9 +218,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw,
                   const struct ieee80211_tx_queue_params *params);
 u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                       u8 buf_size, bool amsdu);
+                       struct ieee80211_ampdu_params *params);
 int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
                      struct survey_info *survey);
 void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
index 3282ddb766f4224a8e10e037fa7ea4332dfe0e27..6418620f95ff62a7e1a562b7cb29d6e306d2d4ad 100644 (file)
  * amount of bytes needed to move the data.
  */
 #define ALIGN_SIZE(__skb, __header) \
-       (  ((unsigned long)((__skb)->data + (__header))) & 3 )
+       (((unsigned long)((__skb)->data + (__header))) & 3)
 
 /*
  * Constants for extra TX headroom for alignment purposes.
 #define SLOT_TIME              20
 #define SHORT_SLOT_TIME                9
 #define SIFS                   10
-#define PIFS                   ( SIFS + SLOT_TIME )
-#define SHORT_PIFS             ( SIFS + SHORT_SLOT_TIME )
-#define DIFS                   ( PIFS + SLOT_TIME )
-#define SHORT_DIFS             ( SHORT_PIFS + SHORT_SLOT_TIME )
-#define EIFS                   ( SIFS + DIFS + \
-                                 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
-#define SHORT_EIFS             ( SIFS + SHORT_DIFS + \
-                                 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
+#define PIFS                   (SIFS + SLOT_TIME)
+#define SHORT_PIFS             (SIFS + SHORT_SLOT_TIME)
+#define DIFS                   (PIFS + SLOT_TIME)
+#define SHORT_DIFS             (SHORT_PIFS + SHORT_SLOT_TIME)
+#define EIFS                   (SIFS + DIFS + \
+                                 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10))
+#define SHORT_EIFS             (SIFS + SHORT_DIFS + \
+                                 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10))
 
 enum rt2x00_chip_intf {
        RT2X00_CHIP_INTF_PCI,
@@ -669,6 +669,7 @@ enum rt2x00_state_flags {
        CONFIG_POWERSAVING,
        CONFIG_HT_DISABLED,
        CONFIG_QOS_DISABLED,
+       CONFIG_MONITORING,
 
        /*
         * Mark we currently are sequentially reading TX_STA_FIFO register
index 7e8bb1198ae9f749dfc3e419f1ae652ffa7a0b8d..6a1f508d472f0e6ac3be534d780d209e90a0278d 100644 (file)
@@ -277,6 +277,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
        else
                clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
 
+       if (conf->flags & IEEE80211_CONF_MONITOR)
+               set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+       else
+               clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+
        rt2x00dev->curr_band = conf->chandef.chan->band;
        rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
        rt2x00dev->tx_power = conf->power_level;
index 90fdb02b55e79debd5bd89b31104e90a260c596c..25ee3cb8e982db287ac269cac97666ad7dd60ec6 100644 (file)
@@ -629,7 +629,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
        data += sprintf(data, "register\tbase\twords\twordsize\n");
 #define RT2X00DEBUGFS_SPRINTF_REGISTER(__name)                 \
 {                                                              \
-       if(debug->__name.read)                                  \
+       if (debug->__name.read)                                 \
                data += sprintf(data, __stringify(__name)       \
                                "\t%d\t%d\t%d\n",               \
                                debug->__name.word_base,        \
@@ -699,7 +699,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
 
 #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name)                    \
 ({                                                                             \
-       if(debug->__name.read) {                                                \
+       if (debug->__name.read) {                                               \
                (__intf)->__name##_off_entry =                                  \
                debugfs_create_u32(__stringify(__name) "_offset",               \
                                       S_IRUSR | S_IWUSR,                       \
index 3c26ee65a41513127cbc999416ee90fabc11d890..13da95a24cf77bb366481252226f3c4566352d67 100644 (file)
@@ -385,11 +385,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
                        *total_flags |= FIF_PSPOLL;
        }
 
-       /*
-        * Check if there is any work left for us.
-        */
-       if (rt2x00dev->packet_filter == *total_flags)
-               return;
        rt2x00dev->packet_filter = *total_flags;
 
        rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
index c0e730ea1b69058a663f953e73165fa23a850c72..24a3436ef952870a118e0adddf5f06fa3c5ca6f5 100644 (file)
@@ -530,8 +530,10 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
                           !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
-       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
index 1442075a838218f61366d84d1ec5d40fb45bc7be..ab8641547a1f6a61f763f9ab7dfc2e8e15dd7a4f 100644 (file)
 #define PAIRWISE_TA_TABLE_BASE         0x1a00
 
 #define SHARED_KEY_ENTRY(__idx) \
-       ( SHARED_KEY_TABLE_BASE + \
-               ((__idx) * sizeof(struct hw_key_entry)) )
+       (SHARED_KEY_TABLE_BASE + \
+               ((__idx) * sizeof(struct hw_key_entry)))
 #define PAIRWISE_KEY_ENTRY(__idx) \
-       ( PAIRWISE_KEY_TABLE_BASE + \
-               ((__idx) * sizeof(struct hw_key_entry)) )
+       (PAIRWISE_KEY_TABLE_BASE + \
+               ((__idx) * sizeof(struct hw_key_entry)))
 #define PAIRWISE_TA_ENTRY(__idx) \
-       ( PAIRWISE_TA_TABLE_BASE + \
-               ((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
+       (PAIRWISE_TA_TABLE_BASE + \
+               ((__idx) * sizeof(struct hw_pairwise_ta_entry)))
 
 struct hw_key_entry {
        u8 key[16];
@@ -180,7 +180,7 @@ struct hw_pairwise_ta_entry {
 #define HW_BEACON_BASE3                        0x2f00
 
 #define HW_BEACON_OFFSET(__index) \
-       ( HW_BEACON_BASE0 + (__index * 0x0100) )
+       (HW_BEACON_BASE0 + (__index * 0x0100))
 
 /*
  * HOST-MCU shared memory.
@@ -1287,9 +1287,9 @@ struct hw_pairwise_ta_entry {
 /*
  * DMA descriptor defines.
  */
-#define TXD_DESC_SIZE                  ( 16 * sizeof(__le32) )
-#define TXINFO_SIZE                    ( 6 * sizeof(__le32) )
-#define RXD_DESC_SIZE                  ( 16 * sizeof(__le32) )
+#define TXD_DESC_SIZE                  (16 * sizeof(__le32))
+#define TXINFO_SIZE                    (6 * sizeof(__le32))
+#define RXD_DESC_SIZE                  (16 * sizeof(__le32))
 
 /*
  * TX descriptor format for TX, PRIO and Beacon Ring.
index 7081e13b4fd67ebbaa7fa8af3f15a1d81fbc219e..7bbc869311681988dbbcd50907fd33d84f4f1a85 100644 (file)
@@ -480,8 +480,10 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
                           !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
-       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
index 6aed923a709ae3606225307cdcb2b7170442e11d..7d820c3953754260c05a5f7e21478e1cbf14555e 100644 (file)
@@ -5375,13 +5375,13 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
 static int
 rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                     enum ieee80211_ampdu_mlme_action action,
-                     struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
-                     bool amsdu)
+                     struct ieee80211_ampdu_params *params)
 {
        struct rtl8xxxu_priv *priv = hw->priv;
        struct device *dev = &priv->udev->dev;
        u8 ampdu_factor, ampdu_density;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
 
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
index 4ae421ef30d94f4f8bc93d85645eeda4548ba835..f2507610314ba81b94accf2a94cfb0284c11b641 100644 (file)
@@ -1371,11 +1371,13 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
 
 static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
-                              enum ieee80211_ampdu_mlme_action action,
-                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                              u8 buf_size, bool amsdu)
+                              struct ieee80211_ampdu_params *params)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
index 74c14ce28238eed70f8ad798f0724642147411d3..28f7010e7108bf3c3f8d3a89296b2e0b16f2c555 100644 (file)
@@ -138,6 +138,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
                    ((wireless_mode == WIRELESS_MODE_N_5G) ||
                     (wireless_mode == WIRELESS_MODE_N_24G)))
                        rate->flags |= IEEE80211_TX_RC_MCS;
+               if (sta && sta->vht_cap.vht_supported &&
+                   (wireless_mode == WIRELESS_MODE_AC_5G ||
+                    wireless_mode == WIRELESS_MODE_AC_24G ||
+                    wireless_mode == WIRELESS_MODE_AC_ONLY))
+                       rate->flags |= IEEE80211_TX_RC_VHT_MCS;
        }
 }
 
index a62bf0a65c321bb73553c403c5233ccd30d1f8c7..5be34118e0af22b69392f5caf07f9eef5672c7fe 100644 (file)
@@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
        case COUNTRY_CODE_SPAIN:
        case COUNTRY_CODE_FRANCE:
        case COUNTRY_CODE_ISRAEL:
-       case COUNTRY_CODE_WORLD_WIDE_13:
                return &rtl_regdom_12_13;
        case COUNTRY_CODE_MKK:
        case COUNTRY_CODE_MKK1:
@@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
                return &rtl_regdom_14_60_64;
        case COUNTRY_CODE_GLOBAL_DOMAIN:
                return &rtl_regdom_14;
+       case COUNTRY_CODE_WORLD_WIDE_13:
        case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
                return &rtl_regdom_12_13_5g_all;
        default:
index b5bcc933a2a683df412139b1204fee320096a889..4df992de7d0731508a6c42fccf51096928156e9e 100644 (file)
@@ -659,29 +659,24 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
  *                              informs the f/w regarding this.
  * @hw: Pointer to the ieee80211_hw structure.
  * @vif: Pointer to the ieee80211_vif structure.
- * @action: ieee80211_ampdu_mlme_action enum.
- * @sta: Pointer to the ieee80211_sta structure.
- * @tid: Traffic identifier.
- * @ssn: Pointer to ssn value.
- * @buf_size: Buffer size (for kernel version > 2.6.38).
- * @amsdu: is AMSDU in AMPDU allowed
+ * @params: Pointer to A-MPDU action parameters.
  *
  * Return: status: 0 on success, negative error code on failure.
  */
 static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif,
-                                    enum ieee80211_ampdu_mlme_action action,
-                                    struct ieee80211_sta *sta,
-                                    unsigned short tid,
-                                    unsigned short *ssn,
-                                    unsigned char buf_size,
-                                    bool amsdu)
+                                    struct ieee80211_ampdu_params *params)
 {
        int status = -EOPNOTSUPP;
        struct rsi_hw *adapter = hw->priv;
        struct rsi_common *common = adapter->priv;
        u16 seq_no = 0;
        u8 ii = 0;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       u8 buf_size = params->buf_size;
 
        for (ii = 0; ii < RSI_MAX_VIFS; ii++) {
                if (vif == adapter->vifs[ii])
index 06321c799c902689825c89098c796e875c737079..d0ddcde6c695fb6c5efd9b2283c76e98328fdbdf 100644 (file)
@@ -2129,9 +2129,7 @@ void cw1200_mcast_timeout(unsigned long arg)
 
 int cw1200_ampdu_action(struct ieee80211_hw *hw,
                        struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                       u8 buf_size, bool amsdu)
+                       struct ieee80211_ampdu_params *params)
 {
        /* Aggregation is implemented fully in firmware,
         * including block ack negotiation. Do not allow
index bebb3379017f6d40c3cd58b92ddfaabab1259f59..a0bacaa39b3193f230c61526afe6bdc68f38b7a0 100644 (file)
@@ -109,9 +109,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
                             u32 changed);
 int cw1200_ampdu_action(struct ieee80211_hw *hw,
                        struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                       u8 buf_size, bool amsdu);
+                       struct ieee80211_ampdu_params *params);
 
 void cw1200_suspend_resume(struct cw1200_common *priv,
                          struct wsm_suspend_resume *arg);
index 969c9d79bfc8bd92c2c4e48c213c8e88c9611121..8a8f1e7113846af0ac1bec70d19c815dcf60c9b1 100644 (file)
@@ -13,7 +13,7 @@ config WLCORE
 
 config WLCORE_SPI
        tristate "TI wlcore SPI support"
-       depends on WLCORE && SPI_MASTER
+       depends on WLCORE && SPI_MASTER && OF
        select CRC7
        ---help---
          This module adds support for the SPI interface of adapters using
index c96405498bf43c68b997e73d512e88be4b153ec0..4b59f67724dead0621174da68b2e6167506b3b95 100644 (file)
@@ -38,7 +38,7 @@
 
 int wlcore_event_fw_logger(struct wl1271 *wl)
 {
-       u32 ret;
+       int ret;
        struct fw_logger_information fw_log;
        u8  *buffer;
        u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
index d1109c4f0f0d1d7570ec373734db3f93117556f9..45662cf3169f78dd20554cde3373336f84cb6516 100644 (file)
@@ -5187,14 +5187,16 @@ out:
 
 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
-                                 enum ieee80211_ampdu_mlme_action action,
-                                 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                                 u8 buf_size, bool amsdu)
+                                 struct ieee80211_ampdu_params *params)
 {
        struct wl1271 *wl = hw->priv;
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
        u8 hlid, *ba_bitmap;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
                     tid);
index 44f059f7f34e9473ce4e4b33648a3af9b2e44f1e..020ac1a4b40834131d6355a7464b73129fd93ad1 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
 #include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
 
 #include "wlcore.h"
 #include "wl12xx_80211.h"
@@ -81,6 +83,7 @@
 struct wl12xx_spi_glue {
        struct device *dev;
        struct platform_device *core;
+       struct regulator *reg; /* Power regulator */
 };
 
 static void wl12xx_spi_reset(struct device *child)
@@ -318,14 +321,76 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
        return 0;
 }
 
+/**
+ * wl12xx_spi_set_power - power on/off the wl12xx unit
+ * @child: wl12xx device handle.
+ * @enable: true/false to power on/off the unit.
+ *
+ * Use the WiFi enable regulator to enable/disable the WiFi unit.
+ */
+static int wl12xx_spi_set_power(struct device *child, bool enable)
+{
+       int ret = 0;
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+
+       WARN_ON(!glue->reg);
+
+       /* Update regulator state */
+       if (enable) {
+               ret = regulator_enable(glue->reg);
+               if (ret)
+                       dev_err(child, "Power enable failure\n");
+       } else {
+               ret =  regulator_disable(glue->reg);
+               if (ret)
+                       dev_err(child, "Power disable failure\n");
+       }
+
+       return ret;
+}
+
 static struct wl1271_if_operations spi_ops = {
        .read           = wl12xx_spi_raw_read,
        .write          = wl12xx_spi_raw_write,
        .reset          = wl12xx_spi_reset,
        .init           = wl12xx_spi_init,
+       .power          = wl12xx_spi_set_power,
        .set_block_size = NULL,
 };
 
+static const struct of_device_id wlcore_spi_of_match_table[] = {
+       { .compatible = "ti,wl1271" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table);
+
+/**
+ * wlcore_probe_of - DT node parsing.
+ * @spi: SPI slave device parameters.
+ * @glue: wl12xx SPI bus to slave device glue parameters.
+ * @pdev_data: wlcore device parameters.
+ */
+static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
+                          struct wlcore_platdev_data *pdev_data)
+{
+       struct device_node *dt_node = spi->dev.of_node;
+       int ret;
+
+       if (of_find_property(dt_node, "clock-xtal", NULL))
+               pdev_data->ref_clock_xtal = true;
+
+       ret = of_property_read_u32(dt_node, "ref-clock-frequency",
+                                  &pdev_data->ref_clock_freq);
+       if (IS_ERR_VALUE(ret)) {
+               dev_err(glue->dev,
+                       "can't get reference clock frequency (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
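For orientation, an illustrative device-tree stanza that would satisfy the probe path above and below; compatible, clock-xtal, ref-clock-frequency and the vwlan supply name come from this diff, while reg, the interrupt specifier and the 38.4 MHz value are assumptions:

/* Illustrative DT node only; the wlcore SPI binding document is
 * authoritative.
 *
 *      wlcore: wlcore@1 {
 *              compatible = "ti,wl1271";
 *              reg = <1>;
 *              interrupt-parent = <&gpio0>;
 *              interrupts = <...>;
 *              vwlan-supply = <&wlan_en_reg>;
 *              clock-xtal;
 *              ref-clock-frequency = <38400000>;
 *      };
 */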
 static int wl1271_probe(struct spi_device *spi)
 {
        struct wl12xx_spi_glue *glue;
@@ -335,8 +400,6 @@ static int wl1271_probe(struct spi_device *spi)
 
        memset(&pdev_data, 0x00, sizeof(pdev_data));
 
-       /* TODO: add DT parsing when needed */
-
        pdev_data.if_ops = &spi_ops;
 
        glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
@@ -353,6 +416,21 @@ static int wl1271_probe(struct spi_device *spi)
         * comes from the board-peripherals file */
        spi->bits_per_word = 32;
 
+       glue->reg = devm_regulator_get(&spi->dev, "vwlan");
+       if (PTR_ERR(glue->reg) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       if (IS_ERR(glue->reg)) {
+               dev_err(glue->dev, "can't get regulator\n");
+               return PTR_ERR(glue->reg);
+       }
+
+       ret = wlcore_probe_of(spi, glue, &pdev_data);
+       if (IS_ERR_VALUE(ret)) {
+               dev_err(glue->dev,
+                       "can't get device tree parameters (%d)\n", ret);
+               return ret;
+       }
+
        ret = spi_setup(spi);
        if (ret < 0) {
                dev_err(glue->dev, "spi_setup failed\n");
@@ -370,7 +448,7 @@ static int wl1271_probe(struct spi_device *spi)
        memset(res, 0x00, sizeof(res));
 
        res[0].start = spi->irq;
-       res[0].flags = IORESOURCE_IRQ;
+       res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(spi->irq);
        res[0].name = "irq";
 
        ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
@@ -408,10 +486,10 @@ static int wl1271_remove(struct spi_device *spi)
        return 0;
 }
 
-
 static struct spi_driver wl1271_spi_driver = {
        .driver = {
                .name           = "wl1271_spi",
+               .of_match_table = of_match_ptr(wlcore_spi_of_match_table),
        },
 
        .probe          = wl1271_probe,
index 0333ab0fd9267f2910398bfc3270f615e76cc686..112825200d4192d6da01ae5b70ba52e41c886ffc 100644 (file)
@@ -251,6 +251,7 @@ struct xenvif {
        unsigned int stalled_queues;
 
        struct xenbus_watch credit_watch;
+       struct xenbus_watch mcast_ctrl_watch;
 
        spinlock_t lock;
 
index 56ebd8267386e6a91cabf506962e15f5d83531ec..39a303de20dd4e0f37bfa00d4c5eaade6590519c 100644 (file)
@@ -327,7 +327,7 @@ static int netback_probe(struct xenbus_device *dev,
                        goto abort_transaction;
                }
 
-               /* We support multicast-control. */
+               /* We support dynamic multicast-control. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-multicast-control", "%d", 1);
                if (err) {
@@ -335,6 +335,14 @@ static int netback_probe(struct xenbus_device *dev,
                        goto abort_transaction;
                }
 
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "feature-dynamic-multicast-control",
+                                   "%d", 1);
+               if (err) {
+                       message = "writing feature-dynamic-multicast-control";
+                       goto abort_transaction;
+               }
+
                err = xenbus_transaction_end(xbt, 0);
        } while (err == -EAGAIN);
 
@@ -683,7 +691,8 @@ static void xen_net_rate_changed(struct xenbus_watch *watch,
        }
 }
 
-static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+static int xen_register_credit_watch(struct xenbus_device *dev,
+                                    struct xenvif *vif)
 {
        int err = 0;
        char *node;
@@ -708,7 +717,7 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
        return err;
 }
 
-static void xen_unregister_watchers(struct xenvif *vif)
+static void xen_unregister_credit_watch(struct xenvif *vif)
 {
        if (vif->credit_watch.node) {
                unregister_xenbus_watch(&vif->credit_watch);
@@ -717,6 +726,75 @@ static void xen_unregister_watchers(struct xenvif *vif)
        }
 }
 
+static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
+                                  const char **vec, unsigned int len)
+{
+       struct xenvif *vif = container_of(watch, struct xenvif,
+                                         mcast_ctrl_watch);
+       struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
+       int val;
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend,
+                        "request-multicast-control", "%d", &val) < 0)
+               val = 0;
+       vif->multicast_control = !!val;
+}
+
+static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
+                                        struct xenvif *vif)
+{
+       int err = 0;
+       char *node;
+       unsigned maxlen = strlen(dev->otherend) +
+               sizeof("/request-multicast-control");
+
+       if (vif->mcast_ctrl_watch.node) {
+               pr_err_ratelimited("Watch is already registered\n");
+               return -EADDRINUSE;
+       }
+
+       node = kmalloc(maxlen, GFP_KERNEL);
+       if (!node) {
+               pr_err("Failed to allocate memory for watch\n");
+               return -ENOMEM;
+       }
+       snprintf(node, maxlen, "%s/request-multicast-control",
+                dev->otherend);
+       vif->mcast_ctrl_watch.node = node;
+       vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
+       err = register_xenbus_watch(&vif->mcast_ctrl_watch);
+       if (err) {
+               pr_err("Failed to set watcher %s\n",
+                      vif->mcast_ctrl_watch.node);
+               kfree(node);
+               vif->mcast_ctrl_watch.node = NULL;
+               vif->mcast_ctrl_watch.callback = NULL;
+       }
+       return err;
+}
+
+static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
+{
+       if (vif->mcast_ctrl_watch.node) {
+               unregister_xenbus_watch(&vif->mcast_ctrl_watch);
+               kfree(vif->mcast_ctrl_watch.node);
+               vif->mcast_ctrl_watch.node = NULL;
+       }
+}
+
+static void xen_register_watchers(struct xenbus_device *dev,
+                                 struct xenvif *vif)
+{
+       xen_register_credit_watch(dev, vif);
+       xen_register_mcast_ctrl_watch(dev, vif);
+}
+
+static void xen_unregister_watchers(struct xenvif *vif)
+{
+       xen_unregister_mcast_ctrl_watch(vif);
+       xen_unregister_credit_watch(vif);
+}
+
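The new watch fires on a key written from the frontend's xenstore directory; a hedged sketch of that frontend-side write, mirroring the xenbus_printf usage earlier in this file (transaction handling elided):

/* Hypothetical frontend-side counterpart, not part of this backend
 * patch: ask the backend to toggle multicast control at runtime by
 * (re)writing the key the backend now watches.
 */
err = xenbus_printf(XBT_NIL, dev->nodename,
                    "request-multicast-control", "%d", 1);
if (err)
        dev_warn(&dev->dev, "toggling multicast control failed: %d\n", err);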
 static void unregister_hotplug_status_watch(struct backend_info *be)
 {
        if (be->have_hotplug_status_watch) {
@@ -1030,11 +1108,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
                val = 0;
        vif->ipv6_csum = !!val;
 
-       if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
-                        "%d", &val) < 0)
-               val = 0;
-       vif->multicast_control = !!val;
-
        return 0;
 }
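
Editorial note: the hunk above drops the one-shot read of "request-multicast-control" at connect time; the xen_mcast_ctrl_changed() watch registered earlier now tracks the key whenever the frontend writes it, and the backend advertises "feature-dynamic-multicast-control" so the frontend knows dynamic updates are honoured. As a rough, hypothetical sketch (not part of this series; the function name is invented for illustration), a frontend wanting the backend to filter multicast could write the key from its own connect path:

	/* Hypothetical frontend-side sketch; needs <xen/xenbus.h>. */
	static int talk_to_backend_request_mcast(struct xenbus_device *dev,
						 struct xenbus_transaction xbt)
	{
		int feature = 0;

		/* Did the backend advertise dynamic multicast control? */
		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-dynamic-multicast-control",
				 "%d", &feature) < 0)
			feature = 0;
		if (!feature)
			return 0;	/* backend cannot filter, nothing to request */

		/* The backend's mcast_ctrl watch fires as soon as this node is written. */
		return xenbus_printf(xbt, dev->nodename,
				     "request-multicast-control", "%d", 1);
	}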
 
index d6abf191122a3260cbd97f5c1a2dbd1c2cb5c5e2..96ccd4e943db49908b0821f1d515aef36072db15 100644 (file)
@@ -364,6 +364,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
        RING_IDX cons, prod;
        unsigned short id;
        struct sk_buff *skb;
+       bool more_to_do;
 
        BUG_ON(!netif_carrier_ok(queue->info->netdev));
 
@@ -398,18 +399,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 
                queue->tx.rsp_cons = prod;
 
-               /*
-                * Set a new event, then check for race with update of tx_cons.
-                * Note that it is essential to schedule a callback, no matter
-                * how few buffers are pending. Even if there is space in the
-                * transmit ring, higher layers may be blocked because too much
-                * data is outstanding: in such cases notification from Xen is
-                * likely to be the only kick that we'll get.
-                */
-               queue->tx.sring->rsp_event =
-                       prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
-               mb();           /* update shared area */
-       } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
+               RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
+       } while (more_to_do);
 
        xennet_maybe_wake_tx(queue);
 }
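
Editorial note: RING_FINAL_CHECK_FOR_RESPONSES() is the standard helper from include/xen/interface/io/ring.h; it replaces the open-coded rsp_event arithmetic removed above with the usual "check, re-arm the event, re-check" sequence. A rough approximation of its behaviour (a sketch, not the macro's exact definition):

	/* Sketch of what the macro does for a TX front ring. */
	static inline bool tx_final_check_for_responses(struct xen_netif_tx_front_ring *ring)
	{
		if (RING_HAS_UNCONSUMED_RESPONSES(ring))
			return true;

		/* Nothing pending: ask for an event at the next response ... */
		ring->sring->rsp_event = ring->rsp_cons + 1;
		mb();	/* ... then re-check to close the race with the backend */

		return RING_HAS_UNCONSUMED_RESPONSES(ring);
	}
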
index 8ebfcaae3f5a06a4aca19611fc7c5abdeb29f16a..9edf7eb7d17cc39713e67e4dd62d4e16c0d93e07 100644 (file)
@@ -1277,10 +1277,12 @@ static ssize_t mode_show(struct device *dev,
 
        device_lock(dev);
        claim = ndns->claim;
-       if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim)))
-               mode = "memory";
-       else if (claim && is_nd_btt(claim))
+       if (claim && is_nd_btt(claim))
                mode = "safe";
+       else if (claim && is_nd_pfn(claim))
+               mode = "memory";
+       else if (!claim && pmem_should_map_pages(dev))
+               mode = "memory";
        else
                mode = "raw";
        rc = sprintf(buf, "%s\n", mode);
index 0cc9048b86e23103900d36f75a0b64746521ee18..ae81a2f1da50b1e9f9cf5a14bd180df19b3ccab1 100644 (file)
@@ -301,10 +301,8 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 
        switch (le32_to_cpu(pfn_sb->mode)) {
        case PFN_MODE_RAM:
-               break;
        case PFN_MODE_PMEM:
-               /* TODO: allocate from PMEM support */
-               return -ENOTTY;
+               break;
        default:
                return -ENXIO;
        }
index 6fd4e5a5ef4a495bbd412ee33b931f4fb3a8a24f..9d11d98373128fef3de3406975d8bcc2ca286b9a 100644 (file)
@@ -70,6 +70,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
        if (pos >= nvmem->size)
                return 0;
 
+       if (count < nvmem->word_size)
+               return -EINVAL;
+
        if (pos + count > nvmem->size)
                count = nvmem->size - pos;
 
@@ -95,6 +98,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
        if (pos >= nvmem->size)
                return 0;
 
+       if (count < nvmem->word_size)
+               return -EINVAL;
+
        if (pos + count > nvmem->size)
                count = nvmem->size - pos;
 
index afb67e7eeee4a89612d56d0404726d7778c12a53..3829e5fbf8c366bf3ae7fa7149e87e5c0ddcf6f3 100644 (file)
@@ -21,6 +21,7 @@ static struct regmap_config qfprom_regmap_config = {
        .reg_bits = 32,
        .val_bits = 8,
        .reg_stride = 1,
+       .val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static struct nvmem_config econfig = {
index 655f79db7899ffd0628714d51203847630a8075c..1f98156f8996401f525eedbfde2053d9cd95f807 100644 (file)
@@ -976,13 +976,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
+#ifndef MIN_MEMBLOCK_ADDR
+#define MIN_MEMBLOCK_ADDR      __pa(PAGE_OFFSET)
+#endif
 #ifndef MAX_MEMBLOCK_ADDR
 #define MAX_MEMBLOCK_ADDR      ((phys_addr_t)~0)
 #endif
 
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-       const u64 phys_offset = __pa(PAGE_OFFSET);
+       const u64 phys_offset = MIN_MEMBLOCK_ADDR;
 
        if (!PAGE_ALIGNED(base)) {
                if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
index 706e3ff67f8b02f4403a841c8b92f66f3c0e5678..7ee21ae305ae10d480adce961ad59fe284ca366e 100644 (file)
@@ -679,18 +679,6 @@ u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
        return __of_msi_map_rid(dev, &msi_np, rid_in);
 }
 
-static struct irq_domain *__of_get_msi_domain(struct device_node *np,
-                                             enum irq_domain_bus_token token)
-{
-       struct irq_domain *d;
-
-       d = irq_find_matching_host(np, token);
-       if (!d)
-               d = irq_find_host(np);
-
-       return d;
-}
-
 /**
  * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
  * @dev: device for which the mapping is to be done.
@@ -706,7 +694,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
        struct device_node *np = NULL;
 
        __of_msi_map_rid(dev, &np, rid);
-       return __of_get_msi_domain(np, DOMAIN_BUS_PCI_MSI);
+       return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
 }
 
 /**
@@ -730,7 +718,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
        /* Check for a single msi-parent property */
        msi_np = of_parse_phandle(np, "msi-parent", 0);
        if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
-               d = __of_get_msi_domain(msi_np, token);
+               d = irq_find_matching_host(msi_np, token);
                if (!d)
                        of_node_put(msi_np);
                return d;
@@ -744,7 +732,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
                while (!of_parse_phandle_with_args(np, "msi-parent",
                                                   "#msi-cells",
                                                   index, &args)) {
-                       d = __of_get_msi_domain(args.np, token);
+                       d = irq_find_matching_host(args.np, token);
                        if (d)
                                return d;
 
index 86829f8064a61eb324a0a9ea28f63f0bf3ceadbd..39c4be41ef83d6cf23302fb9af6270f8857f85de 100644 (file)
@@ -143,11 +143,32 @@ int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
 }
 EXPORT_SYMBOL(of_mdio_parse_addr);
 
+/* The following is a list of PHY compatible strings which appear in
+ * some DTBs. The compatible string is never matched against a PHY
+ * driver, so it is pointless. We only expect devices which are not PHYs
+ * to have a compatible string, so they can be matched to an MDIO
+ * driver.  Encourage users to upgrade their DT blobs to remove these.
+ */
+static const struct of_device_id whitelist_phys[] = {
+       { .compatible = "brcm,40nm-ephy" },
+       { .compatible = "marvell,88E1111", },
+       { .compatible = "marvell,88e1116", },
+       { .compatible = "marvell,88e1118", },
+       { .compatible = "marvell,88e1145", },
+       { .compatible = "marvell,88e1149r", },
+       { .compatible = "marvell,88e1310", },
+       { .compatible = "marvell,88E1510", },
+       { .compatible = "marvell,88E1514", },
+       { .compatible = "moxa,moxart-rtl8201cp", },
+       {}
+};
+
 /*
  * Return true if the child node is for a phy. It must either:
  * o Compatible string of "ethernet-phy-idX.X"
  * o Compatible string of "ethernet-phy-ieee802.3-c45"
  * o Compatible string of "ethernet-phy-ieee802.3-c22"
+ * o In the whitelist above (and issue a warning)
  * o No compatibility string
  *
  * A device which is not a phy is expected to have a compatible string
@@ -166,6 +187,13 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
        if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22"))
                return true;
 
+       if (of_match_node(whitelist_phys, child)) {
+               pr_warn(FW_WARN
+                       "%s: Whitelisted compatible string. Please remove\n",
+                       child->full_name);
+               return true;
+       }
+
        if (!of_find_property(child, "compatible", NULL))
                return true;
 
@@ -256,11 +284,19 @@ static int of_phy_match(struct device *dev, void *phy_np)
 struct phy_device *of_phy_find_device(struct device_node *phy_np)
 {
        struct device *d;
+       struct mdio_device *mdiodev;
+
        if (!phy_np)
                return NULL;
 
        d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match);
-       return d ? to_phy_device(d) : NULL;
+       if (d) {
+               mdiodev = to_mdio_device(d);
+               if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+                       return to_phy_device(d);
+       }
+
+       return NULL;
 }
 EXPORT_SYMBOL(of_phy_find_device);
 
index b1449f71601cb257f1057b77c217a943da92cab8..13f4fed3804850804dfa9ad285c403e68729c150 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/of_device.h>
 #include <linux/of_pci.h>
 #include <linux/slab.h>
-#include <asm-generic/pci-bridge.h>
 
 static inline int __of_pci_pci_compare(struct device_node *node,
                                       unsigned int data)
index 89b3befc7155325b12f21d4667a675b52423bb6d..f2187d491475a6144cd4b11cff7c9589e1a6c82c 100644 (file)
@@ -291,7 +291,12 @@ void pci_bus_add_device(struct pci_dev *dev)
 
        dev->match_driver = true;
        retval = device_attach(&dev->dev);
-       WARN_ON(retval < 0);
+       if (retval < 0) {
+               dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
+               pci_proc_detach_device(dev);
+               pci_remove_sysfs_dev_files(dev);
+               return;
+       }
 
        dev->is_added = 1;
 }
index fe600964fa50177bc17b28627568268608896010..bd3f7d0ef943d6af6c57ba60c592908fa8caddb9 100644 (file)
@@ -202,6 +202,23 @@ static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
        return 0;
 }
 
+static void imx6_pcie_reset_phy(struct pcie_port *pp)
+{
+       u32 tmp;
+
+       pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+       tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+               PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+       pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+
+       usleep_range(2000, 3000);
+
+       pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+       tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+                 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+       pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+}
+
 /*  Added for PCI abort handling */
 static int imx6q_pcie_abort_handler(unsigned long addr,
                unsigned int fsr, struct pt_regs *regs)
@@ -332,16 +349,30 @@ static int imx6_pcie_wait_for_link(struct pcie_port *pp)
 {
        unsigned int retries;
 
+       /*
+        * Test if the PHY reports that the link is up and also that the LTSSM
+        * training finished. There are three possible states of the link when
+        * this code is called:
+        * 1) The link is DOWN (unlikely)
+        *    The link didn't come up yet for some reason. This usually means
+        *    we have a real problem somewhere, if it happens with a peripheral
+        *    connected. This state calls for inspection of the DEBUG registers.
+        * 2) The link is UP, but still in LTSSM training
+        *    Wait for the training to finish, which should take a very short
+        *    time. If the training does not finish, we have a problem and we
+        *    need to inspect the DEBUG registers. If the training does finish,
+        *    the link is up and operating correctly.
+        * 3) The link is UP and no longer in LTSSM training
+        *    The link is up and operating correctly.
+        */
        for (retries = 0; retries < 200; retries++) {
-               if (dw_pcie_link_up(pp))
+               u32 reg = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
+               if ((reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
+                   !(reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
                        return 0;
-               usleep_range(100, 1000);
+               usleep_range(1000, 2000);
        }
 
-       dev_err(pp->dev, "phy link never came up\n");
-       dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
-               readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
-               readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
        return -EINVAL;
 }
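
Editorial note (worked arithmetic, derived from the hunk above): the retry budget grows even though each sleep is coarser:

	200 * usleep_range(1000, 2000)  ->  roughly 200 ms ... 400 ms  (new)
	200 * usleep_range(100, 1000)   ->  roughly  20 ms ... 200 ms  (old)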
 
@@ -390,8 +421,10 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
                        IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
 
        ret = imx6_pcie_wait_for_link(pp);
-       if (ret)
-               return ret;
+       if (ret) {
+               dev_info(pp->dev, "Link never came up\n");
+               goto err_reset_phy;
+       }
 
        /* Allow Gen2 mode after the link is up. */
        tmp = readl(pp->dbi_base + PCIE_RC_LCR);
@@ -410,19 +443,28 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
        ret = imx6_pcie_wait_for_speed_change(pp);
        if (ret) {
                dev_err(pp->dev, "Failed to bring link up!\n");
-               return ret;
+               goto err_reset_phy;
        }
 
        /* Make sure link training is finished as well! */
        ret = imx6_pcie_wait_for_link(pp);
        if (ret) {
                dev_err(pp->dev, "Failed to bring link up!\n");
-               return ret;
+               goto err_reset_phy;
        }
 
        tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
        dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
+
        return 0;
+
+err_reset_phy:
+       dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+               readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+               readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+       imx6_pcie_reset_phy(pp);
+
+       return ret;
 }
 
 static void imx6_pcie_host_init(struct pcie_port *pp)
@@ -441,81 +483,10 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
                dw_pcie_msi_init(pp);
 }
 
-static void imx6_pcie_reset_phy(struct pcie_port *pp)
-{
-       u32 tmp;
-
-       pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
-       tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
-               PHY_RX_OVRD_IN_LO_RX_PLL_EN);
-       pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
-
-       usleep_range(2000, 3000);
-
-       pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
-       tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
-                 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
-       pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
-}
-
 static int imx6_pcie_link_up(struct pcie_port *pp)
 {
-       u32 rc, debug_r0, rx_valid;
-       int count = 5;
-
-       /*
-        * Test if the PHY reports that the link is up and also that the LTSSM
-        * training finished. There are three possible states of the link when
-        * this code is called:
-        * 1) The link is DOWN (unlikely)
-        *     The link didn't come up yet for some reason. This usually means
-        *     we have a real problem somewhere. Reset the PHY and exit. This
-        *     state calls for inspection of the DEBUG registers.
-        * 2) The link is UP, but still in LTSSM training
-        *     Wait for the training to finish, which should take a very short
-        *     time. If the training does not finish, we have a problem and we
-        *     need to inspect the DEBUG registers. If the training does finish,
-        *     the link is up and operating correctly.
-        * 3) The link is UP and no longer in LTSSM training
-        *     The link is up and operating correctly.
-        */
-       while (1) {
-               rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
-               if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
-                       break;
-               if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
-                       return 1;
-               if (!count--)
-                       break;
-               dev_dbg(pp->dev, "Link is up, but still in training\n");
-               /*
-                * Wait a little bit, then re-check if the link finished
-                * the training.
-                */
-               usleep_range(1000, 2000);
-       }
-       /*
-        * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
-        * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
-        * If (MAC/LTSSM.state == Recovery.RcvrLock)
-        * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
-        * to gen2 is stuck
-        */
-       pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
-       debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
-
-       if (rx_valid & PCIE_PHY_RX_ASIC_OUT_VALID)
-               return 0;
-
-       if ((debug_r0 & 0x3f) != 0x0d)
-               return 0;
-
-       dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
-       dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
-
-       imx6_pcie_reset_phy(pp);
-
-       return 0;
+       return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
+                       PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
 }
 
 static struct pcie_host_ops imx6_pcie_host_ops = {
index 3923bed93c7e06fa8327cafef9429c34d1b063cb..c40d8b2ce3300c4b2e5b075ff3d13d14e7ca1e0e 100644 (file)
@@ -203,6 +203,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
        { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
        { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
        { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+       { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
        { },
 };
 MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
index 21716827847a8ab02f1123a49ffc23141571ebc9..f85f10d220490308be8413faf9c69aeca26c9c1e 100644 (file)
@@ -517,6 +517,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
        if (pp->ops->host_init)
                pp->ops->host_init(pp);
 
+       /*
+        * If the platform provides ->rd_other_conf, it means the platform
+        * uses its own address translation component rather than ATU, so
+        * we should not program the ATU here.
+        */
        if (!pp->ops->rd_other_conf)
                dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
                                          PCIE_ATU_TYPE_MEM, pp->mem_base,
@@ -551,13 +556,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
        pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 #endif
 
-       if (!pci_has_flag(PCI_PROBE_ONLY)) {
-               pci_bus_size_bridges(bus);
-               pci_bus_assign_resources(bus);
+       pci_bus_size_bridges(bus);
+       pci_bus_assign_resources(bus);
 
-               list_for_each_entry(child, &bus->children, node)
-                       pcie_bus_configure_settings(child);
-       }
+       list_for_each_entry(child, &bus->children, node)
+               pcie_bus_configure_settings(child);
 
        pci_bus_add_devices(bus);
        return 0;
index 5816bceddb650c24576464913f9b7044d38be585..a576aeeb22da6cec1a01784328e209a66e96e8df 100644 (file)
@@ -64,7 +64,6 @@
 #define OARR_SIZE_CFG                BIT(OARR_SIZE_CFG_SHIFT)
 
 #define MAX_NUM_OB_WINDOWS           2
-#define MAX_NUM_PAXC_PF              4
 
 #define IPROC_PCIE_REG_INVALID 0xffff
 
@@ -170,20 +169,6 @@ static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
        writel(val, pcie->base + offset + (window * 8));
 }
 
-static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
-                                             unsigned int slot,
-                                             unsigned int fn)
-{
-       if (slot > 0)
-               return false;
-
-       /* PAXC can only support limited number of functions */
-       if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
-               return false;
-
-       return true;
-}
-
 /**
  * Note access to the configuration registers are protected at the higher layer
  * by 'pci_lock' in drivers/pci/access.c
@@ -199,11 +184,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
        u32 val;
        u16 offset;
 
-       if (!iproc_pcie_device_is_valid(pcie, slot, fn))
-               return NULL;
-
        /* root complex access */
        if (busno == 0) {
+               if (slot > 0 || fn > 0)
+                       return NULL;
+
                iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
                                     where & CFG_IND_ADDR_MASK);
                offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
@@ -213,6 +198,14 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
                        return (pcie->base + offset);
        }
 
+       /*
+        * PAXC is connected to an internally emulated EP within the SoC.  It
+        * allows only one device.
+        */
+       if (pcie->type == IPROC_PCIE_PAXC)
+               if (slot > 0)
+                       return NULL;
+
        /* EP device access */
        val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
                (slot << CFG_ADDR_DEV_NUM_SHIFT) |
index 4edb5181f4e2ca64dc24d8c2cc1ebec46b2d7384..35092188039b99264f7911a3fdd91005c043d4d0 100644 (file)
@@ -390,9 +390,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
 
        rcar_pcie_setup(&res, pcie);
 
-       /* Do not reassign resources if probe only */
-       if (!pci_has_flag(PCI_PROBE_ONLY))
-               pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+       pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
        if (IS_ENABLED(CONFIG_PCI_MSI))
                bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
@@ -408,13 +406,11 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
 
        pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 
-       if (!pci_has_flag(PCI_PROBE_ONLY)) {
-               pci_bus_size_bridges(bus);
-               pci_bus_assign_resources(bus);
+       pci_bus_size_bridges(bus);
+       pci_bus_assign_resources(bus);
 
-               list_for_each_entry(child, &bus->children, node)
-                       pcie_bus_configure_settings(child);
-       }
+       list_for_each_entry(child, &bus->children, node)
+               pcie_bus_configure_settings(child);
 
        pci_bus_add_devices(bus);
 
index 5f2fda12e0066d9bdd90bdf08139c706a2f1a2f0..fa49f9143b80631108e10ef78e5e45e285c40cf8 100644 (file)
@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
 {
        pci_lock_rescan_remove();
 
-       if (slot->flags & SLOT_IS_GOING_AWAY)
+       if (slot->flags & SLOT_IS_GOING_AWAY) {
+               pci_unlock_rescan_remove();
                return -ENODEV;
+       }
 
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
index 602eb422351060c611967538359b8409885397a1..64c0a1215f842b2d00e488d0bd8675f4198a1534 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/device.h>
 #include <linux/pm_runtime.h>
 #include <linux/pci_hotplug.h>
-#include <asm-generic/pci-bridge.h>
 #include <asm/setup.h>
 #include <linux/aer.h>
 #include "pci.h"
index 20db790465dd682efae0ade88d7ac42cfd52b060..e2760a39a98ad1f9524bf4ca5b6445c2ee732840 100644 (file)
@@ -124,16 +124,13 @@ static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
 static struct pci_bus_ops *pci_bus_ops_pop(void)
 {
        unsigned long flags;
-       struct pci_bus_ops *bus_ops = NULL;
+       struct pci_bus_ops *bus_ops;
 
        spin_lock_irqsave(&inject_lock, flags);
-       if (list_empty(&pci_bus_ops_list))
-               bus_ops = NULL;
-       else {
-               struct list_head *lh = pci_bus_ops_list.next;
-               list_del(lh);
-               bus_ops = list_entry(lh, struct pci_bus_ops, list);
-       }
+       bus_ops = list_first_entry_or_null(&pci_bus_ops_list,
+                                          struct pci_bus_ops, list);
+       if (bus_ops)
+               list_del(&bus_ops->list);
        spin_unlock_irqrestore(&inject_lock, flags);
        return bus_ops;
 }
@@ -181,14 +178,16 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
        return target;
 }
 
-static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
-                       int size, u32 *val)
+static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
+                              int where, int size, u32 *val)
 {
        u32 *sim;
        struct aer_error *err;
        unsigned long flags;
        struct pci_ops *ops;
+       struct pci_ops *my_ops;
        int domain;
+       int rv;
 
        spin_lock_irqsave(&inject_lock, flags);
        if (size != sizeof(u32))
@@ -208,19 +207,32 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
        }
 out:
        ops = __find_pci_bus_ops(bus);
+       /*
+        * pci_lock must already be held, so we can directly
+        * manipulate bus->ops.  Many config access functions,
+        * including pci_generic_config_read(), require the original
+        * bus->ops to be installed in order to work, so temporarily put
+        * them back.
+        */
+       my_ops = bus->ops;
+       bus->ops = ops;
+       rv = ops->read(bus, devfn, where, size, val);
+       bus->ops = my_ops;
        spin_unlock_irqrestore(&inject_lock, flags);
-       return ops->read(bus, devfn, where, size, val);
+       return rv;
 }
 
-static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
-                        int size, u32 val)
+static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
+                               int where, int size, u32 val)
 {
        u32 *sim;
        struct aer_error *err;
        unsigned long flags;
        int rw1cs;
        struct pci_ops *ops;
+       struct pci_ops *my_ops;
        int domain;
+       int rv;
 
        spin_lock_irqsave(&inject_lock, flags);
        if (size != sizeof(u32))
@@ -243,13 +255,24 @@ static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
        }
 out:
        ops = __find_pci_bus_ops(bus);
+       /*
+        * pci_lock must already be held, so we can directly
+        * manipulate bus->ops.  Many config access functions,
+        * including pci_generic_config_write(), require the original
+        * bus->ops to be installed in order to work, so temporarily put
+        * them back.
+        */
+       my_ops = bus->ops;
+       bus->ops = ops;
+       rv = ops->write(bus, devfn, where, size, val);
+       bus->ops = my_ops;
        spin_unlock_irqrestore(&inject_lock, flags);
-       return ops->write(bus, devfn, where, size, val);
+       return rv;
 }
 
-static struct pci_ops pci_ops_aer = {
-       .read = pci_read_aer,
-       .write = pci_write_aer,
+static struct pci_ops aer_inj_pci_ops = {
+       .read = aer_inj_read_config,
+       .write = aer_inj_write_config,
 };
 
 static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
@@ -270,9 +293,9 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
        bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
        if (!bus_ops)
                return -ENOMEM;
-       ops = pci_bus_set_ops(bus, &pci_ops_aer);
+       ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
        spin_lock_irqsave(&inject_lock, flags);
-       if (ops == &pci_ops_aer)
+       if (ops == &aer_inj_pci_ops)
                goto out;
        pci_bus_ops_init(bus_ops, bus, ops);
        list_add(&bus_ops->list, &pci_bus_ops_list);
index 0bf82a20a0fb479ccfbdb43479bf9b0cf6ecff6a..48d21e0edd568cedc16b1626f78f0a95b22bca1c 100644 (file)
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
        rpc->rpd = dev;
        INIT_WORK(&rpc->dpc_handler, aer_isr);
        mutex_init(&rpc->rpc_mutex);
-       init_waitqueue_head(&rpc->wait_release);
 
        /* Use PCIe bus function to store rpc into PCIe device */
        set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
                if (rpc->isr)
                        free_irq(dev->irq, dev);
 
-               wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+               flush_work(&rpc->dpc_handler);
                aer_disable_rootport(rpc);
                kfree(rpc);
                set_service_data(dev, NULL);
index 84420b7c9456ecbb0e43a9e8ca539af5b2ee1a20..945c939a86c5c2919335d85a95dbeee34f9c0bff 100644 (file)
@@ -72,7 +72,6 @@ struct aer_rpc {
                                         * recovery on the same
                                         * root port hierarchy
                                         */
-       wait_queue_head_t wait_release;
 };
 
 struct aer_broadcast_data {
index 712392504ed9b9f1a8992079f801cf2c271a0ae6..521e39c1b66d597f8881d9ab02a4e4b41c1b68a0 100644 (file)
@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
        while (get_e_source(rpc, &e_src))
                aer_isr_one_error(p_device, &e_src);
        mutex_unlock(&rpc->rpc_mutex);
-
-       wake_up(&rpc->wait_release);
 }
 
 /**
index 63fc63911295e7c0be875b39436c889c6850234e..1ae4c73e7a3c3037f5430df3159b606f69287c77 100644 (file)
@@ -396,7 +396,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
 {
        struct pcie_pme_service_data *data = get_service_data(srv);
        struct pci_dev *port = srv->port;
-       bool wakeup;
+       bool wakeup, wake_irq_enabled = false;
        int ret;
 
        if (device_may_wakeup(&port->dev)) {
@@ -409,11 +409,12 @@ static int pcie_pme_suspend(struct pcie_device *srv)
        spin_lock_irq(&data->lock);
        if (wakeup) {
                ret = enable_irq_wake(srv->irq);
-               data->suspend_level = PME_SUSPEND_WAKEUP;
+               if (ret == 0) {
+                       data->suspend_level = PME_SUSPEND_WAKEUP;
+                       wake_irq_enabled = true;
+               }
        }
-       if (!wakeup || ret) {
-               struct pci_dev *port = srv->port;
-
+       if (!wake_irq_enabled) {
                pcie_pme_interrupt_enable(port, false);
                pcie_clear_root_pme_status(port);
                data->suspend_level = PME_SUSPEND_NOIRQ;
index 6d7ab9bb0d5a6f002debd5a3b83e569f78c44c96..5eb378fbe8496dd2b715164e9fd6fcaaef417bd0 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/pci-aspm.h>
 #include <linux/aer.h>
 #include <linux/acpi.h>
-#include <asm-generic/pci-bridge.h>
 #include "pci.h"
 
 #define CARDBUS_LATENCY_TIMER  176     /* secondary latency timer */
@@ -1803,6 +1802,13 @@ static int only_one_child(struct pci_bus *bus)
                return 0;
        if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
                return 1;
+
+       /*
+        * PCIe downstream ports are bridges that normally lead only to
+        * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
+        * possible devices, not just device 0.  See PCIe spec r3.0,
+        * sec 7.3.1.
+        */
        if (parent->has_secondary_link &&
            !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
                return 1;
index 0575a1e026b4c4550ee07655910cc64d0e0e9341..85fa6a2a6dd20c8866b9e58f12193b6f6a20a390 100644 (file)
@@ -3832,6 +3832,19 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
 #endif
 }
 
+static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
+{
+       /*
+        * Cavium devices matching this quirk do not perform peer-to-peer
+        * with other functions, allowing masking out these bits as if they
+        * were unimplemented in the ACS capability.
+        */
+       acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+                      PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+
+       return acs_flags ? 0 : 1;
+}
+
 /*
  * Many Intel PCH root ports do provide ACS-like features to disable peer
  * transactions and validate bus numbers in requests, but do not provide an
@@ -3984,6 +3997,8 @@ static const struct pci_dev_acs_enabled {
        { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
        { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
        { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
+       /* Cavium ThunderX */
+       { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
        { 0 }
 };
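
Editorial note: entries in pci_dev_acs_enabled[] are consulted by pci_dev_specific_acs_enabled() when the PCI core asks whether a device enforces a given set of ACS controls, so the new Cavium line answers for every ThunderX device ID. A rough sketch of that lookup (an approximation of the drivers/pci/quirks.c logic, not a verbatim copy):

	/*
	 * Sketch: the first vendor/device match decides.  A return of 1 means
	 * the requested ACS flags are effectively enabled, 0 means they are
	 * not, and a negative value from a quirk means "no opinion, keep
	 * looking".
	 */
	static int acs_quirk_lookup_sketch(struct pci_dev *dev, u16 acs_flags)
	{
		const struct pci_dev_acs_enabled *i;
		int ret;

		for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
			if ((i->vendor == dev->vendor ||
			     i->vendor == (u16)PCI_ANY_ID) &&
			    (i->device == dev->device ||
			     i->device == (u16)PCI_ANY_ID)) {
				ret = i->acs_enabled(dev, acs_flags);
				if (ret >= 0)
					return ret;
			}
		}
		return -ENOTTY;	/* no quirk matched */
	}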
 
index 7796d0a5befafb958c8bae6193b4bca56da96203..55641a39a3e94f3eab29a5494d943afc9614d3e6 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/ioport.h>
 #include <linux/cache.h>
 #include <linux/slab.h>
-#include <asm-generic/pci-bridge.h>
 #include "pci.h"
 
 unsigned int pci_flags;
index f700723ca5d6b1a8a0c342c568908f9c53aa2435..d28e3ab9479c64e41fe014714c08721fc503faf3 100644 (file)
@@ -342,6 +342,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
 /* Device IDs of parts that have 32KB MCH space */
 static const unsigned int mch_quirk_devices[] = {
        0x0154, /* Ivy Bridge */
+       0x0a04, /* Haswell-ULT */
        0x0c00, /* Haswell */
        0x1604, /* Broadwell */
 };
index 934c139916c609e3647acd5c6229a781ca348d56..ee4f183ef9eed337b012c4088bfeba5ae0c949a3 100644 (file)
@@ -178,7 +178,6 @@ static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        u64 ns;
-       u32 remainder;
        unsigned long flags;
        struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
        struct ixp46x_ts_regs *regs = ixp_clock->regs;
@@ -189,8 +188,7 @@ static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 
        spin_unlock_irqrestore(&register_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
        return 0;
 }
 
@@ -202,8 +200,7 @@ static int ptp_ixp_settime(struct ptp_clock_info *ptp,
        struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
        struct ixp46x_ts_regs *regs = ixp_clock->regs;
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&register_lock, flags);
 
index 376322f71fd5723c90863298f4b4ed3eb7750d6e..ef456d3cf2655ac466f7671915d0b8e579492a50 100644 (file)
@@ -335,16 +335,6 @@ config RTC_DRV_RK808
          This driver can also be built as a module. If so, the module
          will be called rk808-rtc.
 
-config RTC_DRV_MAX77802
-       tristate "Maxim 77802 RTC"
-       depends on MFD_MAX77686
-       help
-         If you say yes here you will get support for the
-         RTC of Maxim MAX77802 PMIC.
-
-         This driver can also be built as a module. If so, the module
-         will be called rtc-max77802.
-
 config RTC_DRV_RS5C372
        tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
        help
index 62d61b26ca7e600a30c38cf754353457c19723f9..ed4519efa3caf01d5ce331cdddb1db6dd4e0b431 100644 (file)
@@ -86,7 +86,6 @@ obj-$(CONFIG_RTC_DRV_M48T86)  += rtc-m48t86.o
 obj-$(CONFIG_RTC_DRV_MAX6900)  += rtc-max6900.o
 obj-$(CONFIG_RTC_DRV_MAX6902)  += rtc-max6902.o
 obj-$(CONFIG_RTC_DRV_MAX77686) += rtc-max77686.o
-obj-$(CONFIG_RTC_DRV_MAX77802) += rtc-max77802.o
 obj-$(CONFIG_RTC_DRV_MAX8907)  += rtc-max8907.o
 obj-$(CONFIG_RTC_DRV_MAX8925)  += rtc-max8925.o
 obj-$(CONFIG_RTC_DRV_MAX8997)  += rtc-max8997.o
index 7184a0eda79384f8a59a5c8be8155018ba1a792e..0f2965d912ae8b7c6dbbc4bfb9dfe1d628b34d10 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * RTC driver for Maxim MAX77686
+ * RTC driver for Maxim MAX77686 and MAX77802
  *
  * Copyright (C) 2012 Samsung Electronics Co.Ltd
  *
@@ -12,8 +12,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/slab.h>
 #include <linux/rtc.h>
 #include <linux/delay.h>
 #define ALARM_ENABLE_SHIFT             7
 #define ALARM_ENABLE_MASK              (1 << ALARM_ENABLE_SHIFT)
 
-#define MAX77686_RTC_UPDATE_DELAY      16
+#define REG_RTC_NONE                   0xdeadbeef
+
+/*
+ * MAX77802 has a separate register (RTCAE1) for alarm enable, instead
+ * of using 1 bit from the RTC{SEC,MIN,HOUR,DAY,MONTH,YEAR,DATE}
+ * registers as is done in MAX77686.
+ */
+#define MAX77802_ALARM_ENABLE_VALUE    0x77
 
 enum {
        RTC_SEC = 0,
@@ -54,6 +59,19 @@ enum {
        RTC_NR_TIME
 };
 
+struct max77686_rtc_driver_data {
+       /* Minimum usecs needed for a RTC update */
+       unsigned long           delay;
+       /* Mask used to read RTC registers value */
+       u8                      mask;
+       /* Registers offset to I2C addresses map */
+       const unsigned int      *map;
+       /* Has a separate alarm enable register? */
+       bool                    alarm_enable_reg;
+       /* Has a separate I2C regmap for the RTC? */
+       bool                    separate_i2c_addr;
+};
+
 struct max77686_rtc_info {
        struct device           *dev;
        struct max77686_dev     *max77686;
@@ -63,6 +81,8 @@ struct max77686_rtc_info {
 
        struct regmap           *regmap;
 
+       const struct max77686_rtc_driver_data *drv_data;
+
        int virq;
        int rtc_24hr_mode;
 };
@@ -72,12 +92,120 @@ enum MAX77686_RTC_OP {
        MAX77686_RTC_READ,
 };
 
+/* These are not registers but just offsets that are mapped to addresses */
+enum max77686_rtc_reg_offset {
+       REG_RTC_CONTROLM = 0,
+       REG_RTC_CONTROL,
+       REG_RTC_UPDATE0,
+       REG_WTSR_SMPL_CNTL,
+       REG_RTC_SEC,
+       REG_RTC_MIN,
+       REG_RTC_HOUR,
+       REG_RTC_WEEKDAY,
+       REG_RTC_MONTH,
+       REG_RTC_YEAR,
+       REG_RTC_DATE,
+       REG_ALARM1_SEC,
+       REG_ALARM1_MIN,
+       REG_ALARM1_HOUR,
+       REG_ALARM1_WEEKDAY,
+       REG_ALARM1_MONTH,
+       REG_ALARM1_YEAR,
+       REG_ALARM1_DATE,
+       REG_ALARM2_SEC,
+       REG_ALARM2_MIN,
+       REG_ALARM2_HOUR,
+       REG_ALARM2_WEEKDAY,
+       REG_ALARM2_MONTH,
+       REG_ALARM2_YEAR,
+       REG_ALARM2_DATE,
+       REG_RTC_AE1,
+       REG_RTC_END,
+};
+
+/* Maps RTC registers offset to the MAX77686 register addresses */
+static const unsigned int max77686_map[REG_RTC_END] = {
+       [REG_RTC_CONTROLM]   = MAX77686_RTC_CONTROLM,
+       [REG_RTC_CONTROL]    = MAX77686_RTC_CONTROL,
+       [REG_RTC_UPDATE0]    = MAX77686_RTC_UPDATE0,
+       [REG_WTSR_SMPL_CNTL] = MAX77686_WTSR_SMPL_CNTL,
+       [REG_RTC_SEC]        = MAX77686_RTC_SEC,
+       [REG_RTC_MIN]        = MAX77686_RTC_MIN,
+       [REG_RTC_HOUR]       = MAX77686_RTC_HOUR,
+       [REG_RTC_WEEKDAY]    = MAX77686_RTC_WEEKDAY,
+       [REG_RTC_MONTH]      = MAX77686_RTC_MONTH,
+       [REG_RTC_YEAR]       = MAX77686_RTC_YEAR,
+       [REG_RTC_DATE]       = MAX77686_RTC_DATE,
+       [REG_ALARM1_SEC]     = MAX77686_ALARM1_SEC,
+       [REG_ALARM1_MIN]     = MAX77686_ALARM1_MIN,
+       [REG_ALARM1_HOUR]    = MAX77686_ALARM1_HOUR,
+       [REG_ALARM1_WEEKDAY] = MAX77686_ALARM1_WEEKDAY,
+       [REG_ALARM1_MONTH]   = MAX77686_ALARM1_MONTH,
+       [REG_ALARM1_YEAR]    = MAX77686_ALARM1_YEAR,
+       [REG_ALARM1_DATE]    = MAX77686_ALARM1_DATE,
+       [REG_ALARM2_SEC]     = MAX77686_ALARM2_SEC,
+       [REG_ALARM2_MIN]     = MAX77686_ALARM2_MIN,
+       [REG_ALARM2_HOUR]    = MAX77686_ALARM2_HOUR,
+       [REG_ALARM2_WEEKDAY] = MAX77686_ALARM2_WEEKDAY,
+       [REG_ALARM2_MONTH]   = MAX77686_ALARM2_MONTH,
+       [REG_ALARM2_YEAR]    = MAX77686_ALARM2_YEAR,
+       [REG_ALARM2_DATE]    = MAX77686_ALARM2_DATE,
+       [REG_RTC_AE1]        = REG_RTC_NONE,
+};
+
+static const struct max77686_rtc_driver_data max77686_drv_data = {
+       .delay = 16000,
+       .mask  = 0x7f,
+       .map   = max77686_map,
+       .alarm_enable_reg  = false,
+       .separate_i2c_addr = true,
+};
+
+static const unsigned int max77802_map[REG_RTC_END] = {
+       [REG_RTC_CONTROLM]   = MAX77802_RTC_CONTROLM,
+       [REG_RTC_CONTROL]    = MAX77802_RTC_CONTROL,
+       [REG_RTC_UPDATE0]    = MAX77802_RTC_UPDATE0,
+       [REG_WTSR_SMPL_CNTL] = MAX77802_WTSR_SMPL_CNTL,
+       [REG_RTC_SEC]        = MAX77802_RTC_SEC,
+       [REG_RTC_MIN]        = MAX77802_RTC_MIN,
+       [REG_RTC_HOUR]       = MAX77802_RTC_HOUR,
+       [REG_RTC_WEEKDAY]    = MAX77802_RTC_WEEKDAY,
+       [REG_RTC_MONTH]      = MAX77802_RTC_MONTH,
+       [REG_RTC_YEAR]       = MAX77802_RTC_YEAR,
+       [REG_RTC_DATE]       = MAX77802_RTC_DATE,
+       [REG_ALARM1_SEC]     = MAX77802_ALARM1_SEC,
+       [REG_ALARM1_MIN]     = MAX77802_ALARM1_MIN,
+       [REG_ALARM1_HOUR]    = MAX77802_ALARM1_HOUR,
+       [REG_ALARM1_WEEKDAY] = MAX77802_ALARM1_WEEKDAY,
+       [REG_ALARM1_MONTH]   = MAX77802_ALARM1_MONTH,
+       [REG_ALARM1_YEAR]    = MAX77802_ALARM1_YEAR,
+       [REG_ALARM1_DATE]    = MAX77802_ALARM1_DATE,
+       [REG_ALARM2_SEC]     = MAX77802_ALARM2_SEC,
+       [REG_ALARM2_MIN]     = MAX77802_ALARM2_MIN,
+       [REG_ALARM2_HOUR]    = MAX77802_ALARM2_HOUR,
+       [REG_ALARM2_WEEKDAY] = MAX77802_ALARM2_WEEKDAY,
+       [REG_ALARM2_MONTH]   = MAX77802_ALARM2_MONTH,
+       [REG_ALARM2_YEAR]    = MAX77802_ALARM2_YEAR,
+       [REG_ALARM2_DATE]    = MAX77802_ALARM2_DATE,
+       [REG_RTC_AE1]        = MAX77802_RTC_AE1,
+};
+
+static const struct max77686_rtc_driver_data max77802_drv_data = {
+       .delay = 200,
+       .mask  = 0xff,
+       .map   = max77802_map,
+       .alarm_enable_reg  = true,
+       .separate_i2c_addr = false,
+};
+
 static void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
-                                  int rtc_24hr_mode)
+                                   struct max77686_rtc_info *info)
 {
-       tm->tm_sec = data[RTC_SEC] & 0x7f;
-       tm->tm_min = data[RTC_MIN] & 0x7f;
-       if (rtc_24hr_mode)
+       u8 mask = info->drv_data->mask;
+
+       tm->tm_sec = data[RTC_SEC] & mask;
+       tm->tm_min = data[RTC_MIN] & mask;
+       if (info->rtc_24hr_mode)
                tm->tm_hour = data[RTC_HOUR] & 0x1f;
        else {
                tm->tm_hour = data[RTC_HOUR] & 0x0f;
@@ -86,15 +214,23 @@ static void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
        }
 
        /* Only a single bit is set in data[], so fls() would be equivalent */
-       tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0x7f) - 1;
+       tm->tm_wday = ffs(data[RTC_WEEKDAY] & mask) - 1;
        tm->tm_mday = data[RTC_DATE] & 0x1f;
        tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
-       tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;
+       tm->tm_year = data[RTC_YEAR] & mask;
        tm->tm_yday = 0;
        tm->tm_isdst = 0;
+
+       /*
+        * MAX77686 uses the top bit of the sec/min/hour/etc RTC registers
+        * as the alarm enable, so year values are just 0..99; add 100 to
+        * support years up to 2099.
+        */
+       if (!info->drv_data->alarm_enable_reg)
+               tm->tm_year += 100;
 }
 
-static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data,
+                                  struct max77686_rtc_info *info)
 {
        data[RTC_SEC] = tm->tm_sec;
        data[RTC_MIN] = tm->tm_min;
@@ -102,13 +238,20 @@ static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
        data[RTC_WEEKDAY] = 1 << tm->tm_wday;
        data[RTC_DATE] = tm->tm_mday;
        data[RTC_MONTH] = tm->tm_mon + 1;
+
+       if (info->drv_data->alarm_enable_reg) {
+               data[RTC_YEAR] = tm->tm_year;
+               return 0;
+       }
+
        data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
 
        if (tm->tm_year < 100) {
-               pr_warn("RTC cannot handle the year %d.  Assume it's 2000.\n",
+               dev_err(info->dev, "RTC cannot handle the year %d.\n",
                        1900 + tm->tm_year);
                return -EINVAL;
        }
+
        return 0;
 }
 
@@ -117,6 +260,7 @@ static int max77686_rtc_update(struct max77686_rtc_info *info,
 {
        int ret;
        unsigned int data;
+       unsigned long delay = info->drv_data->delay;
 
        if (op == MAX77686_RTC_WRITE)
                data = 1 << RTC_UDR_SHIFT;
@@ -124,13 +268,14 @@ static int max77686_rtc_update(struct max77686_rtc_info *info,
                data = 1 << RTC_RBUDR_SHIFT;
 
        ret = regmap_update_bits(info->max77686->rtc_regmap,
-                                MAX77686_RTC_UPDATE0, data, data);
+                                info->drv_data->map[REG_RTC_UPDATE0],
+                                data, data);
        if (ret < 0)
-               dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
-                               __func__, ret, data);
+               dev_err(info->dev, "Fail to write update reg(ret=%d, data=0x%x)\n",
+                       ret, data);
        else {
-               /* Minimum 16ms delay required before RTC update. */
-               msleep(MAX77686_RTC_UPDATE_DELAY);
+               /* Minimum delay required before RTC update. */
+               usleep_range(delay, delay * 2);
        }
 
        return ret;
@@ -149,13 +294,14 @@ static int max77686_rtc_read_time(struct device *dev, struct rtc_time *tm)
                goto out;
 
        ret = regmap_bulk_read(info->max77686->rtc_regmap,
-                               MAX77686_RTC_SEC, data, RTC_NR_TIME);
+                              info->drv_data->map[REG_RTC_SEC],
+                              data, ARRAY_SIZE(data));
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__, ret);
+               dev_err(info->dev, "Fail to read time reg(%d)\n", ret);
                goto out;
        }
 
-       max77686_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
+       max77686_rtc_data_to_tm(data, tm, info);
 
        ret = rtc_valid_tm(tm);
 
@@ -170,17 +316,17 @@ static int max77686_rtc_set_time(struct device *dev, struct rtc_time *tm)
        u8 data[RTC_NR_TIME];
        int ret;
 
-       ret = max77686_rtc_tm_to_data(tm, data);
+       ret = max77686_rtc_tm_to_data(tm, data, info);
        if (ret < 0)
                return ret;
 
        mutex_lock(&info->lock);
 
        ret = regmap_bulk_write(info->max77686->rtc_regmap,
-                                MAX77686_RTC_SEC, data, RTC_NR_TIME);
+                               info->drv_data->map[REG_RTC_SEC],
+                               data, ARRAY_SIZE(data));
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
-                               ret);
+               dev_err(info->dev, "Fail to write time reg(%d)\n", ret);
                goto out;
        }
 
@@ -196,6 +342,7 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
        struct max77686_rtc_info *info = dev_get_drvdata(dev);
        u8 data[RTC_NR_TIME];
        unsigned int val;
+       const unsigned int *map = info->drv_data->map;
        int i, ret;
 
        mutex_lock(&info->lock);
@@ -205,28 +352,47 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
                goto out;
 
        ret = regmap_bulk_read(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+                              map[REG_ALARM1_SEC], data, ARRAY_SIZE(data));
        if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
-                               __func__, __LINE__, ret);
+               dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
                goto out;
        }
 
-       max77686_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+       max77686_rtc_data_to_tm(data, &alrm->time, info);
 
        alrm->enabled = 0;
-       for (i = 0; i < RTC_NR_TIME; i++) {
-               if (data[i] & ALARM_ENABLE_MASK) {
+
+       if (info->drv_data->alarm_enable_reg) {
+               if (map[REG_RTC_AE1] == REG_RTC_NONE) {
+                       ret = -EINVAL;
+                       dev_err(info->dev,
+                               "alarm enable register not set(%d)\n", ret);
+                       goto out;
+               }
+
+               ret = regmap_read(info->max77686->regmap,
+                                 map[REG_RTC_AE1], &val);
+               if (ret < 0) {
+                       dev_err(info->dev,
+                               "fail to read alarm enable(%d)\n", ret);
+                       goto out;
+               }
+
+               if (val)
                        alrm->enabled = 1;
-                       break;
+       } else {
+               for (i = 0; i < ARRAY_SIZE(data); i++) {
+                       if (data[i] & ALARM_ENABLE_MASK) {
+                               alrm->enabled = 1;
+                               break;
+                       }
                }
        }
 
        alrm->pending = 0;
        ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
        if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
-                               __func__, __LINE__, ret);
+               dev_err(info->dev, "Fail to read status2 reg(%d)\n", ret);
                goto out;
        }
 
@@ -235,7 +401,7 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 
 out:
        mutex_unlock(&info->lock);
-       return 0;
+       return ret;
 }
 
 static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
@@ -243,6 +409,7 @@ static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
        u8 data[RTC_NR_TIME];
        int ret, i;
        struct rtc_time tm;
+       const unsigned int *map = info->drv_data->map;
 
        if (!mutex_is_locked(&info->lock))
                dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
@@ -251,24 +418,36 @@ static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
        if (ret < 0)
                goto out;
 
-       ret = regmap_bulk_read(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-                               __func__, ret);
-               goto out;
-       }
+       if (info->drv_data->alarm_enable_reg) {
+               if (map[REG_RTC_AE1] == REG_RTC_NONE) {
+                       ret = -EINVAL;
+                       dev_err(info->dev,
+                               "alarm enable register not set(%d)\n", ret);
+                       goto out;
+               }
 
-       max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+               ret = regmap_write(info->max77686->regmap, map[REG_RTC_AE1], 0);
+       } else {
+               ret = regmap_bulk_read(info->max77686->rtc_regmap,
+                                      map[REG_ALARM1_SEC], data,
+                                      ARRAY_SIZE(data));
+               if (ret < 0) {
+                       dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
+                       goto out;
+               }
 
-       for (i = 0; i < RTC_NR_TIME; i++)
-               data[i] &= ~ALARM_ENABLE_MASK;
+               max77686_rtc_data_to_tm(data, &tm, info);
+
+               for (i = 0; i < ARRAY_SIZE(data); i++)
+                       data[i] &= ~ALARM_ENABLE_MASK;
+
+               ret = regmap_bulk_write(info->max77686->rtc_regmap,
+                                       map[REG_ALARM1_SEC], data,
+                                       ARRAY_SIZE(data));
+       }
 
-       ret = regmap_bulk_write(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                               __func__, ret);
+               dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
                goto out;
        }
 
@@ -282,6 +461,7 @@ static int max77686_rtc_start_alarm(struct max77686_rtc_info *info)
        u8 data[RTC_NR_TIME];
        int ret;
        struct rtc_time tm;
+       const unsigned int *map = info->drv_data->map;
 
        if (!mutex_is_locked(&info->lock))
                dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
@@ -290,32 +470,38 @@ static int max77686_rtc_start_alarm(struct max77686_rtc_info *info)
        if (ret < 0)
                goto out;
 
-       ret = regmap_bulk_read(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-                               __func__, ret);
-               goto out;
-       }
-
-       max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+       if (info->drv_data->alarm_enable_reg) {
+               ret = regmap_write(info->max77686->regmap, map[REG_RTC_AE1],
+                                  MAX77802_ALARM_ENABLE_VALUE);
+       } else {
+               ret = regmap_bulk_read(info->max77686->rtc_regmap,
+                                      map[REG_ALARM1_SEC], data,
+                                      ARRAY_SIZE(data));
+               if (ret < 0) {
+                       dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
+                       goto out;
+               }
 
-       data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
-       data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
-       data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
-       data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
-       if (data[RTC_MONTH] & 0xf)
-               data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
-       if (data[RTC_YEAR] & 0x7f)
-               data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
-       if (data[RTC_DATE] & 0x1f)
-               data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+               max77686_rtc_data_to_tm(data, &tm, info);
+
+               data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
+               data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
+               data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
+               data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+               if (data[RTC_MONTH] & 0xf)
+                       data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
+               if (data[RTC_YEAR] & info->drv_data->mask)
+                       data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
+               if (data[RTC_DATE] & 0x1f)
+                       data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+
+               ret = regmap_bulk_write(info->max77686->rtc_regmap,
+                                       map[REG_ALARM1_SEC], data,
+                                       ARRAY_SIZE(data));
+       }
 
-       ret = regmap_bulk_write(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                               __func__, ret);
+               dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
                goto out;
        }
 
@@ -330,7 +516,7 @@ static int max77686_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
        u8 data[RTC_NR_TIME];
        int ret;
 
-       ret = max77686_rtc_tm_to_data(&alrm->time, data);
+       ret = max77686_rtc_tm_to_data(&alrm->time, data, info);
        if (ret < 0)
                return ret;
 
@@ -341,11 +527,11 @@ static int max77686_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
                goto out;
 
        ret = regmap_bulk_write(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+                               info->drv_data->map[REG_ALARM1_SEC],
+                               data, ARRAY_SIZE(data));
 
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                               __func__, ret);
+               dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
                goto out;
        }
 
@@ -380,7 +566,7 @@ static irqreturn_t max77686_rtc_alarm_irq(int irq, void *data)
 {
        struct max77686_rtc_info *info = data;
 
-       dev_info(info->dev, "%s:irq(%d)\n", __func__, irq);
+       dev_dbg(info->dev, "RTC alarm IRQ: %d\n", irq);
 
        rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
 
@@ -406,10 +592,11 @@ static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
 
        info->rtc_24hr_mode = 1;
 
-       ret = regmap_bulk_write(info->max77686->rtc_regmap, MAX77686_RTC_CONTROLM, data, 2);
+       ret = regmap_bulk_write(info->max77686->rtc_regmap,
+                               info->drv_data->map[REG_RTC_CONTROLM],
+                               data, ARRAY_SIZE(data));
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
-                               __func__, ret);
+               dev_err(info->dev, "Fail to write controlm reg(%d)\n", ret);
                return ret;
        }
 
@@ -421,10 +608,9 @@ static int max77686_rtc_probe(struct platform_device *pdev)
 {
        struct max77686_dev *max77686 = dev_get_drvdata(pdev->dev.parent);
        struct max77686_rtc_info *info;
+       const struct platform_device_id *id = platform_get_device_id(pdev);
        int ret;
 
-       dev_info(&pdev->dev, "%s\n", __func__);
-
        info = devm_kzalloc(&pdev->dev, sizeof(struct max77686_rtc_info),
                                GFP_KERNEL);
        if (!info)
@@ -434,6 +620,11 @@ static int max77686_rtc_probe(struct platform_device *pdev)
        info->dev = &pdev->dev;
        info->max77686 = max77686;
        info->rtc = max77686->rtc;
+       info->drv_data = (const struct max77686_rtc_driver_data *)
+               id->driver_data;
+
+       if (!info->drv_data->separate_i2c_addr)
+               info->max77686->rtc_regmap = info->max77686->regmap;
 
        platform_set_drvdata(pdev, info);
 
@@ -446,7 +637,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
 
        device_init_wakeup(&pdev->dev, 1);
 
-       info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max77686-rtc",
+       info->rtc_dev = devm_rtc_device_register(&pdev->dev, id->name,
                                        &max77686_rtc_ops, THIS_MODULE);
 
        if (IS_ERR(info->rtc_dev)) {
@@ -459,13 +650,13 @@ static int max77686_rtc_probe(struct platform_device *pdev)
 
        if (!max77686->rtc_irq_data) {
                ret = -EINVAL;
-               dev_err(&pdev->dev, "%s: no RTC regmap IRQ chip\n", __func__);
+               dev_err(&pdev->dev, "No RTC regmap IRQ chip\n");
                goto err_rtc;
        }
 
        info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
                                         MAX77686_RTCIRQ_RTCA1);
-       if (!info->virq) {
+       if (info->virq <= 0) {
                ret = -ENXIO;
                goto err_rtc;
        }
@@ -508,7 +699,8 @@ static SIMPLE_DEV_PM_OPS(max77686_rtc_pm_ops,
                         max77686_rtc_suspend, max77686_rtc_resume);
 
 static const struct platform_device_id rtc_id[] = {
-       { "max77686-rtc", 0 },
+       { "max77686-rtc", .driver_data = (kernel_ulong_t)&max77686_drv_data, },
+       { "max77802-rtc", .driver_data = (kernel_ulong_t)&max77802_drv_data, },
        {},
 };
 MODULE_DEVICE_TABLE(platform, rtc_id);
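With the MAX77802 driver folded into this one, the per-chip differences are carried by a driver_data descriptor selected through the platform_device_id table above. The descriptor's definition sits earlier in the patch and is not visible in this excerpt, so the kernel-style sketch below only illustrates the dispatch pattern, reduced to the three fields the hunks above actually reference (map, mask and separate_i2c_addr):

/* Illustrative only; the real struct is defined earlier in this patch. */
struct example_rtc_driver_data {
	const unsigned int	*map;			/* logical index -> chip register */
	u8			mask;			/* valid bits of the year field   */
	bool			separate_i2c_addr;	/* RTC behind its own I2C client? */
};

static int example_rtc_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	const struct example_rtc_driver_data *drv_data =
		(const struct example_rtc_driver_data *)id->driver_data;

	if (!drv_data)
		return -EINVAL;

	/* Every register access from here on goes through drv_data->map[]. */
	return 0;
}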
diff --git a/drivers/rtc/rtc-max77802.c b/drivers/rtc/rtc-max77802.c
deleted file mode 100644 (file)
index 82ffcc5..0000000
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * RTC driver for Maxim MAX77802
- *
- * Copyright (C) 2013 Google, Inc
- *
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- *
- *  based on rtc-max8997.c
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/rtc.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/max77686-private.h>
-#include <linux/irqdomain.h>
-#include <linux/regmap.h>
-
-/* RTC Control Register */
-#define BCD_EN_SHIFT                   0
-#define BCD_EN_MASK                    (1 << BCD_EN_SHIFT)
-#define MODEL24_SHIFT                  1
-#define MODEL24_MASK                   (1 << MODEL24_SHIFT)
-/* RTC Update Register1 */
-#define RTC_UDR_SHIFT                  0
-#define RTC_UDR_MASK                   (1 << RTC_UDR_SHIFT)
-#define RTC_RBUDR_SHIFT                        4
-#define RTC_RBUDR_MASK                 (1 << RTC_RBUDR_SHIFT)
-/* RTC Hour register */
-#define HOUR_PM_SHIFT                  6
-#define HOUR_PM_MASK                   (1 << HOUR_PM_SHIFT)
-/* RTC Alarm Enable */
-#define ALARM_ENABLE_SHIFT             7
-#define ALARM_ENABLE_MASK              (1 << ALARM_ENABLE_SHIFT)
-
-/* For the RTCAE1 register, we write this value to enable the alarm */
-#define ALARM_ENABLE_VALUE             0x77
-
-#define MAX77802_RTC_UPDATE_DELAY_US   200
-
-enum {
-       RTC_SEC = 0,
-       RTC_MIN,
-       RTC_HOUR,
-       RTC_WEEKDAY,
-       RTC_MONTH,
-       RTC_YEAR,
-       RTC_DATE,
-       RTC_NR_TIME
-};
-
-struct max77802_rtc_info {
-       struct device           *dev;
-       struct max77686_dev     *max77802;
-       struct i2c_client       *rtc;
-       struct rtc_device       *rtc_dev;
-       struct mutex            lock;
-
-       struct regmap           *regmap;
-
-       int virq;
-       int rtc_24hr_mode;
-};
-
-enum MAX77802_RTC_OP {
-       MAX77802_RTC_WRITE,
-       MAX77802_RTC_READ,
-};
-
-static void max77802_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
-                                  int rtc_24hr_mode)
-{
-       tm->tm_sec = data[RTC_SEC] & 0xff;
-       tm->tm_min = data[RTC_MIN] & 0xff;
-       if (rtc_24hr_mode)
-               tm->tm_hour = data[RTC_HOUR] & 0x1f;
-       else {
-               tm->tm_hour = data[RTC_HOUR] & 0x0f;
-               if (data[RTC_HOUR] & HOUR_PM_MASK)
-                       tm->tm_hour += 12;
-       }
-
-       /* Only a single bit is set in data[], so fls() would be equivalent */
-       tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0xff) - 1;
-       tm->tm_mday = data[RTC_DATE] & 0x1f;
-       tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
-
-       tm->tm_year = data[RTC_YEAR] & 0xff;
-       tm->tm_yday = 0;
-       tm->tm_isdst = 0;
-}
-
-static int max77802_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
-{
-       data[RTC_SEC] = tm->tm_sec;
-       data[RTC_MIN] = tm->tm_min;
-       data[RTC_HOUR] = tm->tm_hour;
-       data[RTC_WEEKDAY] = 1 << tm->tm_wday;
-       data[RTC_DATE] = tm->tm_mday;
-       data[RTC_MONTH] = tm->tm_mon + 1;
-       data[RTC_YEAR] = tm->tm_year;
-
-       return 0;
-}
-
-static int max77802_rtc_update(struct max77802_rtc_info *info,
-       enum MAX77802_RTC_OP op)
-{
-       int ret;
-       unsigned int data;
-
-       if (op == MAX77802_RTC_WRITE)
-               data = 1 << RTC_UDR_SHIFT;
-       else
-               data = 1 << RTC_RBUDR_SHIFT;
-
-       ret = regmap_update_bits(info->max77802->regmap,
-                                MAX77802_RTC_UPDATE0, data, data);
-       if (ret < 0)
-               dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
-                               __func__, ret, data);
-       else {
-               /* Minimum delay required before RTC update. */
-               usleep_range(MAX77802_RTC_UPDATE_DELAY_US,
-                            MAX77802_RTC_UPDATE_DELAY_US * 2);
-       }
-
-       return ret;
-}
-
-static int max77802_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       u8 data[RTC_NR_TIME];
-       int ret;
-
-       mutex_lock(&info->lock);
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_bulk_read(info->max77802->regmap,
-                               MAX77802_RTC_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__,
-                       ret);
-               goto out;
-       }
-
-       max77802_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
-
-       ret = rtc_valid_tm(tm);
-
-out:
-       mutex_unlock(&info->lock);
-       return ret;
-}
-
-static int max77802_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       u8 data[RTC_NR_TIME];
-       int ret;
-
-       ret = max77802_rtc_tm_to_data(tm, data);
-       if (ret < 0)
-               return ret;
-
-       mutex_lock(&info->lock);
-
-       ret = regmap_bulk_write(info->max77802->regmap,
-                                MAX77802_RTC_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
-                       ret);
-               goto out;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-
-out:
-       mutex_unlock(&info->lock);
-       return ret;
-}
-
-static int max77802_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       u8 data[RTC_NR_TIME];
-       unsigned int val;
-       int ret;
-
-       mutex_lock(&info->lock);
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_bulk_read(info->max77802->regmap,
-                                MAX77802_ALARM1_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
-                               __func__, __LINE__, ret);
-               goto out;
-       }
-
-       max77802_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
-
-       alrm->enabled = 0;
-       ret = regmap_read(info->max77802->regmap,
-                         MAX77802_RTC_AE1, &val);
-       if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read alarm enable(%d)\n",
-                       __func__, __LINE__, ret);
-               goto out;
-       }
-       if (val)
-               alrm->enabled = 1;
-
-       alrm->pending = 0;
-       ret = regmap_read(info->max77802->regmap, MAX77802_REG_STATUS2, &val);
-       if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
-                               __func__, __LINE__, ret);
-               goto out;
-       }
-
-       if (val & (1 << 2)) /* RTCA1 */
-               alrm->pending = 1;
-
-out:
-       mutex_unlock(&info->lock);
-       return 0;
-}
-
-static int max77802_rtc_stop_alarm(struct max77802_rtc_info *info)
-{
-       int ret;
-
-       if (!mutex_is_locked(&info->lock))
-               dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_write(info->max77802->regmap,
-                          MAX77802_RTC_AE1, 0);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                       __func__, ret);
-               goto out;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-out:
-       return ret;
-}
-
-static int max77802_rtc_start_alarm(struct max77802_rtc_info *info)
-{
-       int ret;
-
-       if (!mutex_is_locked(&info->lock))
-               dev_warn(info->dev, "%s: should have mutex locked\n",
-                        __func__);
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_write(info->max77802->regmap,
-                                  MAX77802_RTC_AE1,
-                                  ALARM_ENABLE_VALUE);
-
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-                               __func__, ret);
-               goto out;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-out:
-       return ret;
-}
-
-static int max77802_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       u8 data[RTC_NR_TIME];
-       int ret;
-
-       ret = max77802_rtc_tm_to_data(&alrm->time, data);
-       if (ret < 0)
-               return ret;
-
-       mutex_lock(&info->lock);
-
-       ret = max77802_rtc_stop_alarm(info);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_bulk_write(info->max77802->regmap,
-                                MAX77802_ALARM1_SEC, data, RTC_NR_TIME);
-
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                               __func__, ret);
-               goto out;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-       if (ret < 0)
-               goto out;
-
-       if (alrm->enabled)
-               ret = max77802_rtc_start_alarm(info);
-out:
-       mutex_unlock(&info->lock);
-       return ret;
-}
-
-static int max77802_rtc_alarm_irq_enable(struct device *dev,
-                                        unsigned int enabled)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       int ret;
-
-       mutex_lock(&info->lock);
-       if (enabled)
-               ret = max77802_rtc_start_alarm(info);
-       else
-               ret = max77802_rtc_stop_alarm(info);
-       mutex_unlock(&info->lock);
-
-       return ret;
-}
-
-static irqreturn_t max77802_rtc_alarm_irq(int irq, void *data)
-{
-       struct max77802_rtc_info *info = data;
-
-       dev_dbg(info->dev, "%s:irq(%d)\n", __func__, irq);
-
-       rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
-
-       return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops max77802_rtc_ops = {
-       .read_time = max77802_rtc_read_time,
-       .set_time = max77802_rtc_set_time,
-       .read_alarm = max77802_rtc_read_alarm,
-       .set_alarm = max77802_rtc_set_alarm,
-       .alarm_irq_enable = max77802_rtc_alarm_irq_enable,
-};
-
-static int max77802_rtc_init_reg(struct max77802_rtc_info *info)
-{
-       u8 data[2];
-       int ret;
-
-       max77802_rtc_update(info, MAX77802_RTC_READ);
-
-       /* Set RTC control register : Binary mode, 24hour mdoe */
-       data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
-       data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
-
-       info->rtc_24hr_mode = 1;
-
-       ret = regmap_bulk_write(info->max77802->regmap,
-                               MAX77802_RTC_CONTROLM, data, ARRAY_SIZE(data));
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
-                               __func__, ret);
-               return ret;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-       return ret;
-}
-
-static int max77802_rtc_probe(struct platform_device *pdev)
-{
-       struct max77686_dev *max77802 = dev_get_drvdata(pdev->dev.parent);
-       struct max77802_rtc_info *info;
-       int ret;
-
-       dev_dbg(&pdev->dev, "%s\n", __func__);
-
-       info = devm_kzalloc(&pdev->dev, sizeof(struct max77802_rtc_info),
-                           GFP_KERNEL);
-       if (!info)
-               return -ENOMEM;
-
-       mutex_init(&info->lock);
-       info->dev = &pdev->dev;
-       info->max77802 = max77802;
-       info->rtc = max77802->i2c;
-
-       platform_set_drvdata(pdev, info);
-
-       ret = max77802_rtc_init_reg(info);
-
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
-               return ret;
-       }
-
-       device_init_wakeup(&pdev->dev, 1);
-
-       info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max77802-rtc",
-                                                &max77802_rtc_ops, THIS_MODULE);
-
-       if (IS_ERR(info->rtc_dev)) {
-               ret = PTR_ERR(info->rtc_dev);
-               dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
-               if (ret == 0)
-                       ret = -EINVAL;
-               return ret;
-       }
-
-       if (!max77802->rtc_irq_data) {
-               dev_err(&pdev->dev, "No RTC regmap IRQ chip\n");
-               return -EINVAL;
-       }
-
-       info->virq = regmap_irq_get_virq(max77802->rtc_irq_data,
-                                        MAX77686_RTCIRQ_RTCA1);
-
-       if (info->virq <= 0) {
-               dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
-                       MAX77686_RTCIRQ_RTCA1);
-               return -EINVAL;
-       }
-
-       ret = devm_request_threaded_irq(&pdev->dev, info->virq, NULL,
-                                       max77802_rtc_alarm_irq, 0, "rtc-alarm1",
-                                       info);
-       if (ret < 0)
-               dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
-                       info->virq, ret);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int max77802_rtc_suspend(struct device *dev)
-{
-       if (device_may_wakeup(dev)) {
-               struct max77802_rtc_info *info = dev_get_drvdata(dev);
-
-               return enable_irq_wake(info->virq);
-       }
-
-       return 0;
-}
-
-static int max77802_rtc_resume(struct device *dev)
-{
-       if (device_may_wakeup(dev)) {
-               struct max77802_rtc_info *info = dev_get_drvdata(dev);
-
-               return disable_irq_wake(info->virq);
-       }
-
-       return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(max77802_rtc_pm_ops,
-                        max77802_rtc_suspend, max77802_rtc_resume);
-
-static const struct platform_device_id rtc_id[] = {
-       { "max77802-rtc", 0 },
-       {},
-};
-MODULE_DEVICE_TABLE(platform, rtc_id);
-
-static struct platform_driver max77802_rtc_driver = {
-       .driver         = {
-               .name   = "max77802-rtc",
-               .pm     = &max77802_rtc_pm_ops,
-       },
-       .probe          = max77802_rtc_probe,
-       .id_table       = rtc_id,
-};
-
-module_platform_driver(max77802_rtc_driver);
-
-MODULE_DESCRIPTION("Maxim MAX77802 RTC driver");
-MODULE_AUTHOR("Simon Glass <sjg@chromium.org>");
-MODULE_LICENSE("GPL");
index c692dfebd0bab1c808f460349dff194b028f5c5c..50597f9522fe375ed764aab8842a400e56ef808d 100644 (file)
@@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
 
        device = container_of(kobj, struct device, kobj);
        chp = to_channelpath(device);
-       if (!chp->cmg_chars)
+       if (chp->cmg == -1)
                return 0;
 
-       return memory_read_from_buffer(buf, count, &off,
-                               chp->cmg_chars, sizeof(struct cmg_chars));
+       return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
+                                      sizeof(chp->cmg_chars));
 }
 
 static struct bin_attribute chp_measurement_chars_attr = {
@@ -416,7 +416,8 @@ static void chp_release(struct device *dev)
  * chp_update_desc - update channel-path description
  * @chp - channel-path
  *
- * Update the channel-path description of the specified channel-path.
+ * Update the channel-path description of the specified channel-path
+ * including channel measurement related information.
  * Return zero on success, non-zero otherwise.
  */
 int chp_update_desc(struct channel_path *chp)
@@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp)
                return rc;
 
        rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+       if (rc)
+               return rc;
 
-       return rc;
+       return chsc_get_channel_measurement_chars(chp);
 }
 
 /**
@@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid)
                ret = -ENODEV;
                goto out_free;
        }
-       /* Get channel-measurement characteristics. */
-       if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
-               ret = chsc_get_channel_measurement_chars(chp);
-               if (ret)
-                       goto out_free;
-       } else {
-               chp->cmg = -1;
-       }
        dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
 
        /* make it known to the system */
index 4efd5b867cc390780a18c2f983a2215ed05dcd72..af0232290dc4c7c183a65e3fe13d6683bb48cf1e 100644 (file)
@@ -48,7 +48,7 @@ struct channel_path {
        /* Channel-measurement related stuff: */
        int cmg;
        int shared;
-       void *cmg_chars;
+       struct cmg_chars cmg_chars;
 };
 
 /* Return channel_path struct for given chpid. */
index a831d18596a5ef592f88ec5af031f639606c8c0b..c424c0c7367e360acd9ccd1ce899dc80bb575265 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 
 #include <asm/cio.h>
@@ -224,8 +225,9 @@ out_unreg:
 
 void chsc_chp_offline(struct chp_id chpid)
 {
-       char dbf_txt[15];
+       struct channel_path *chp = chpid_to_chp(chpid);
        struct chp_link link;
+       char dbf_txt[15];
 
        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
@@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid)
        link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
+
+       mutex_lock(&chp->lock);
+       chp_update_desc(chp);
+       mutex_unlock(&chp->lock);
+
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
 }
 
@@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 
 void chsc_chp_online(struct chp_id chpid)
 {
-       char dbf_txt[15];
+       struct channel_path *chp = chpid_to_chp(chpid);
        struct chp_link link;
+       char dbf_txt[15];
 
        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
@@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid)
                link.chpid = chpid;
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
+
+               mutex_lock(&chp->lock);
+               chp_update_desc(chp);
+               mutex_unlock(&chp->lock);
+
                for_each_subchannel_staged(__s390_process_res_acc, NULL,
                                           &link);
                css_schedule_reprobe();
@@ -967,22 +980,19 @@ static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
 {
-       struct cmg_chars *cmg_chars;
        int i, mask;
 
-       cmg_chars = chp->cmg_chars;
        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                mask = 0x80 >> (i + 3);
                if (cmcv & mask)
-                       cmg_chars->values[i] = chars->values[i];
+                       chp->cmg_chars.values[i] = chars->values[i];
                else
-                       cmg_chars->values[i] = 0;
+                       chp->cmg_chars.values[i] = 0;
        }
 }
 
 int chsc_get_channel_measurement_chars(struct channel_path *chp)
 {
-       struct cmg_chars *cmg_chars;
        int ccode, ret;
 
        struct {
@@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;
 
-       chp->cmg_chars = NULL;
-       cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
-       if (!cmg_chars)
-               return -ENOMEM;
+       chp->shared = -1;
+       chp->cmg = -1;
+
+       if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
+               return 0;
 
        spin_lock_irq(&chsc_page_lock);
        memset(chsc_page, 0, PAGE_SIZE);
@@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
                              scmc_area->response.code);
                goto out;
        }
-       if (scmc_area->not_valid) {
-               chp->cmg = -1;
-               chp->shared = -1;
+       if (scmc_area->not_valid)
                goto out;
-       }
+
        chp->cmg = scmc_area->cmg;
        chp->shared = scmc_area->shared;
        if (chp->cmg != 2 && chp->cmg != 3) {
                /* No cmg-dependent data. */
                goto out;
        }
-       chp->cmg_chars = cmg_chars;
        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                  (struct cmg_chars *) &scmc_area->data);
 out:
        spin_unlock_irq(&chsc_page_lock);
-       if (!chp->cmg_chars)
-               kfree(cmg_chars);
-
        return ret;
 }
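With cmg_chars now embedded in struct channel_path, chsc_get_channel_measurement_chars() above no longer allocates anything: it marks the measurement data invalid up front, returns success when the CHSC facilities are absent, and only fills the fields in when the hardware reports them. A tiny sketch of that "initialise to invalid, bail out early on missing support" shape, with a stub standing in for the real CHSC request:

#include <stdbool.h>

struct path_info {
	int cmg;	/* -1 means "no measurement data available" */
	int shared;
};

/* Stand-in for the real CHSC request; purely illustrative. */
static int query_hw(struct path_info *p)
{
	p->cmg = 2;
	p->shared = 0;
	return 0;
}

static int get_measurement_chars(struct path_info *p, bool facility_present)
{
	/* Mark the data invalid first, as the patch above now does. */
	p->cmg = -1;
	p->shared = -1;

	if (!facility_present)
		return 0;	/* an absent feature is no longer an error */

	return query_hw(p);
}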
 
index 7b23f43c7b080cc81b4de94ed9016cc0d8a91546..de1b6c1d172c8c0866dc0fd8fb709d032bdef820 100644 (file)
@@ -112,9 +112,10 @@ static inline int convert_error(struct zcrypt_device *zdev,
                atomic_set(&zcrypt_rescan_req, 1);
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                       zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+                       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+                       ehdr->reply_code);
                return -EAGAIN;
        case REP82_ERROR_TRANSPORT_FAIL:
        case REP82_ERROR_MACHINE_FAILURE:
@@ -123,16 +124,18 @@ static inline int convert_error(struct zcrypt_device *zdev,
                atomic_set(&zcrypt_rescan_req, 1);
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                       zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+                       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+                       ehdr->reply_code);
                return -EAGAIN;
        default:
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                       zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+                       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+                       ehdr->reply_code);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
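The zcrypt error paths above now print only the device part of the AP queue id, obtained through AP_QID_DEVICE(), instead of the raw qid. The macro itself is not part of this excerpt; the shift and mask below are therefore assumptions chosen purely to illustrate the intent (see the s390 AP bus header for the real definition):

#include <stdio.h>

/* Assumed layout for illustration only: device number in the upper byte. */
#define EXAMPLE_QID_DEVICE(qid)	(((qid) >> 8) & 0xff)

int main(void)
{
	unsigned int qid = 0x1a03;	/* hypothetical AP queue id */

	printf("Cryptographic device %x failed and was set offline\n",
	       EXAMPLE_QID_DEVICE(qid));
	return 0;
}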
index 74edf2934e7c9fd652c33d1d14bb47b9e2bf44b8..eedfaa2cf715f698db447c1a79e857f170727dc5 100644 (file)
@@ -336,9 +336,10 @@ static int convert_type80(struct zcrypt_device *zdev,
                /* The result is too short, the CEX2A card may not do that.. */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                              zdev->ap_dev->qid, zdev->online, t80h->code);
+                              AP_QID_DEVICE(zdev->ap_dev->qid),
+                              zdev->online, t80h->code);
 
                return -EAGAIN; /* repeat the request on a different device. */
        }
@@ -368,9 +369,9 @@ static int convert_response(struct zcrypt_device *zdev,
        default: /* Unknown response type, this should NEVER EVER happen */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
index 9a2dd472c1cc9870d37e98c5692c1c9c1e0ebac3..21959719daef94a00bd3f40282a2022077ec1344 100644 (file)
@@ -572,9 +572,9 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
                        return -EINVAL;
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                              zdev->ap_dev->qid, zdev->online,
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
                               msg->hdr.reply_code);
                return -EAGAIN; /* repeat the request on a different device. */
        }
@@ -715,9 +715,9 @@ static int convert_response_ica(struct zcrypt_device *zdev,
        default: /* Unknown response type, this should NEVER EVER happen */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
@@ -747,9 +747,9 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
                xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
@@ -773,9 +773,9 @@ static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
        default: /* Unknown response type, this should NEVER EVER happen */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
@@ -800,9 +800,9 @@ static int convert_response_rng(struct zcrypt_device *zdev,
        default: /* Unknown response type, this should NEVER EVER happen */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
index 37a0c7156087905b8cb749103e71d92547091e2e..b67661836c9fa26fccd1e23a82508c1d567a5916 100644 (file)
@@ -1,5 +1,7 @@
 config SCSI_HISI_SAS
        tristate "HiSilicon SAS"
+       depends on HAS_DMA
+       depends on ARM64 || COMPILE_TEST
        select SCSI_SAS_LIBSAS
        select BLK_DEV_INTEGRITY
        help
index 1412266314292dfa59bf473cf2ed3fdb2edebe65..a6682c508c4cbb800a19fd8b010a551f404d55b3 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <asm/dbdma.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #include <asm/macio.h>
 
 #include <scsi/scsi.h>
index 555367f002282b238f8da84ab20100526ba7b1c1..1753e42826dd99bf7d69e1ea83f54a9b5ba34309 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/interrupt.h>
 #include <linux/reboot.h>
 #include <linux/spinlock.h>
+#include <linux/pci.h>
 #include <asm/dbdma.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -38,7 +39,6 @@
 #include <asm/processor.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
 #include <asm/macio.h>
 
 #include <scsi/scsi.h>
index 4e08d1cd704d1c261c82a98067649154f433ecd1..bb669d32ccd0daee203a69840313fcb9cf343ee0 100644 (file)
@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
            sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
            sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
                rw_max = q->limits.io_opt =
-                       logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+                       sdkp->opt_xfer_blocks * sdp->sector_size;
        else
                rw_max = BLK_DEF_MAX_SECTORS;
 
@@ -3268,8 +3268,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
        struct scsi_disk *sdkp = dev_get_drvdata(dev);
        int ret = 0;
 
-       if (!sdkp)
-               return 0;       /* this can happen */
+       if (!sdkp)      /* E.g.: runtime suspend following sd_remove() */
+               return 0;
 
        if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3308,6 +3308,9 @@ static int sd_resume(struct device *dev)
 {
        struct scsi_disk *sdkp = dev_get_drvdata(dev);
 
+       if (!sdkp)      /* E.g.: runtime resume at the start of sd_probe() */
+               return 0;
+
        if (!sdkp->device->manage_start_stop)
                return 0;
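The sd_suspend_common() and sd_resume() hunks above add the same defensive check that the sr driver gains further down: runtime PM callbacks can fire while the driver data is not (or no longer) set, so a NULL drvdata is treated as "nothing to do" rather than dereferenced. A kernel-style sketch of the pattern with made-up names; only dev_get_drvdata() and the early return are taken from the hunks:

/* Illustrative runtime PM callback, not the sd driver's. */
struct example_disk {
	bool media_present;
};

static int example_runtime_suspend(struct device *dev)
{
	struct example_disk *edsk = dev_get_drvdata(dev);

	if (!edsk)	/* e.g. runtime suspend racing with device removal */
		return 0;

	/* ... quiesce the hardware ... */
	return 0;
}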
 
index 503ab8b46c0b4e8a73d7e826ba1aaf3be1abd6b0..5e820674432ca38c46ad154b831c8eb19e974d34 100644 (file)
@@ -1261,7 +1261,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
        }
 
        sfp->mmap_called = 1;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = sfp;
        vma->vm_ops = &sg_mmap_vm_ops;
        return 0;
index 8bd54a64efd6a52aef163f0cbe4f34f5ef9ac71c..64c867405ad4ff2d5e53972a8ba34f785cc81962 100644 (file)
@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
 {
        struct scsi_cd *cd = dev_get_drvdata(dev);
 
+       if (!cd)        /* E.g.: runtime suspend following sr_remove() */
+               return 0;
+
        if (cd->media_present)
                return -EBUSY;
        else
@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
        scsi_autopm_get_device(cd->device);
 
        del_gendisk(cd->disk);
+       dev_set_drvdata(dev, NULL);
 
        mutex_lock(&sr_ref_mutex);
        kref_put(&cd->kref, sr_kref_release);
index 88260205a2614c84e8293bf82ca47c49abb2e29d..cb58ef0d9b2c50e071eeb453dca3a7aaac52a8de 100644 (file)
@@ -6,6 +6,7 @@ source "drivers/soc/fsl/qe/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/qcom/Kconfig"
 source "drivers/soc/rockchip/Kconfig"
+source "drivers/soc/samsung/Kconfig"
 source "drivers/soc/sunxi/Kconfig"
 source "drivers/soc/tegra/Kconfig"
 source "drivers/soc/ti/Kconfig"
index 2afdc74f7491adf08e82937d0b6676e107705e2a..5ade71306ee10d080414aaf2168fc93fbcd6fe4b 100644 (file)
@@ -10,6 +10,7 @@ obj-y                         += fsl/
 obj-$(CONFIG_ARCH_MEDIATEK)    += mediatek/
 obj-$(CONFIG_ARCH_QCOM)                += qcom/
 obj-$(CONFIG_ARCH_ROCKCHIP)            += rockchip/
+obj-$(CONFIG_SOC_SAMSUNG)      += samsung/
 obj-$(CONFIG_ARCH_SUNXI)       += sunxi/
 obj-$(CONFIG_ARCH_TEGRA)       += tegra/
 obj-$(CONFIG_SOC_TI)           += ti/
index 498fd0581a451999b89cbb43805b37f003fdb379..d861eefffcc7c5e1fdf3f3f5ef324ba4a8a98e5f 100644 (file)
@@ -106,9 +106,9 @@ static const struct {
  * @channels:          list of all channels detected on this edge
  * @channels_lock:     guard for modifications of @channels
  * @allocated:         array of bitmaps representing already allocated channels
- * @need_rescan:       flag that the @work needs to scan smem for new channels
  * @smem_available:    last available amount of smem triggering a channel scan
- * @work:              work item for edge house keeping
+ * @scan_work:         work item for discovering new channels
+ * @state_work:                work item for edge state changes
  */
 struct qcom_smd_edge {
        struct qcom_smd *smd;
@@ -123,14 +123,16 @@ struct qcom_smd_edge {
        int ipc_bit;
 
        struct list_head channels;
-       spinlock_t channels_lock;
+       rwlock_t channels_lock;
 
        DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
 
-       bool need_rescan;
        unsigned smem_available;
 
-       struct work_struct work;
+       wait_queue_head_t new_channel_event;
+
+       struct work_struct scan_work;
+       struct work_struct state_work;
 };
 
 /*
@@ -186,13 +188,14 @@ struct qcom_smd_channel {
        int fifo_size;
 
        void *bounce_buffer;
-       int (*cb)(struct qcom_smd_device *, const void *, size_t);
+       qcom_smd_cb_t cb;
 
        spinlock_t recv_lock;
 
        int pkt_size;
 
        struct list_head list;
+       struct list_head dev_list;
 };
 
 /**
@@ -377,6 +380,19 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
        channel->pkt_size = 0;
 }
 
+/*
+ * Set the callback for a channel, with appropriate locking
+ */
+static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
+                                         qcom_smd_cb_t cb)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&channel->recv_lock, flags);
+       channel->cb = cb;
+       spin_unlock_irqrestore(&channel->recv_lock, flags);
+};
+
 /*
  * Calculate the amount of data available in the rx fifo
  */
@@ -606,13 +622,13 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
        /*
         * Handle state changes or data on each of the channels on this edge
         */
-       spin_lock(&edge->channels_lock);
+       read_lock(&edge->channels_lock);
        list_for_each_entry(channel, &edge->channels, list) {
                spin_lock(&channel->recv_lock);
                kick_worker |= qcom_smd_channel_intr(channel);
                spin_unlock(&channel->recv_lock);
        }
-       spin_unlock(&edge->channels_lock);
+       read_unlock(&edge->channels_lock);
 
        /*
         * Creating a new channel requires allocating an smem entry, so we only
@@ -622,12 +638,11 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
        available = qcom_smem_get_free_space(edge->remote_pid);
        if (available != edge->smem_available) {
                edge->smem_available = available;
-               edge->need_rescan = true;
                kick_worker = true;
        }
 
        if (kick_worker)
-               schedule_work(&edge->work);
+               schedule_work(&edge->scan_work);
 
        return IRQ_HANDLED;
 }
@@ -793,18 +808,12 @@ static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
 }
 
 /*
- * Probe the smd client.
- *
- * The remote side have indicated that it want the channel to be opened, so
- * complete the state handshake and probe our client driver.
+ * Helper for opening a channel
  */
-static int qcom_smd_dev_probe(struct device *dev)
+static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
+                                qcom_smd_cb_t cb)
 {
-       struct qcom_smd_device *qsdev = to_smd_device(dev);
-       struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
-       struct qcom_smd_channel *channel = qsdev->channel;
        size_t bb_size;
-       int ret;
 
        /*
         * Packets are maximum 4k, but reduce if the fifo is smaller
@@ -814,12 +823,44 @@ static int qcom_smd_dev_probe(struct device *dev)
        if (!channel->bounce_buffer)
                return -ENOMEM;
 
-       channel->cb = qsdrv->callback;
-
+       qcom_smd_channel_set_callback(channel, cb);
        qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
-
        qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
 
+       return 0;
+}
+
+/*
+ * Helper for closing and resetting a channel
+ */
+static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
+{
+       qcom_smd_channel_set_callback(channel, NULL);
+
+       kfree(channel->bounce_buffer);
+       channel->bounce_buffer = NULL;
+
+       qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+       qcom_smd_channel_reset(channel);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side has indicated that it wants the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_smd_dev_probe(struct device *dev)
+{
+       struct qcom_smd_device *qsdev = to_smd_device(dev);
+       struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+       struct qcom_smd_channel *channel = qsdev->channel;
+       int ret;
+
+       ret = qcom_smd_channel_open(channel, qsdrv->callback);
+       if (ret)
+               return ret;
+
        ret = qsdrv->probe(qsdev);
        if (ret)
                goto err;
@@ -831,11 +872,7 @@ static int qcom_smd_dev_probe(struct device *dev)
 err:
        dev_err(&qsdev->dev, "probe failed\n");
 
-       channel->cb = NULL;
-       kfree(channel->bounce_buffer);
-       channel->bounce_buffer = NULL;
-
-       qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+       qcom_smd_channel_close(channel);
        return ret;
 }
 
@@ -850,16 +887,15 @@ static int qcom_smd_dev_remove(struct device *dev)
        struct qcom_smd_device *qsdev = to_smd_device(dev);
        struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
        struct qcom_smd_channel *channel = qsdev->channel;
-       unsigned long flags;
+       struct qcom_smd_channel *tmp;
+       struct qcom_smd_channel *ch;
 
        qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
 
        /*
         * Make sure we don't race with the code receiving data.
         */
-       spin_lock_irqsave(&channel->recv_lock, flags);
-       channel->cb = NULL;
-       spin_unlock_irqrestore(&channel->recv_lock, flags);
+       qcom_smd_channel_set_callback(channel, NULL);
 
        /* Wake up any sleepers in qcom_smd_send() */
        wake_up_interruptible(&channel->fblockread_event);
@@ -872,15 +908,14 @@ static int qcom_smd_dev_remove(struct device *dev)
                qsdrv->remove(qsdev);
 
        /*
-        * The client is now gone, cleanup and reset the channel state.
+        * The client is now gone, close and release all channels associated
+        * with this sdev
         */
-       channel->qsdev = NULL;
-       kfree(channel->bounce_buffer);
-       channel->bounce_buffer = NULL;
-
-       qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
-
-       qcom_smd_channel_reset(channel);
+       list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
+               qcom_smd_channel_close(ch);
+               list_del(&ch->dev_list);
+               ch->qsdev = NULL;
+       }
 
        return 0;
 }
@@ -1006,6 +1041,76 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
 }
 EXPORT_SYMBOL(qcom_smd_driver_unregister);
 
+static struct qcom_smd_channel *
+qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
+{
+       struct qcom_smd_channel *channel;
+       struct qcom_smd_channel *ret = NULL;
+       unsigned state;
+
+       read_lock(&edge->channels_lock);
+       list_for_each_entry(channel, &edge->channels, list) {
+               if (strcmp(channel->name, name))
+                       continue;
+
+               state = GET_RX_CHANNEL_INFO(channel, state);
+               if (state != SMD_CHANNEL_OPENING &&
+                   state != SMD_CHANNEL_OPENED)
+                       continue;
+
+               ret = channel;
+               break;
+       }
+       read_unlock(&edge->channels_lock);
+
+       return ret;
+}
+
+/**
+ * qcom_smd_open_channel() - claim additional channels on the same edge
+ * @sdev:      smd_device handle
+ * @name:      channel name
+ * @cb:                callback method to use for incoming data
+ *
+ * Returns a channel handle on success, or an ERR_PTR() on failure, for
+ * example -ETIMEDOUT if the channel never shows up or -EBUSY if it is taken.
+ */
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev,
+                                              const char *name,
+                                              qcom_smd_cb_t cb)
+{
+       struct qcom_smd_channel *channel;
+       struct qcom_smd_edge *edge = sdev->channel->edge;
+       int ret;
+
+       /* Wait up to HZ for the channel to appear */
+       ret = wait_event_interruptible_timeout(edge->new_channel_event,
+                       (channel = qcom_smd_find_channel(edge, name)) != NULL,
+                       HZ);
+       if (!ret)
+               return ERR_PTR(-ETIMEDOUT);
+
+       if (channel->state != SMD_CHANNEL_CLOSED) {
+               dev_err(&sdev->dev, "channel %s is busy\n", channel->name);
+               return ERR_PTR(-EBUSY);
+       }
+
+       channel->qsdev = sdev;
+       ret = qcom_smd_channel_open(channel, cb);
+       if (ret) {
+               channel->qsdev = NULL;
+               return ERR_PTR(ret);
+       }
+
+       /*
+        * Append this channel to the list of channels associated with the sdev
+        */
+       list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
+
+       return channel;
+}
+EXPORT_SYMBOL(qcom_smd_open_channel);
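qcom_smd_open_channel() lets an smd client claim further channels on the edge it was probed on. A hedged usage sketch from a hypothetical client's probe path; the channel name and callback are invented, and the callback signature assumes qcom_smd_cb_t keeps the (device, data, length) shape visible in the channel struct above:

/* Hypothetical callback for a secondary control channel. */
static int example_ctrl_cb(struct qcom_smd_device *qsdev,
			   const void *data, size_t count)
{
	dev_dbg(&qsdev->dev, "got %zu bytes on the control channel\n", count);
	return 0;
}

static int example_client_probe(struct qcom_smd_device *qsdev)
{
	struct qcom_smd_channel *ctrl;

	ctrl = qcom_smd_open_channel(qsdev, "EXAMPLE_CTRL", example_ctrl_cb);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);	/* e.g. -ETIMEDOUT or -EBUSY */

	/*
	 * Channels opened this way are tracked on the device's dev_list and
	 * closed automatically in qcom_smd_dev_remove().
	 */
	return 0;
}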
+
 /*
  * Allocate the qcom_smd_channel object for a newly found smd channel,
  * retrieving and validating the smem items involved.
@@ -1027,6 +1132,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
        if (!channel)
                return ERR_PTR(-ENOMEM);
 
+       INIT_LIST_HEAD(&channel->dev_list);
        channel->edge = edge;
        channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
        if (!channel->name)
@@ -1089,8 +1195,9 @@ free_name_and_channel:
  * qcom_smd_create_channel() to create representations of these and add
  * them to the edge's list of channels.
  */
-static void qcom_discover_channels(struct qcom_smd_edge *edge)
+static void qcom_channel_scan_worker(struct work_struct *work)
 {
+       struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
        struct qcom_smd_alloc_entry *alloc_tbl;
        struct qcom_smd_alloc_entry *entry;
        struct qcom_smd_channel *channel;
@@ -1134,16 +1241,18 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
                        if (IS_ERR(channel))
                                continue;
 
-                       spin_lock_irqsave(&edge->channels_lock, flags);
+                       write_lock_irqsave(&edge->channels_lock, flags);
                        list_add(&channel->list, &edge->channels);
-                       spin_unlock_irqrestore(&edge->channels_lock, flags);
+                       write_unlock_irqrestore(&edge->channels_lock, flags);
 
                        dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
                        set_bit(i, edge->allocated[tbl]);
+
+                       wake_up_interruptible(&edge->new_channel_event);
                }
        }
 
-       schedule_work(&edge->work);
+       schedule_work(&edge->state_work);
 }
 
 /*
@@ -1151,29 +1260,22 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
  * then scans all registered channels for state changes that should be handled
  * by creating or destroying smd client devices for the registered channels.
  *
- * LOCKING: edge->channels_lock is not needed to be held during the traversal
- * of the channels list as it's done synchronously with the only writer.
+ * LOCKING: edge->channels_lock only needs to cover the list operations, as the
+ * worker is killed before any channels are deallocated
  */
 static void qcom_channel_state_worker(struct work_struct *work)
 {
        struct qcom_smd_channel *channel;
        struct qcom_smd_edge *edge = container_of(work,
                                                  struct qcom_smd_edge,
-                                                 work);
+                                                 state_work);
        unsigned remote_state;
 
-       /*
-        * Rescan smem if we have reason to belive that there are new channels.
-        */
-       if (edge->need_rescan) {
-               edge->need_rescan = false;
-               qcom_discover_channels(edge);
-       }
-
        /*
         * Register a device for any closed channel where the remote processor
         * is showing interest in opening the channel.
         */
+       read_lock(&edge->channels_lock);
        list_for_each_entry(channel, &edge->channels, list) {
                if (channel->state != SMD_CHANNEL_CLOSED)
                        continue;
@@ -1183,7 +1285,9 @@ static void qcom_channel_state_worker(struct work_struct *work)
                    remote_state != SMD_CHANNEL_OPENED)
                        continue;
 
+               read_unlock(&edge->channels_lock);
                qcom_smd_create_device(channel);
+               read_lock(&edge->channels_lock);
        }
 
        /*
@@ -1200,8 +1304,11 @@ static void qcom_channel_state_worker(struct work_struct *work)
                    remote_state == SMD_CHANNEL_OPENED)
                        continue;
 
+               read_unlock(&edge->channels_lock);
                qcom_smd_destroy_device(channel);
+               read_lock(&edge->channels_lock);
        }
+       read_unlock(&edge->channels_lock);
 }
 
 /*
@@ -1217,9 +1324,10 @@ static int qcom_smd_parse_edge(struct device *dev,
        int ret;
 
        INIT_LIST_HEAD(&edge->channels);
-       spin_lock_init(&edge->channels_lock);
+       rwlock_init(&edge->channels_lock);
 
-       INIT_WORK(&edge->work, qcom_channel_state_worker);
+       INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
+       INIT_WORK(&edge->state_work, qcom_channel_state_worker);
 
        edge->of_node = of_node_get(node);
 
@@ -1303,13 +1411,13 @@ static int qcom_smd_probe(struct platform_device *pdev)
        for_each_available_child_of_node(pdev->dev.of_node, node) {
                edge = &smd->edges[i++];
                edge->smd = smd;
+               init_waitqueue_head(&edge->new_channel_event);
 
                ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
                if (ret)
                        continue;
 
-               edge->need_rescan = true;
-               schedule_work(&edge->work);
+               schedule_work(&edge->scan_work);
        }
 
        platform_set_drvdata(pdev, smd);
@@ -1332,8 +1440,10 @@ static int qcom_smd_remove(struct platform_device *pdev)
                edge = &smd->edges[i];
 
                disable_irq(edge->irq);
-               cancel_work_sync(&edge->work);
+               cancel_work_sync(&edge->scan_work);
+               cancel_work_sync(&edge->state_work);
 
+               /* No need to lock here, because the writer is gone */
                list_for_each_entry(channel, &edge->channels, list) {
                        if (!channel->qsdev)
                                continue;
index 5548a31e1a39a100142b45841cbe38e1aa007e38..f324451e0940eeadd10e3d76af56c727e30c6de6 100644 (file)
@@ -2,6 +2,8 @@
  * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  * Copyright (c) 2014,2015, Linaro Ltd.
  *
+ * SAW power controller driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -12,7 +14,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -378,8 +379,5 @@ static struct platform_driver spm_driver = {
                .of_match_table = spm_match_table,
        },
 };
-module_platform_driver(spm_driver);
 
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SAW power controller driver");
-MODULE_ALIAS("platform:saw");
+builtin_platform_driver(spm_driver);
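Because the SPM/SAW driver is now always built in, module_platform_driver() and the MODULE_* tags are replaced by builtin_platform_driver(). Roughly, and as a simplification rather than the literal macro expansion, the change amounts to registering the driver from an initcall and dropping the unregister/exit path:

/* Simplified equivalent of builtin_platform_driver(spm_driver). */
static int __init spm_driver_init(void)
{
	return platform_driver_register(&spm_driver);
}
device_initcall(spm_driver_init);

/*
 * module_platform_driver() would additionally emit a module_exit() hook
 * calling platform_driver_unregister(&spm_driver), which built-in code
 * never needs.
 */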
index 534c58937a566205b85e216bf1e230c9186a637f..43155e1f97b92ca7ee8f05b901ad681501041d80 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 #include <dt-bindings/power/rk3288-power.h>
+#include <dt-bindings/power/rk3368-power.h>
 
 struct rockchip_domain_info {
        int pwr_mask;
@@ -75,6 +76,9 @@ struct rockchip_pmu {
 #define DOMAIN_RK3288(pwr, status, req)                \
        DOMAIN(pwr, status, req, req, (req) + 16)
 
+#define DOMAIN_RK3368(pwr, status, req)                \
+       DOMAIN(pwr, status, req, (req) + 16, req)
+
 static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
 {
        struct rockchip_pmu *pmu = pd->pmu;
@@ -419,6 +423,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
                if (error) {
                        dev_err(dev, "failed to handle node %s: %d\n",
                                node->name, error);
+                       of_node_put(node);
                        goto err_out;
                }
        }
@@ -444,6 +449,14 @@ static const struct rockchip_domain_info rk3288_pm_domains[] = {
        [RK3288_PD_GPU]         = DOMAIN_RK3288(9, 9, 2),
 };
 
+static const struct rockchip_domain_info rk3368_pm_domains[] = {
+       [RK3368_PD_PERI]        = DOMAIN_RK3368(13, 12, 6),
+       [RK3368_PD_VIO]         = DOMAIN_RK3368(15, 14, 8),
+       [RK3368_PD_VIDEO]       = DOMAIN_RK3368(14, 13, 7),
+       [RK3368_PD_GPU_0]       = DOMAIN_RK3368(16, 15, 2),
+       [RK3368_PD_GPU_1]       = DOMAIN_RK3368(17, 16, 2),
+};
+
 static const struct rockchip_pmu_info rk3288_pmu = {
        .pwr_offset = 0x08,
        .status_offset = 0x0c,
@@ -461,11 +474,32 @@ static const struct rockchip_pmu_info rk3288_pmu = {
        .domain_info = rk3288_pm_domains,
 };
 
+static const struct rockchip_pmu_info rk3368_pmu = {
+       .pwr_offset = 0x0c,
+       .status_offset = 0x10,
+       .req_offset = 0x3c,
+       .idle_offset = 0x40,
+       .ack_offset = 0x40,
+
+       .core_pwrcnt_offset = 0x48,
+       .gpu_pwrcnt_offset = 0x50,
+
+       .core_power_transition_time = 24,
+       .gpu_power_transition_time = 24,
+
+       .num_domains = ARRAY_SIZE(rk3368_pm_domains),
+       .domain_info = rk3368_pm_domains,
+};
+
 static const struct of_device_id rockchip_pm_domain_dt_match[] = {
        {
                .compatible = "rockchip,rk3288-power-controller",
                .data = (void *)&rk3288_pmu,
        },
+       {
+               .compatible = "rockchip,rk3368-power-controller",
+               .data = (void *)&rk3368_pmu,
+       },
        { /* sentinel */ },
 };
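The rk3368 entries reuse the driver's generic DOMAIN() initialiser but swap the last two arguments relative to rk3288, reflecting where that SoC places its idle and ack status bits. The DOMAIN() macro itself is defined earlier in the file and is not shown here, so the argument names below are an assumption, kept only to make the difference between the two wrappers explicit:

/* Assumed parameter order: DOMAIN(pwr, status, req, idle, ack). */
#define EXAMPLE_DOMAIN_RK3288(pwr, status, req) \
	DOMAIN(pwr, status, req, (req), (req) + 16)	/* ack bits sit at req + 16  */
#define EXAMPLE_DOMAIN_RK3368(pwr, status, req) \
	DOMAIN(pwr, status, req, (req) + 16, (req))	/* idle bits sit at req + 16 */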
 
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
new file mode 100644 (file)
index 0000000..895f169
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# SAMSUNG SoC drivers
+#
+menu "Samsung SOC driver support"
+
+config SOC_SAMSUNG
+       bool
+
+config EXYNOS_SROM
+       bool
+       depends on ARM && ARCH_EXYNOS
+
+config EXYNOS_PMU
+       bool
+       depends on ARM && ARCH_EXYNOS
+
+endmenu
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
new file mode 100644 (file)
index 0000000..cef7970
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_EXYNOS_SROM)      += exynos-srom.o
+obj-$(CONFIG_EXYNOS_PMU)       += exynos-pmu.o exynos3250-pmu.o exynos4-pmu.o \
+                                       exynos5250-pmu.o exynos5420-pmu.o
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
new file mode 100644 (file)
index 0000000..0acdfd8
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS - CPU PMU(Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+struct exynos_pmu_context {
+       struct device *dev;
+       const struct exynos_pmu_data *pmu_data;
+};
+
+void __iomem *pmu_base_addr;
+static struct exynos_pmu_context *pmu_context;
+
+void pmu_raw_writel(u32 val, u32 offset)
+{
+       writel_relaxed(val, pmu_base_addr + offset);
+}
+
+u32 pmu_raw_readl(u32 offset)
+{
+       return readl_relaxed(pmu_base_addr + offset);
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
+{
+       unsigned int i;
+       const struct exynos_pmu_data *pmu_data;
+
+       if (!pmu_context)
+               return;
+
+       pmu_data = pmu_context->pmu_data;
+
+       if (pmu_data->powerdown_conf)
+               pmu_data->powerdown_conf(mode);
+
+       if (pmu_data->pmu_config) {
+               for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
+                       pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
+                                       pmu_data->pmu_config[i].offset);
+       }
+
+       if (pmu_data->powerdown_conf_extra)
+               pmu_data->powerdown_conf_extra(mode);
+
+       if (pmu_data->pmu_config_extra) {
+               for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
+                       pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
+                                       pmu_data->pmu_config_extra[i].offset);
+       }
+}
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static const struct of_device_id exynos_pmu_of_device_ids[] = {
+       {
+               .compatible = "samsung,exynos3250-pmu",
+               .data = &exynos3250_pmu_data,
+       }, {
+               .compatible = "samsung,exynos4210-pmu",
+               .data = &exynos4210_pmu_data,
+       }, {
+               .compatible = "samsung,exynos4212-pmu",
+               .data = &exynos4212_pmu_data,
+       }, {
+               .compatible = "samsung,exynos4412-pmu",
+               .data = &exynos4412_pmu_data,
+       }, {
+               .compatible = "samsung,exynos5250-pmu",
+               .data = &exynos5250_pmu_data,
+       }, {
+               .compatible = "samsung,exynos5420-pmu",
+               .data = &exynos5420_pmu_data,
+       },
+       { /* sentinel */ },
+};
+
+static int exynos_pmu_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pmu_base_addr = devm_ioremap_resource(dev, res);
+       if (IS_ERR(pmu_base_addr))
+               return PTR_ERR(pmu_base_addr);
+
+       pmu_context = devm_kzalloc(&pdev->dev,
+                       sizeof(struct exynos_pmu_context),
+                       GFP_KERNEL);
+       if (!pmu_context) {
+               dev_err(dev, "Cannot allocate memory.\n");
+               return -ENOMEM;
+       }
+       pmu_context->dev = dev;
+
+       match = of_match_node(exynos_pmu_of_device_ids, dev->of_node);
+
+       pmu_context->pmu_data = match->data;
+
+       if (pmu_context->pmu_data->pmu_init)
+               pmu_context->pmu_data->pmu_init();
+
+       platform_set_drvdata(pdev, pmu_context);
+
+       dev_dbg(dev, "Exynos PMU Driver probe done\n");
+       return 0;
+}
+
+static struct platform_driver exynos_pmu_driver = {
+       .driver  = {
+               .name   = "exynos-pmu",
+               .of_match_table = exynos_pmu_of_device_ids,
+       },
+       .probe = exynos_pmu_probe,
+};
+
+static int __init exynos_pmu_init(void)
+{
+       return platform_driver_register(&exynos_pmu_driver);
+}
+postcore_initcall(exynos_pmu_init);
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
new file mode 100644 (file)
index 0000000..a469e36
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Header for EXYNOS PMU Driver support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_PMU_H
+#define __EXYNOS_PMU_H
+
+#include <linux/io.h>
+
+#define PMU_TABLE_END  (-1U)
+
+struct exynos_pmu_conf {
+       unsigned int offset;
+       u8 val[NUM_SYS_POWERDOWN];
+};
+
+struct exynos_pmu_data {
+       const struct exynos_pmu_conf *pmu_config;
+       const struct exynos_pmu_conf *pmu_config_extra;
+
+       void (*pmu_init)(void);
+       void (*powerdown_conf)(enum sys_powerdown);
+       void (*powerdown_conf_extra)(enum sys_powerdown);
+};
+
+extern void __iomem *pmu_base_addr;
+/* list of all exported SoC specific data */
+extern const struct exynos_pmu_data exynos3250_pmu_data;
+extern const struct exynos_pmu_data exynos4210_pmu_data;
+extern const struct exynos_pmu_data exynos4212_pmu_data;
+extern const struct exynos_pmu_data exynos4412_pmu_data;
+extern const struct exynos_pmu_data exynos5250_pmu_data;
+extern const struct exynos_pmu_data exynos5420_pmu_data;
+
+extern void pmu_raw_writel(u32 val, u32 offset);
+extern u32 pmu_raw_readl(u32 offset);
+#endif /* __EXYNOS_PMU_H */
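
A minimal usage sketch (not part of the patch) of the exynos_sys_powerdown_conf() helper implemented above: a platform low-power path would call it with one of the sys_powerdown modes before entering that state. This assumes SYS_SLEEP and the function declaration come from <linux/soc/samsung/exynos-pmu.h>, which the driver already includes; the caller name below is hypothetical.

#include <linux/soc/samsung/exynos-pmu.h>

/* Hypothetical suspend path: apply the per-IP SYS_PWR table for the SLEEP mode. */
static void example_prepare_for_sleep(void)
{
	exynos_sys_powerdown_conf(SYS_SLEEP);
}
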
diff --git a/drivers/soc/samsung/exynos-srom.c b/drivers/soc/samsung/exynos-srom.c
new file mode 100644 (file)
index 0000000..a4cf547
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *           http://www.samsung.com/
+ *
+ * EXYNOS - SROM Controller support
+ * Author: Pankaj Dubey <pankaj.dubey@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "exynos-srom.h"
+
+static const unsigned long exynos_srom_offsets[] = {
+       /* SROM side */
+       EXYNOS_SROM_BW,
+       EXYNOS_SROM_BC0,
+       EXYNOS_SROM_BC1,
+       EXYNOS_SROM_BC2,
+       EXYNOS_SROM_BC3,
+};
+
+/**
+ * struct exynos_srom_reg_dump: register dump of SROM Controller registers.
+ * @offset: srom register offset from the controller base address.
+ * @value: the value of register under the offset.
+ */
+struct exynos_srom_reg_dump {
+       u32     offset;
+       u32     value;
+};
+
+/**
+ * struct exynos_srom: platform data for exynos srom controller driver.
+ * @dev: platform device pointer
+ * @reg_base: srom base address
+ * @reg_offset: array of exynos_srom_reg_dump entries holding each register offset and its saved value.
+ */
+struct exynos_srom {
+       struct device *dev;
+       void __iomem *reg_base;
+       struct exynos_srom_reg_dump *reg_offset;
+};
+
+static struct exynos_srom_reg_dump *exynos_srom_alloc_reg_dump(
+               const unsigned long *rdump,
+               unsigned long nr_rdump)
+{
+       struct exynos_srom_reg_dump *rd;
+       unsigned int i;
+
+       rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
+       if (!rd)
+               return NULL;
+
+       for (i = 0; i < nr_rdump; ++i)
+               rd[i].offset = rdump[i];
+
+       return rd;
+}
+
+static int exynos_srom_configure_bank(struct exynos_srom *srom,
+                                     struct device_node *np)
+{
+       u32 bank, width, pmc;
+       u32 timing[6];
+       u32 cs, bw;
+
+       if (of_property_read_u32(np, "reg", &bank))
+               return -EINVAL;
+       if (of_property_read_u32(np, "reg-io-width", &width))
+               width = 1;
+       if (of_property_read_u32(np, "samsung,srom-page-mode", &pmc))
+               pmc = 0;
+       if (of_property_read_u32_array(np, "samsung,srom-timing", timing,
+                                      ARRAY_SIZE(timing)))
+               return -EINVAL;
+
+       bank *= 4; /* Convert bank into shift/offset */
+
+       cs = 1 << EXYNOS_SROM_BW__BYTEENABLE__SHIFT;
+       if (width == 2)
+               cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT;
+
+       bw = __raw_readl(srom->reg_base + EXYNOS_SROM_BW);
+       bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank);
+       __raw_writel(bw, srom->reg_base + EXYNOS_SROM_BW);
+
+       __raw_writel((pmc << EXYNOS_SROM_BCX__PMC__SHIFT) |
+                   (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
+                   (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
+                   (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
+                   (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
+                   (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
+                   (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
+                   srom->reg_base + EXYNOS_SROM_BC0 + bank);
+
+       return 0;
+}
+
+static int exynos_srom_probe(struct platform_device *pdev)
+{
+       struct device_node *np, *child;
+       struct exynos_srom *srom;
+       struct device *dev = &pdev->dev;
+       bool bad_bank_config = false;
+
+       np = dev->of_node;
+       if (!np) {
+               dev_err(&pdev->dev, "could not find device info\n");
+               return -EINVAL;
+       }
+
+       srom = devm_kzalloc(&pdev->dev,
+                       sizeof(struct exynos_srom), GFP_KERNEL);
+       if (!srom)
+               return -ENOMEM;
+
+       srom->dev = dev;
+       srom->reg_base = of_iomap(np, 0);
+       if (!srom->reg_base) {
+               dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
+               return -ENOMEM;
+       }
+
+       platform_set_drvdata(pdev, srom);
+
+       srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
+                       ARRAY_SIZE(exynos_srom_offsets));
+       if (!srom->reg_offset) {
+               iounmap(srom->reg_base);
+               return -ENOMEM;
+       }
+
+       for_each_child_of_node(np, child) {
+               if (exynos_srom_configure_bank(srom, child)) {
+                       dev_err(dev,
+                               "Could not decode bank configuration for %s\n",
+                               child->name);
+                       bad_bank_config = true;
+               }
+       }
+
+       /*
+        * If any bank failed to configure, we still provide suspend/resume,
+        * but do not probe child devices
+        */
+       if (bad_bank_config)
+               return 0;
+
+       return of_platform_populate(np, NULL, NULL, dev);
+}
+
+static int exynos_srom_remove(struct platform_device *pdev)
+{
+       struct exynos_srom *srom = platform_get_drvdata(pdev);
+
+       kfree(srom->reg_offset);
+       iounmap(srom->reg_base);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void exynos_srom_save(void __iomem *base,
+                                   struct exynos_srom_reg_dump *rd,
+                                   unsigned int num_regs)
+{
+       for (; num_regs > 0; --num_regs, ++rd)
+               rd->value = readl(base + rd->offset);
+}
+
+static void exynos_srom_restore(void __iomem *base,
+                                     const struct exynos_srom_reg_dump *rd,
+                                     unsigned int num_regs)
+{
+       for (; num_regs > 0; --num_regs, ++rd)
+               writel(rd->value, base + rd->offset);
+}
+
+static int exynos_srom_suspend(struct device *dev)
+{
+       struct exynos_srom *srom = dev_get_drvdata(dev);
+
+       exynos_srom_save(srom->reg_base, srom->reg_offset,
+                               ARRAY_SIZE(exynos_srom_offsets));
+       return 0;
+}
+
+static int exynos_srom_resume(struct device *dev)
+{
+       struct exynos_srom *srom = dev_get_drvdata(dev);
+
+       exynos_srom_restore(srom->reg_base, srom->reg_offset,
+                               ARRAY_SIZE(exynos_srom_offsets));
+       return 0;
+}
+#endif
+
+static const struct of_device_id of_exynos_srom_ids[] = {
+       {
+               .compatible     = "samsung,exynos-srom",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_exynos_srom_ids);
+
+static SIMPLE_DEV_PM_OPS(exynos_srom_pm_ops, exynos_srom_suspend, exynos_srom_resume);
+
+static struct platform_driver exynos_srom_driver = {
+       .probe = exynos_srom_probe,
+       .remove = exynos_srom_remove,
+       .driver = {
+               .name = "exynos-srom",
+               .of_match_table = of_exynos_srom_ids,
+               .pm = &exynos_srom_pm_ops,
+       },
+};
+module_platform_driver(exynos_srom_driver);
+
+MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
+MODULE_DESCRIPTION("Exynos SROM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/samsung/exynos-srom.h b/drivers/soc/samsung/exynos-srom.h
new file mode 100644 (file)
index 0000000..34660c6
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Exynos SROMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __EXYNOS_SROM_H
+#define __EXYNOS_SROM_H __FILE__
+
+#define EXYNOS_SROMREG(x)              (x)
+
+#define EXYNOS_SROM_BW         EXYNOS_SROMREG(0x0)
+#define EXYNOS_SROM_BC0                EXYNOS_SROMREG(0x4)
+#define EXYNOS_SROM_BC1                EXYNOS_SROMREG(0x8)
+#define EXYNOS_SROM_BC2                EXYNOS_SROMREG(0xc)
+#define EXYNOS_SROM_BC3                EXYNOS_SROMREG(0x10)
+#define EXYNOS_SROM_BC4                EXYNOS_SROMREG(0x14)
+#define EXYNOS_SROM_BC5                EXYNOS_SROMREG(0x18)
+
+/* one register BW holds 4 x 4-bit packed settings for NCS0 - NCS3 */
+
+#define EXYNOS_SROM_BW__DATAWIDTH__SHIFT       0
+#define EXYNOS_SROM_BW__ADDRMODE__SHIFT                1
+#define EXYNOS_SROM_BW__WAITENABLE__SHIFT      2
+#define EXYNOS_SROM_BW__BYTEENABLE__SHIFT      3
+
+#define EXYNOS_SROM_BW__CS_MASK                        0xf
+
+#define EXYNOS_SROM_BW__NCS0__SHIFT            0
+#define EXYNOS_SROM_BW__NCS1__SHIFT            4
+#define EXYNOS_SROM_BW__NCS2__SHIFT            8
+#define EXYNOS_SROM_BW__NCS3__SHIFT            12
+#define EXYNOS_SROM_BW__NCS4__SHIFT            16
+#define EXYNOS_SROM_BW__NCS5__SHIFT            20
+
+/* the same field layout applies to BCS0 - BCS3 */
+
+#define EXYNOS_SROM_BCX__PMC__SHIFT            0
+#define EXYNOS_SROM_BCX__TACP__SHIFT           4
+#define EXYNOS_SROM_BCX__TCAH__SHIFT           8
+#define EXYNOS_SROM_BCX__TCOH__SHIFT           12
+#define EXYNOS_SROM_BCX__TACC__SHIFT           16
+#define EXYNOS_SROM_BCX__TCOS__SHIFT           24
+#define EXYNOS_SROM_BCX__TACS__SHIFT           28
+
+#endif /* __EXYNOS_SROM_H */
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
new file mode 100644 (file)
index 0000000..20b3ab8
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS3250 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static struct exynos_pmu_conf exynos3250_pmu_config[] = {
+       /* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } } */
+       { EXYNOS3_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS3_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS3_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
+       { EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
+       { EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS3_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
+       { EXYNOS3_ARM_L2_SYS_PWR_REG,                   { 0x0, 0x0, 0x3} },
+       { EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x1, 0x0} },
+       { EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x1, 0x0} },
+       { EXYNOS3_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG,       { 0x1, 0x1, 0x1} },
+       { EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS3_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x1} },
+       { EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS3_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS3_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS3_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x3, 0x3} },
+       { EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG,          { 0x3, 0x0, 0x0} },
+       { EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x1} },
+       { EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG,          { 0x3, 0x3, 0x3} },
+       { EXYNOS3_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS3_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x1, 0x1} },
+       { EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG,      { 0x1, 0x1, 0x0} },
+       { EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG,     { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS3_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x0} },
+       { EXYNOS3_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
+       { EXYNOS3_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
+       { EXYNOS3_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
+       { EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+       { EXYNOS3_CAM_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS3_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS3_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS3_LCD0_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
+       { EXYNOS3_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS3_MAUDIO_SYS_PWR_REG,                   { 0x7, 0x0, 0x0} },
+       { EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos3250_list_feed[] = {
+       EXYNOS3_ARM_CORE_OPTION(0),
+       EXYNOS3_ARM_CORE_OPTION(1),
+       EXYNOS3_ARM_CORE_OPTION(2),
+       EXYNOS3_ARM_CORE_OPTION(3),
+       EXYNOS3_ARM_COMMON_OPTION,
+       EXYNOS3_TOP_PWR_OPTION,
+       EXYNOS3_CORE_TOP_PWR_OPTION,
+       S5P_CAM_OPTION,
+       S5P_MFC_OPTION,
+       S5P_G3D_OPTION,
+       S5P_LCD0_OPTION,
+       S5P_ISP_OPTION,
+};
+
+static void exynos3250_powerdown_conf_extra(enum sys_powerdown mode)
+{
+       unsigned int i;
+       unsigned int tmp;
+
+       /* Enable only SC_FEEDBACK */
+       for (i = 0; i < ARRAY_SIZE(exynos3250_list_feed); i++) {
+               tmp = pmu_raw_readl(exynos3250_list_feed[i]);
+               tmp &= ~(EXYNOS3_OPTION_USE_SC_COUNTER);
+               tmp |= EXYNOS3_OPTION_USE_SC_FEEDBACK;
+               pmu_raw_writel(tmp, exynos3250_list_feed[i]);
+       }
+
+       if (mode != SYS_SLEEP)
+               return;
+
+       pmu_raw_writel(XUSBXTI_DURATION, EXYNOS3_XUSBXTI_DURATION);
+       pmu_raw_writel(XXTI_DURATION, EXYNOS3_XXTI_DURATION);
+       pmu_raw_writel(EXT_REGULATOR_DURATION, EXYNOS3_EXT_REGULATOR_DURATION);
+       pmu_raw_writel(EXT_REGULATOR_COREBLK_DURATION,
+                      EXYNOS3_EXT_REGULATOR_COREBLK_DURATION);
+}
+
+static void exynos3250_pmu_init(void)
+{
+       unsigned int value;
+
+       /*
+        * To prevent the L2 memory system from issuing new bus requests
+        * while a core is powered down, L2 power down must be set to '1'.
+        */
+       value = pmu_raw_readl(EXYNOS3_ARM_COMMON_OPTION);
+       value |= EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+       pmu_raw_writel(value, EXYNOS3_ARM_COMMON_OPTION);
+
+       /* Enable USE_STANDBY_WFI for all CORE */
+       pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+       /*
+        * Set PSHOLD port for output high
+        */
+       value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+       value |= S5P_PS_HOLD_OUTPUT_HIGH;
+       pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+
+       /*
+        * Enable signal for PSHOLD port
+        */
+       value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+       value |= S5P_PS_HOLD_EN;
+       pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+}
+
+const struct exynos_pmu_data exynos3250_pmu_data = {
+       .pmu_config     = exynos3250_pmu_config,
+       .pmu_init       = exynos3250_pmu_init,
+       .powerdown_conf_extra   = exynos3250_powerdown_conf_extra,
+};
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
new file mode 100644 (file)
index 0000000..bc4fa73
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS4 - CPU PMU(Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
+       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+       { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_CORE1_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE1,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL1,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_COMMON_LOWPWR,                { 0x0, 0x0, 0x2 } },
+       { S5P_L2_0_LOWPWR,                      { 0x2, 0x2, 0x3 } },
+       { S5P_L2_1_LOWPWR,                      { 0x2, 0x2, 0x3 } },
+       { S5P_CMU_ACLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_SCLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_LOWPWR,                 { 0x1, 0x1, 0x0 } },
+       { S5P_APLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_MPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_VPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_EPLL_SYSCLK_LOWPWR,               { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR,     { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_GPSALIVE_LOWPWR,        { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_CAM_LOWPWR,           { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_TV_LOWPWR,            { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_MFC_LOWPWR,           { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_G3D_LOWPWR,           { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_LCD0_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_LCD1_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR,        { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_GPS_LOWPWR,           { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_CAM_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_TV_LOWPWR,              { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_MFC_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_G3D_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_LCD0_LOWPWR,            { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_LCD1_LOWPWR,            { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_GPS_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_TOP_BUS_LOWPWR,                   { 0x3, 0x0, 0x0 } },
+       { S5P_TOP_RETENTION_LOWPWR,             { 0x1, 0x0, 0x1 } },
+       { S5P_TOP_PWR_LOWPWR,                   { 0x3, 0x0, 0x3 } },
+       { S5P_LOGIC_RESET_LOWPWR,               { 0x1, 0x1, 0x0 } },
+       { S5P_ONENAND_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_MODIMIF_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_G2D_ACP_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_USBOTG_MEM_LOWPWR,                { 0x3, 0x0, 0x0 } },
+       { S5P_HSMMC_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_CSSYS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_SECSS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_PCIE_MEM_LOWPWR,                  { 0x3, 0x0, 0x0 } },
+       { S5P_SATA_MEM_LOWPWR,                  { 0x3, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_DRAM_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MAUDIO_LOWPWR,      { 0x1, 0x1, 0x0 } },
+       { S5P_PAD_RETENTION_GPIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_UART_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MMCA_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MMCB_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_EBIA_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_EBIB_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_ISOLATION_LOWPWR,   { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_ALV_SEL_LOWPWR,     { 0x1, 0x0, 0x0 } },
+       { S5P_XUSBXTI_LOWPWR,                   { 0x1, 0x1, 0x0 } },
+       { S5P_XXTI_LOWPWR,                      { 0x1, 0x1, 0x0 } },
+       { S5P_EXT_REGULATOR_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_GPIO_MODE_LOWPWR,                 { 0x1, 0x0, 0x0 } },
+       { S5P_GPIO_MODE_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CAM_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_TV_LOWPWR,                        { 0x7, 0x0, 0x0 } },
+       { S5P_MFC_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_G3D_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_LCD0_LOWPWR,                      { 0x7, 0x0, 0x0 } },
+       { S5P_LCD1_LOWPWR,                      { 0x7, 0x0, 0x0 } },
+       { S5P_MAUDIO_LOWPWR,                    { 0x7, 0x7, 0x0 } },
+       { S5P_GPS_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_GPS_ALIVE_LOWPWR,                 { 0x7, 0x0, 0x0 } },
+       { PMU_TABLE_END,},
+};
+
+static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
+       { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_CORE1_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE1,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL1,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ISP_ARM_LOWPWR,                   { 0x1, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR,     { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR,   { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_COMMON_LOWPWR,                { 0x0, 0x0, 0x2 } },
+       { S5P_L2_0_LOWPWR,                      { 0x0, 0x0, 0x3 } },
+       /* the corresponding XXX_OPTION registers need a different field value set */
+       { S5P_ARM_L2_0_OPTION,                  { 0x10, 0x10, 0x0 } },
+       { S5P_L2_1_LOWPWR,                      { 0x0, 0x0, 0x3 } },
+       { S5P_ARM_L2_1_OPTION,                  { 0x10, 0x10, 0x0 } },
+       { S5P_CMU_ACLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_SCLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_LOWPWR,                 { 0x1, 0x1, 0x0 } },
+       { S5P_DRAM_FREQ_DOWN_LOWPWR,            { 0x1, 0x1, 0x1 } },
+       { S5P_DDRPHY_DLLOFF_LOWPWR,             { 0x1, 0x1, 0x1 } },
+       { S5P_LPDDR_PHY_DLL_LOCK_LOWPWR,        { 0x1, 0x1, 0x1 } },
+       { S5P_CMU_ACLKSTOP_COREBLK_LOWPWR,      { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_SCLKSTOP_COREBLK_LOWPWR,      { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_COREBLK_LOWPWR,         { 0x1, 0x1, 0x0 } },
+       { S5P_APLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_MPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_VPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_EPLL_SYSCLK_LOWPWR,               { 0x1, 0x1, 0x0 } },
+       { S5P_MPLLUSER_SYSCLK_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR,     { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_GPSALIVE_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_CAM_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_TV_LOWPWR,            { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_MFC_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_G3D_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_LCD0_LOWPWR,          { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_ISP_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_GPS_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_CAM_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_TV_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_MFC_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_G3D_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_LCD0_LOWPWR,            { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_ISP_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_GPS_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_TOP_BUS_LOWPWR,                   { 0x3, 0x0, 0x0 } },
+       { S5P_TOP_RETENTION_LOWPWR,             { 0x1, 0x0, 0x1 } },
+       { S5P_TOP_PWR_LOWPWR,                   { 0x3, 0x0, 0x3 } },
+       { S5P_TOP_BUS_COREBLK_LOWPWR,           { 0x3, 0x0, 0x0 } },
+       { S5P_TOP_RETENTION_COREBLK_LOWPWR,     { 0x1, 0x0, 0x1 } },
+       { S5P_TOP_PWR_COREBLK_LOWPWR,           { 0x3, 0x0, 0x3 } },
+       { S5P_LOGIC_RESET_LOWPWR,               { 0x1, 0x1, 0x0 } },
+       { S5P_OSCCLK_GATE_LOWPWR,               { 0x1, 0x0, 0x1 } },
+       { S5P_LOGIC_RESET_COREBLK_LOWPWR,       { 0x1, 0x1, 0x0 } },
+       { S5P_OSCCLK_GATE_COREBLK_LOWPWR,       { 0x1, 0x0, 0x1 } },
+       { S5P_ONENAND_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_ONENAND_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
+       { S5P_HSI_MEM_LOWPWR,                   { 0x3, 0x0, 0x0 } },
+       { S5P_HSI_MEM_OPTION,                   { 0x10, 0x10, 0x0 } },
+       { S5P_G2D_ACP_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_G2D_ACP_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
+       { S5P_USBOTG_MEM_LOWPWR,                { 0x3, 0x0, 0x0 } },
+       { S5P_USBOTG_MEM_OPTION,                { 0x10, 0x10, 0x0 } },
+       { S5P_HSMMC_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_HSMMC_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
+       { S5P_CSSYS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_CSSYS_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
+       { S5P_SECSS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_SECSS_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
+       { S5P_ROTATOR_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_ROTATOR_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
+       { S5P_PAD_RETENTION_DRAM_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MAUDIO_LOWPWR,      { 0x1, 0x1, 0x0 } },
+       { S5P_PAD_RETENTION_GPIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_UART_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MMCA_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MMCB_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_EBIA_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_EBIB_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_ISOLATION_LOWPWR,   { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_ISOLATION_COREBLK_LOWPWR,     { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_ALV_SEL_LOWPWR,     { 0x1, 0x0, 0x0 } },
+       { S5P_XUSBXTI_LOWPWR,                   { 0x1, 0x1, 0x0 } },
+       { S5P_XXTI_LOWPWR,                      { 0x1, 0x1, 0x0 } },
+       { S5P_EXT_REGULATOR_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_GPIO_MODE_LOWPWR,                 { 0x1, 0x0, 0x0 } },
+       { S5P_GPIO_MODE_COREBLK_LOWPWR,         { 0x1, 0x0, 0x0 } },
+       { S5P_GPIO_MODE_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_TOP_ASB_RESET_LOWPWR,             { 0x1, 0x1, 0x1 } },
+       { S5P_TOP_ASB_ISOLATION_LOWPWR,         { 0x1, 0x0, 0x1 } },
+       { S5P_CAM_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_TV_LOWPWR,                        { 0x7, 0x0, 0x0 } },
+       { S5P_MFC_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_G3D_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_LCD0_LOWPWR,                      { 0x7, 0x0, 0x0 } },
+       { S5P_ISP_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_MAUDIO_LOWPWR,                    { 0x7, 0x7, 0x0 } },
+       { S5P_GPS_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_GPS_ALIVE_LOWPWR,                 { 0x7, 0x0, 0x0 } },
+       { S5P_CMU_SYSCLK_ISP_LOWPWR,            { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_SYSCLK_GPS_LOWPWR,            { 0x1, 0x0, 0x0 } },
+       { PMU_TABLE_END,},
+};
+
+static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
+       { S5P_ARM_CORE2_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE2,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL2,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_CORE3_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE3,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL3,                 { 0x0, 0x0, 0x0 } },
+       { PMU_TABLE_END,},
+};
+
+const struct exynos_pmu_data exynos4210_pmu_data = {
+       .pmu_config     = exynos4210_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4212_pmu_data = {
+       .pmu_config     = exynos4x12_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4412_pmu_data = {
+       .pmu_config             = exynos4x12_pmu_config,
+       .pmu_config_extra       = exynos4412_pmu_config,
+};
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
new file mode 100644 (file)
index 0000000..3fac425
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS5250 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos5250_pmu_config[] = {
+       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5_FSYS_ARM_SYS_PWR_REG,                 { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
+       { EXYNOS5_ARM_L2_SYS_PWR_REG,                   { 0x3, 0x3, 0x3} },
+       { EXYNOS5_ARM_L2_OPTION,                        { 0x10, 0x10, 0x0 } },
+       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x0, 0x3} },
+       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x3} },
+       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
+       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x1} },
+       { EXYNOS5_USBOTG_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_G2D_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_USBDRD_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SDMMC_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_CSSYS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SECSS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG,              { 0x3, 0x0, 0x0} },
+       { EXYNOS5_INTRAM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_INTROM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_JPEG_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
+       { EXYNOS5_JPEG_MEM_OPTION,                      { 0x10, 0x10, 0x0} },
+       { EXYNOS5_HSI_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SATA_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x1} },
+       { EXYNOS5_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
+       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x0, 0x1} },
+       { EXYNOS5_GSCL_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
+       { EXYNOS5_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_DISP1_SYS_PWR_REG,                    { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MAU_SYS_PWR_REG,                      { 0x7, 0x7, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG,          { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG,           { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5_list_both_cnt_feed[] = {
+       EXYNOS5_ARM_CORE0_OPTION,
+       EXYNOS5_ARM_CORE1_OPTION,
+       EXYNOS5_ARM_COMMON_OPTION,
+       EXYNOS5_GSCL_OPTION,
+       EXYNOS5_ISP_OPTION,
+       EXYNOS5_MFC_OPTION,
+       EXYNOS5_G3D_OPTION,
+       EXYNOS5_DISP1_OPTION,
+       EXYNOS5_MAU_OPTION,
+       EXYNOS5_TOP_PWR_OPTION,
+       EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+static unsigned int const exynos5_list_disable_wfi_wfe[] = {
+       EXYNOS5_ARM_CORE1_OPTION,
+       EXYNOS5_FSYS_ARM_OPTION,
+       EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5250_pmu_init(void)
+{
+       unsigned int value;
+       /*
+        * When SYS_WDTRESET is set, the watchdog timer reset request
+        * is ignored by the power management unit.
+        */
+       value = pmu_raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
+       value &= ~EXYNOS5_SYS_WDTRESET;
+       pmu_raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);
+
+       value = pmu_raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
+       value &= ~EXYNOS5_SYS_WDTRESET;
+       pmu_raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);
+}
+
+static void exynos5_powerdown_conf(enum sys_powerdown mode)
+{
+       unsigned int i;
+       unsigned int tmp;
+
+       /*
+        * Enable both SC_FEEDBACK and SC_COUNTER
+        */
+       for (i = 0; i < ARRAY_SIZE(exynos5_list_both_cnt_feed); i++) {
+               tmp = pmu_raw_readl(exynos5_list_both_cnt_feed[i]);
+               tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+                       EXYNOS5_USE_SC_COUNTER);
+               pmu_raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+       }
+
+       /*
+        * Enable the SKIP_DEACTIVATE_ACEACP_IN_PWDN bitfield
+        */
+       tmp = pmu_raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+       tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+       pmu_raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+       /*
+        * Disable WFI/WFE on XXX_OPTION
+        */
+       for (i = 0; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe); i++) {
+               tmp = pmu_raw_readl(exynos5_list_disable_wfi_wfe[i]);
+               tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+                        EXYNOS5_OPTION_USE_STANDBYWFI);
+               pmu_raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
+       }
+}
+
+const struct exynos_pmu_data exynos5250_pmu_data = {
+       .pmu_config     = exynos5250_pmu_config,
+       .pmu_init       = exynos5250_pmu_init,
+       .powerdown_conf = exynos5_powerdown_conf,
+};
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
new file mode 100644 (file)
index 0000000..b962fb6
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS5420 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pm.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include <asm/cputype.h>
+
+#include "exynos-pmu.h"
+
+static struct exynos_pmu_conf exynos5420_pmu_config[] = {
+       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_ARM_CORE2_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_ARM_CORE3_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_CORE0_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_CORE1_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_CORE2_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_CORE3_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_ARM_COMMON_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_COMMON_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_L2_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_L2_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x0, 0x1} },
+       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,           { 0x1, 0x0, 0x1} },
+       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x3, 0x0} },
+       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
+       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
+       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_INTRAM_MEM_SYS_PWR_REG,            { 0x3, 0x0, 0x3} },
+       { EXYNOS5420_INTROM_MEM_SYS_PWR_REG,            { 0x3, 0x0, 0x3} },
+       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x0} },
+       { EXYNOS5_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
+       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GSCL_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
+       { EXYNOS5_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_DISP1_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_MAU_SYS_PWR_REG,                   { 0x7, 0x7, 0x0} },
+       { EXYNOS5420_G2D_SYS_PWR_REG,                   { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_MSC_SYS_PWR_REG,                   { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_FSYS_SYS_PWR_REG,                  { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_FSYS2_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_PSGEN_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_PERIC_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_WCORE_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,         { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,           { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,           { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,           { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,         { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,         { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,         { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5420_list_disable_pmu_reg[] = {
+       EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,
+       EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,
+       EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,
+       EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,
+       EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,
+       EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,
+       EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,
+       EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,
+       EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,
+};
+
+static void exynos5420_powerdown_conf(enum sys_powerdown mode)
+{
+       u32 this_cluster;
+
+       this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
+
+       /*
+        * Set the cluster ID in the IROM register to ensure that we
+        * wake up on the current cluster.
+        */
+       pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
+}
+
+static void exynos5420_pmu_init(void)
+{
+       unsigned int value;
+       int i;
+
+       /*
+        * Set the CMU_RESET, CMU_SYSCLK and CMU_CLKSTOP registers
+        * for local power blocks to Low initially as per Table 8-4:
+        * "System-Level Power-Down Configuration Registers".
+        */
+       for (i = 0; i < ARRAY_SIZE(exynos5420_list_disable_pmu_reg); i++)
+               pmu_raw_writel(0, exynos5420_list_disable_pmu_reg[i]);
+
+       /* Enable USE_STANDBY_WFI for all CORE */
+       pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+       value  = pmu_raw_readl(EXYNOS_L2_OPTION(0));
+       value &= ~EXYNOS5_USE_RETENTION;
+       pmu_raw_writel(value, EXYNOS_L2_OPTION(0));
+
+       value = pmu_raw_readl(EXYNOS_L2_OPTION(1));
+       value &= ~EXYNOS5_USE_RETENTION;
+       pmu_raw_writel(value, EXYNOS_L2_OPTION(1));
+
+       /*
+        * If L2_COMMON is turned off, clocks related to ATB async
+        * bridge are gated. Thus, when ISP power is gated, LPI
+        * may get stuck.
+        */
+       value = pmu_raw_readl(EXYNOS5420_LPI_MASK);
+       value |= EXYNOS5420_ATB_ISP_ARM;
+       pmu_raw_writel(value, EXYNOS5420_LPI_MASK);
+
+       value  = pmu_raw_readl(EXYNOS5420_LPI_MASK1);
+       value |= EXYNOS5420_ATB_KFC;
+       pmu_raw_writel(value, EXYNOS5420_LPI_MASK1);
+
+       /* Prevent issuing of new bus requests from L2 memory */
+       value = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION);
+       value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+       pmu_raw_writel(value, EXYNOS5420_ARM_COMMON_OPTION);
+
+       value = pmu_raw_readl(EXYNOS5420_KFC_COMMON_OPTION);
+       value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+       pmu_raw_writel(value, EXYNOS5420_KFC_COMMON_OPTION);
+
+       /* This setting is to reduce suspend/resume time */
+       pmu_raw_writel(DUR_WAIT_RESET, EXYNOS5420_LOGIC_RESET_DURATION3);
+
+       /* Serialized CPU wakeup of Eagle */
+       pmu_raw_writel(SPREAD_ENABLE, EXYNOS5420_ARM_INTR_SPREAD_ENABLE);
+
+       pmu_raw_writel(SPREAD_USE_STANDWFI,
+                       EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI);
+
+       pmu_raw_writel(0x1, EXYNOS5420_UP_SCHEDULER);
+
+       pr_info("EXYNOS5420 PMU initialized\n");
+}
+
+const struct exynos_pmu_data exynos5420_pmu_data = {
+       .pmu_config     = exynos5420_pmu_config,
+       .pmu_init       = exynos5420_pmu_init,
+       .powerdown_conf = exynos5420_powerdown_conf,
+};
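
The three values in each row of the table above are what the PMU code programs into that SYS_PWR register for the AFTR, LPA and SLEEP low-power modes respectively. A minimal userspace sketch of how such a table is walked for a chosen mode follows; the register offsets, values and the write helper are illustrative stand-ins, not the kernel's pmu_raw_writel() path.

#include <stdio.h>

enum sys_powerdown { SYS_AFTR, SYS_LPA, SYS_SLEEP, NUM_SYS_POWERDOWN };

struct pmu_conf {
        unsigned int offset;                    /* stand-in PMU register offset */
        unsigned int val[NUM_SYS_POWERDOWN];    /* value per low-power mode */
};

#define PMU_TABLE_END   0xffffffffu

static const struct pmu_conf table[] = {
        { 0x1180, { 0x1, 0x0, 0x0 } },          /* made-up offsets and values */
        { 0x11a0, { 0x7, 0x0, 0x0 } },
        { PMU_TABLE_END, { 0 } },
};

static void pmu_write(unsigned int val, unsigned int offset)
{
        printf("write 0x%x -> 0x%04x\n", val, offset);  /* stands in for pmu_raw_writel() */
}

int main(void)
{
        enum sys_powerdown mode = SYS_SLEEP;
        const struct pmu_conf *c;

        for (c = table; c->offset != PMU_TABLE_END; c++)
                pmu_write(c->val[mode], c->offset);
        return 0;
}
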
index bc52670c8f4b33a9b8181d19e89566a3c87f3c69..99e354c8f53f8cdff0c34c4285a42cef4021565c 100644 (file)
@@ -117,7 +117,7 @@ static int sunxi_sram_show(struct seq_file *s, void *data)
 
                        val = readl(base + sram_data->reg);
                        val >>= sram_data->offset;
-                       val &= sram_data->width;
+                       val &= GENMASK(sram_data->width - 1, 0);
 
                        for (func = sram_data->func; func->func; func++) {
                                seq_printf(s, "\t\t%s%c\n", func->func,
@@ -208,7 +208,8 @@ int sunxi_sram_claim(struct device *dev)
                return -EBUSY;
        }
 
-       mask = GENMASK(sram_data->offset + sram_data->width, sram_data->offset);
+       mask = GENMASK(sram_data->offset + sram_data->width - 1,
+                      sram_data->offset);
        val = readl(base + sram_data->reg);
        val &= ~mask;
        writel(val | ((device << sram_data->offset) & mask),
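
Both sunxi_sram hunks above fix the same class of bug: the field width was used directly as the mask (or the mask covered one bit too many) instead of a mask of exactly `width` bits. A small standalone sketch of the difference; the register value and field geometry are made up, and GENMASK() below simply mirrors the 32-bit kernel macro for valid arguments.

#include <stdio.h>

#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
        unsigned int reg = 0xA5, width = 4, offset = 0;

        unsigned int wrong = (reg >> offset) & width;                   /* & 0x4: keeps one bit */
        unsigned int right = (reg >> offset) & GENMASK(width - 1, 0);   /* & 0xF: keeps the field */

        printf("wrong=0x%x right=0x%x\n", wrong, right);                /* wrong=0x4 right=0x5 */
        return 0;
}
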
index d0c3c3e085e33fcba1fa994b722d226f96d1fbeb..03089ad2fc6574be9af2b236d7c5c3761c971b7e 100644 (file)
@@ -31,7 +31,6 @@ config ARCH_TEGRA_3x_SOC
 config ARCH_TEGRA_114_SOC
        bool "Enable support for Tegra114 family"
        select ARM_ERRATA_798181 if SMP
-       select ARM_L1_CACHE_SHIFT_6
        select HAVE_ARM_ARCH_TIMER
        select PINCTRL_TEGRA114
        select TEGRA_TIMER
@@ -41,7 +40,6 @@ config ARCH_TEGRA_114_SOC
 
 config ARCH_TEGRA_124_SOC
        bool "Enable support for Tegra124 family"
-       select ARM_L1_CACHE_SHIFT_6
        select HAVE_ARM_ARCH_TIMER
        select PINCTRL_TEGRA124
        select TEGRA_TIMER
index bc34cf7482fb559c6fcfdece56a5b4d88c24266b..88c7e506177e89354b9eb3fb533632b1ff050c09 100644 (file)
@@ -113,8 +113,11 @@ struct tegra_pmc_soc {
 
 /**
  * struct tegra_pmc - NVIDIA Tegra PMC
+ * @dev: pointer to PMC device structure
  * @base: pointer to I/O remapped register region
  * @clk: pointer to pclk clock
+ * @soc: pointer to SoC data structure
+ * @debugfs: pointer to debugfs entry
  * @rate: currently configured rate of pclk
  * @suspend_mode: lowest suspend mode available
  * @cpu_good_time: CPU power good time (in microseconds)
@@ -134,6 +137,7 @@ struct tegra_pmc {
        struct device *dev;
        void __iomem *base;
        struct clk *clk;
+       struct dentry *debugfs;
 
        const struct tegra_pmc_soc *soc;
 
@@ -445,11 +449,9 @@ static const struct file_operations powergate_fops = {
 
 static int tegra_powergate_debugfs_init(void)
 {
-       struct dentry *d;
-
-       d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
-                               &powergate_fops);
-       if (!d)
+       pmc->debugfs = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
+                                          &powergate_fops);
+       if (!pmc->debugfs)
                return -ENOMEM;
 
        return 0;
@@ -727,7 +729,7 @@ static void tegra_pmc_init(struct tegra_pmc *pmc)
        tegra_pmc_writel(value, PMC_CNTRL);
 }
 
-void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
+static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
 {
        static const char disabled[] = "emergency thermal reset disabled";
        u32 pmu_addr, ctrl_id, reg_addr, reg_data, pinmux;
@@ -842,6 +844,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
 
        err = register_restart_handler(&tegra_pmc_restart_handler);
        if (err) {
+               debugfs_remove(pmc->debugfs);
                dev_err(&pdev->dev, "unable to register restart handler, %d\n",
                        err);
                return err;
@@ -1006,17 +1009,13 @@ static const char * const tegra210_powergates[] = {
        [TEGRA_POWERGATE_3D] = "3d",
        [TEGRA_POWERGATE_VENC] = "venc",
        [TEGRA_POWERGATE_PCIE] = "pcie",
-       [TEGRA_POWERGATE_L2] = "l2",
        [TEGRA_POWERGATE_MPE] = "mpe",
-       [TEGRA_POWERGATE_HEG] = "heg",
        [TEGRA_POWERGATE_SATA] = "sata",
        [TEGRA_POWERGATE_CPU1] = "cpu1",
        [TEGRA_POWERGATE_CPU2] = "cpu2",
        [TEGRA_POWERGATE_CPU3] = "cpu3",
-       [TEGRA_POWERGATE_CELP] = "celp",
        [TEGRA_POWERGATE_CPU0] = "cpu0",
        [TEGRA_POWERGATE_C0NC] = "c0nc",
-       [TEGRA_POWERGATE_C1NC] = "c1nc",
        [TEGRA_POWERGATE_SOR] = "sor",
        [TEGRA_POWERGATE_DIS] = "dis",
        [TEGRA_POWERGATE_DISB] = "disb",
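
The tegra_pmc change keeps the debugfs dentry in the driver state so that a later probe failure (here, register_restart_handler()) can remove the file instead of leaking it. A generic standalone model of that unwind pattern, assuming nothing about the driver; the helpers below only stand in for debugfs_create_file()/debugfs_remove().

#include <stdio.h>
#include <stdlib.h>

struct state {
        void *debugfs;          /* handle kept so later failures can release it */
};

static void *create_file(void)      { return malloc(1); }      /* stands in for debugfs_create_file() */
static int   register_handler(void) { return -1; }             /* pretend this later step fails */

static int probe(struct state *s)
{
        s->debugfs = create_file();
        if (!s->debugfs)
                return -1;

        if (register_handler() != 0) {
                free(s->debugfs);       /* unwind the earlier step, as debugfs_remove() does */
                s->debugfs = NULL;
                return -1;
        }
        return 0;
}

int main(void)
{
        struct state s = { 0 };

        printf("probe: %d\n", probe(&s));
        return 0;
}
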
index 79a8bc4f6cec9e32ec9c78627a2e68fc85ab5e12..aa9561f586ab9a047410652f811ad4ebdffbc7ca 100644 (file)
@@ -199,6 +199,7 @@ struct rockchip_spi {
        struct sg_table rx_sg;
        struct rockchip_spi_dma_data dma_rx;
        struct rockchip_spi_dma_data dma_tx;
+       struct dma_slave_caps dma_caps;
 };
 
 static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
@@ -449,7 +450,10 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
                rxconf.direction = rs->dma_rx.direction;
                rxconf.src_addr = rs->dma_rx.addr;
                rxconf.src_addr_width = rs->n_bytes;
-               rxconf.src_maxburst = rs->n_bytes;
+               if (rs->dma_caps.max_burst > 4)
+                       rxconf.src_maxburst = 4;
+               else
+                       rxconf.src_maxburst = 1;
                dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
 
                rxdesc = dmaengine_prep_slave_sg(
@@ -466,7 +470,10 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
                txconf.direction = rs->dma_tx.direction;
                txconf.dst_addr = rs->dma_tx.addr;
                txconf.dst_addr_width = rs->n_bytes;
-               txconf.dst_maxburst = rs->n_bytes;
+               if (rs->dma_caps.max_burst > 4)
+                       txconf.dst_maxburst = 4;
+               else
+                       txconf.dst_maxburst = 1;
                dmaengine_slave_config(rs->dma_tx.ch, &txconf);
 
                txdesc = dmaengine_prep_slave_sg(
@@ -730,6 +737,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
        }
 
        if (rs->dma_tx.ch && rs->dma_rx.ch) {
+               dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
                rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
                rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
                rs->dma_tx.direction = DMA_MEM_TO_DEV;
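
Instead of deriving the DMA burst length from the transfer word size, the driver now queries the controller's capabilities once at probe time and clamps the burst accordingly. A sketch of that selection logic with made-up capability values; the struct only models what dma_get_slave_caps() would report.

#include <stdio.h>

struct dma_caps {
        unsigned int max_burst;         /* what dma_get_slave_caps() would report */
};

static unsigned int pick_maxburst(const struct dma_caps *caps)
{
        /* use 4-word bursts when the controller supports them,
         * otherwise fall back to single transfers */
        return caps->max_burst > 4 ? 4 : 1;
}

int main(void)
{
        struct dma_caps strong = { .max_burst = 16 };
        struct dma_caps weak   = { .max_burst = 1 };

        printf("%u %u\n", pick_maxburst(&strong), pick_maxburst(&weak));        /* 4 1 */
        return 0;
}
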
index be822f7a9ce6262442ce3bad9426ad6ff2fb04e9..aca282d454213addcad7169c2a8c1732a41e195e 100644 (file)
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -47,9 +48,9 @@
 #define SPMI_MAPPING_BIT_IS_1_FLAG(X)  (((X) >> 8) & 0x1)
 #define SPMI_MAPPING_BIT_IS_1_RESULT(X)        (((X) >> 0) & 0xFF)
 
-#define SPMI_MAPPING_TABLE_LEN         255
 #define SPMI_MAPPING_TABLE_TREE_DEPTH  16      /* Maximum of 16-bits */
-#define PPID_TO_CHAN_TABLE_SZ          BIT(12) /* PPID is 12bit chan is 1byte*/
+#define PMIC_ARB_MAX_PPID              BIT(12) /* PPID is 12bit */
+#define PMIC_ARB_CHAN_VALID            BIT(15)
 
 /* Ownership Table */
 #define SPMI_OWNERSHIP_TABLE_REG(N)    (0x0700 + (4 * (N)))
@@ -85,9 +86,7 @@ enum pmic_arb_cmd_op_code {
 };
 
 /* Maximum number of support PMIC peripherals */
-#define PMIC_ARB_MAX_PERIPHS           256
-#define PMIC_ARB_MAX_CHNL              128
-#define PMIC_ARB_PERIPH_ID_VALID       (1 << 15)
+#define PMIC_ARB_MAX_PERIPHS           512
 #define PMIC_ARB_TIMEOUT_US            100
 #define PMIC_ARB_MAX_TRANS_BYTES       (8)
 
@@ -125,18 +124,22 @@ struct spmi_pmic_arb_dev {
        void __iomem            *wr_base;
        void __iomem            *intr;
        void __iomem            *cnfg;
+       void __iomem            *core;
+       resource_size_t         core_size;
        raw_spinlock_t          lock;
        u8                      channel;
        int                     irq;
        u8                      ee;
-       u8                      min_apid;
-       u8                      max_apid;
-       u32                     mapping_table[SPMI_MAPPING_TABLE_LEN];
+       u16                     min_apid;
+       u16                     max_apid;
+       u32                     *mapping_table;
+       DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
        struct irq_domain       *domain;
        struct spmi_controller  *spmic;
-       u16                     apid_to_ppid[256];
+       u16                     *apid_to_ppid;
        const struct pmic_arb_ver_ops *ver_ops;
-       u8                      *ppid_to_chan;
+       u16                     *ppid_to_chan;
+       u16                     last_channel;
 };
 
 /**
@@ -158,7 +161,8 @@ struct spmi_pmic_arb_dev {
  */
 struct pmic_arb_ver_ops {
        /* spmi commands (read_cmd, write_cmd, cmd) functionality */
-       u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
+       int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr,
+                     u32 *offset);
        u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
        int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
        /* Interrupts controller functionality (offset of PIC registers) */
@@ -212,7 +216,14 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
        struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
        u32 status = 0;
        u32 timeout = PMIC_ARB_TIMEOUT_US;
-       u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;
+       u32 offset;
+       int rc;
+
+       rc = dev->ver_ops->offset(dev, sid, addr, &offset);
+       if (rc)
+               return rc;
+
+       offset += PMIC_ARB_STATUS;
 
        while (timeout--) {
                status = readl_relaxed(base + offset);
@@ -257,7 +268,11 @@ pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
        unsigned long flags;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset);
+       if (rc)
+               return rc;
 
        cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
 
@@ -297,7 +312,11 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        u8 bc = len - 1;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+       if (rc)
+               return rc;
 
        if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
                dev_err(&ctrl->dev,
@@ -344,7 +363,11 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        u8 bc = len - 1;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+       if (rc)
+               return rc;
 
        if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
                dev_err(&ctrl->dev,
@@ -614,6 +637,10 @@ static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
        u32 data;
 
        for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+               if (!test_and_set_bit(index, pa->mapping_table_valid))
+                       mapping_table[index] = readl_relaxed(pa->cnfg +
+                                               SPMI_MAPPING_TABLE_REG(index));
+
                data = mapping_table[index];
 
                if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
@@ -701,18 +728,61 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
 }
 
 /* v1 offset per ee */
-static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
 {
-       return 0x800 + 0x80 * pa->channel;
+       *offset = 0x800 + 0x80 * pa->channel;
+       return 0;
 }
 
+static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
+{
+       u32 regval, offset;
+       u16 chan;
+       u16 id;
+
+       /*
+        * PMIC_ARB_REG_CHNL is a hardware table mapping channel to PPID;
+        * ppid_to_chan is an in-memory inverse of that table.
+        */
+       for (chan = pa->last_channel; ; chan++) {
+               offset = PMIC_ARB_REG_CHNL(chan);
+               if (offset >= pa->core_size)
+                       break;
+
+               regval = readl_relaxed(pa->core + offset);
+               if (!regval)
+                       continue;
+
+               id = (regval >> 8) & PMIC_ARB_PPID_MASK;
+               pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID;
+               if (id == ppid) {
+                       chan |= PMIC_ARB_CHAN_VALID;
+                       break;
+               }
+       }
+       pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID;
+
+       return chan;
+}
+
+
 /* v2 offset per ppid (chan) and per ee */
-static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
 {
        u16 ppid = (sid << 8) | (addr >> 8);
-       u8  chan = pa->ppid_to_chan[ppid];
+       u16 chan;
 
-       return 0x1000 * pa->ee + 0x8000 * chan;
+       chan = pa->ppid_to_chan[ppid];
+       if (!(chan & PMIC_ARB_CHAN_VALID))
+               chan = pmic_arb_find_chan(pa, ppid);
+       if (!(chan & PMIC_ARB_CHAN_VALID))
+               return -ENODEV;
+       chan &= ~PMIC_ARB_CHAN_VALID;
+
+       *offset = 0x1000 * pa->ee + 0x8000 * chan;
+       return 0;
 }
 
 static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
@@ -797,7 +867,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
        struct resource *res;
        void __iomem *core;
        u32 channel, ee, hw_ver;
-       int err, i;
+       int err;
        bool is_v1;
 
        ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
@@ -808,6 +878,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
        pa->spmic = ctrl;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+       pa->core_size = resource_size(res);
        core = devm_ioremap_resource(&ctrl->dev, res);
        if (IS_ERR(core)) {
                err = PTR_ERR(core);
@@ -825,10 +896,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                pa->wr_base = core;
                pa->rd_base = core;
        } else {
-               u8  chan;
-               u16 ppid;
-               u32 regval;
-
+               pa->core = core;
                pa->ver_ops = &pmic_arb_v2;
 
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -847,24 +915,14 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                        goto err_put_ctrl;
                }
 
-               pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
-                                       PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
+               pa->ppid_to_chan = devm_kcalloc(&ctrl->dev,
+                                               PMIC_ARB_MAX_PPID,
+                                               sizeof(*pa->ppid_to_chan),
+                                               GFP_KERNEL);
                if (!pa->ppid_to_chan) {
                        err = -ENOMEM;
                        goto err_put_ctrl;
                }
-               /*
-                * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
-                * ppid_to_chan is an in-memory invert of that table.
-                */
-               for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
-                       regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
-                       if (!regval)
-                               continue;
-
-                       ppid = (regval >> 8) & 0xFFF;
-                       pa->ppid_to_chan[ppid] = chan;
-               }
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
@@ -915,9 +973,20 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 
        pa->ee = ee;
 
-       for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
-               pa->mapping_table[i] = readl_relaxed(
-                               pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
+       pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
+                                           sizeof(*pa->apid_to_ppid),
+                                           GFP_KERNEL);
+       if (!pa->apid_to_ppid) {
+               err = -ENOMEM;
+               goto err_put_ctrl;
+       }
+
+       pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
+                                       sizeof(*pa->mapping_table), GFP_KERNEL);
+       if (!pa->mapping_table) {
+               err = -ENOMEM;
+               goto err_put_ctrl;
+       }
 
        /* Initialize max_apid/min_apid to the opposite bounds, during
         * the irq domain translation, we are sure to update these */
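
The rework above replaces the probe-time scan of PMIC_ARB_REG_CHNL with a lazy lookup: ppid_to_chan entries are filled in on first use, and bit 15 (PMIC_ARB_CHAN_VALID) distinguishes "channel 0" from "not mapped yet", so the offset op can now fail with -ENODEV. A standalone model of that scheme follows; the three-entry hardware table and sizes are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define CHAN_VALID      0x8000u         /* mirrors PMIC_ARB_CHAN_VALID (bit 15) */
#define MAX_PPID        16              /* tiny table for the example */

static uint16_t ppid_to_chan[MAX_PPID];

/* Stands in for scanning the hardware PMIC_ARB_REG_CHNL table. */
static int scan_hw_for(uint16_t ppid, uint16_t *chan)
{
        static const uint16_t hw_table[] = { 3, 7, 12 };       /* channel -> ppid, invented */
        uint16_t c;

        for (c = 0; c < 3; c++) {
                ppid_to_chan[hw_table[c]] = c | CHAN_VALID;     /* cache every entry we pass */
                if (hw_table[c] == ppid) {
                        *chan = c;
                        return 0;
                }
        }
        return -1;                                              /* -ENODEV in the driver */
}

static int lookup(uint16_t ppid, uint16_t *chan)
{
        if (ppid_to_chan[ppid] & CHAN_VALID) {
                *chan = ppid_to_chan[ppid] & ~CHAN_VALID;
                return 0;
        }
        return scan_hw_for(ppid, chan);
}

int main(void)
{
        uint16_t chan;

        if (lookup(7, &chan) == 0)
                printf("ppid 7 -> chan %u\n", (unsigned)chan);  /* chan 1, found by scanning */
        if (lookup(9, &chan) != 0)
                printf("ppid 9 not mapped\n");
        return 0;
}
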
index cde5ff7529eb28b26e684eea53f4e168361937ff..d1a750760cf30b270b44929ac91eb22971d15b22 100644 (file)
@@ -613,9 +613,10 @@ out:
        return err;
 }
 
-static int ssb_bus_register(struct ssb_bus *bus,
-                           ssb_invariants_func_t get_invariants,
-                           unsigned long baseaddr)
+static int __maybe_unused
+ssb_bus_register(struct ssb_bus *bus,
+                ssb_invariants_func_t get_invariants,
+                unsigned long baseaddr)
 {
        int err;
 
index 58d4517e18369f984fd1fac9bfe53b9cc9ba4a3a..b9519be90fdae94ad5dbd278a336216de2971ce8 100644 (file)
@@ -6,6 +6,7 @@ menu "Analog to digital converters"
 config AD7606
        tristate "Analog Devices AD7606 ADC driver"
        depends on GPIOLIB || COMPILE_TEST
+       depends on HAS_IOMEM
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index f129039bece3c74b96fb78e70996ea65279502ea..69287108f793bcb81bcf91451b64f1f4f86c2257 100644 (file)
@@ -217,8 +217,12 @@ error_ret:
 static int ade7753_reset(struct device *dev)
 {
        u16 val;
+       int ret;
+
+       ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+       if (ret)
+               return ret;
 
-       ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
        val |= BIT(6); /* Software Chip Reset */
 
        return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
@@ -343,8 +347,12 @@ error_ret:
 static int ade7753_stop_device(struct device *dev)
 {
        u16 val;
+       int ret;
+
+       ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+       if (ret)
+               return ret;
 
-       ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
        val |= BIT(4);  /* AD converters can be turned off */
 
        return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
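
Both ade7753 hunks fix an ignored return value: the 16-bit mode register is read, a bit is set, and the result is written back, so a failed read must not be silently turned into a write of garbage. A standalone sketch of the checked read-modify-write, with stand-in bus accessors (not the driver's SPI helpers) that always succeed.

#include <stdio.h>

#define MODE_REG        0x09
#define SW_RESET        (1u << 6)       /* mirrors the BIT(6) software reset flag */

static int bus_read16(unsigned int reg, unsigned int *val)
{
        (void)reg;
        *val = 0x000c;
        return 0;
}

static int bus_write16(unsigned int reg, unsigned int val)
{
        printf("write 0x%04x to reg 0x%02x\n", val, reg);
        return 0;
}

static int chip_reset(void)
{
        unsigned int val;
        int ret;

        ret = bus_read16(MODE_REG, &val);       /* previously ignored; now propagated */
        if (ret)
                return ret;

        return bus_write16(MODE_REG, val | SW_RESET);
}

int main(void)
{
        return chip_reset();
}
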
index 7b7e7b26c1e8c525699c04451302586a36bc001a..3cc9be776f8b7a658acc0c1565afed3b39ced7ca 100644 (file)
@@ -538,7 +538,7 @@ struct vpfe_isif_raw_config {
 };
 
 /**********************************************************************
-      IPIPE API Structures
+*      IPIPE API Structures
 **********************************************************************/
 
 /* IPIPE module configurations */
index d009bcb439f0677c977d0af1bc3315797c2bf9fa..68ede6c56e6d2207da0752c38244a70655ed739d 100644 (file)
@@ -193,7 +193,7 @@ static int lirc_claim(void)
                        return 0;
                }
        }
-       out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP);
+       out(LIRC_LP_CONTROL, LP_PSELECP | LP_PINITP);
        is_claimed = 1;
        return 1;
 }
@@ -264,7 +264,7 @@ static void lirc_lirc_irq_handler(void *blah)
                init = 1;
        }
 
-       timeout = timer/10;     /* timeout after 1/10 sec. */
+       timeout = timer / 10;   /* timeout after 1/10 sec. */
        signal = 1;
        level = lirc_get_timer();
        do {
@@ -286,15 +286,15 @@ static void lirc_lirc_irq_handler(void *blah)
                /* adjust value to usecs */
                __u64 helper;
 
-               helper = ((__u64) signal)*1000000;
+               helper = ((__u64)signal) * 1000000;
                do_div(helper, timer);
-               signal = (long) helper;
+               signal = (long)helper;
 
                if (signal > LIRC_SFH506_DELAY)
                        data = signal - LIRC_SFH506_DELAY;
                else
                        data = 1;
-               rbuf_write(PULSE_BIT|data); /* pulse */
+               rbuf_write(PULSE_BIT | data); /* pulse */
        }
        lastkt = ktime_get();
 #else
@@ -331,7 +331,7 @@ static ssize_t lirc_read(struct file *filep, char __user *buf, size_t n,
        set_current_state(TASK_INTERRUPTIBLE);
        while (count < n) {
                if (rptr != wptr) {
-                       if (copy_to_user(buf+count, &rbuf[rptr],
+                       if (copy_to_user(buf + count, &rbuf[rptr],
                                         sizeof(int))) {
                                result = -EFAULT;
                                break;
@@ -393,9 +393,9 @@ static ssize_t lirc_write(struct file *filep, const char __user *buf, size_t n,
        for (i = 0; i < count; i++) {
                __u64 helper;
 
-               helper = ((__u64) wbuf[i])*timer;
+               helper = ((__u64)wbuf[i]) * timer;
                do_div(helper, 1000000);
-               wbuf[i] = (int) helper;
+               wbuf[i] = (int)helper;
        }
 
        local_irq_save(flags);
@@ -647,7 +647,7 @@ static int __init lirc_parallel_init(void)
                goto exit_device_put;
 
        pport = parport_find_base(io);
-       if (pport == NULL) {
+       if (!pport) {
                pr_notice("no port at %x found\n", io);
                result = -ENXIO;
                goto exit_device_put;
@@ -656,7 +656,7 @@ static int __init lirc_parallel_init(void)
                                           pf, kf, lirc_lirc_irq_handler, 0,
                                           NULL);
        parport_put_port(pport);
-       if (ppdevice == NULL) {
+       if (!ppdevice) {
                pr_notice("parport_register_device() failed\n");
                result = -ENXIO;
                goto exit_device_put;
@@ -664,7 +664,7 @@ static int __init lirc_parallel_init(void)
        if (parport_claim(ppdevice) != 0)
                goto skip_init;
        is_claimed = 1;
-       out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP);
+       out(LIRC_LP_CONTROL, LP_PSELECP | LP_PINITP);
 
 #ifdef LIRC_TIMER
        if (debug)
@@ -730,7 +730,7 @@ module_param(irq, int, S_IRUGO);
 MODULE_PARM_DESC(irq, "Interrupt (7 or 5)");
 
 module_param(tx_mask, int, S_IRUGO);
-MODULE_PARM_DESC(tx_maxk, "Transmitter mask (default: 0x01)");
+MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)");
 
 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Enable debugging messages");
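
The tx_mask hunk above is more than style: MODULE_PARM_DESC() must name the same identifier that module_param() registered, otherwise the description is attached to a parameter that does not exist. A minimal, hypothetical module (not lirc_parallel itself) showing the intended pairing:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int tx_mask = 0x01;
module_param(tx_mask, int, 0444);
MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)"); /* name matches module_param() */

MODULE_LICENSE("GPL");
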
index ae62975cf44a7bb4fa37ba008191c6e6968666b2..457dc7ffdaf1b5f19de7303bfe92cbb307da22fa 100644 (file)
@@ -78,7 +78,6 @@
 #define BL_ALL_UNLOCKED    0
 
 struct spinand_info {
-       struct nand_ecclayout *ecclayout;
        struct spi_device *spi;
        void *priv;
 };
index 79ac19246548afb5a6e5574a97d09dd7cb8ca110..70b8f4fabfad31af474fae8b7943f7a1404cbb8f 100644 (file)
@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
        lcd_send_serial(0x1F);  /* R/W=W, RS=0 */
        lcd_send_serial(cmd & 0x0F);
        lcd_send_serial((cmd >> 4) & 0x0F);
-       /* the shortest command takes at least 40 us */
-       usleep_range(40, 100);
+       udelay(40);             /* the shortest command takes at least 40 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
        lcd_send_serial(0x5F);  /* R/W=W, RS=1 */
        lcd_send_serial(data & 0x0F);
        lcd_send_serial((data >> 4) & 0x0F);
-       /* the shortest data takes at least 40 us */
-       usleep_range(40, 100);
+       udelay(40);             /* the shortest data takes at least 40 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, cmd);
-       /* maintain the data during 20 us before the strobe */
-       usleep_range(20, 100);
+       udelay(20);     /* maintain the data during 20 us before the strobe */
 
        bits.e = BIT_SET;
        bits.rs = BIT_CLR;
        bits.rw = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(40, 100);  /* maintain the strobe during 40 us */
+       udelay(40);     /* maintain the strobe during 40 us */
 
        bits.e = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(120, 500); /* the shortest command takes at least 120 us */
+       udelay(120);    /* the shortest command takes at least 120 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, data);
-       /* maintain the data during 20 us before the strobe */
-       usleep_range(20, 100);
+       udelay(20);     /* maintain the data during 20 us before the strobe */
 
        bits.e = BIT_SET;
        bits.rs = BIT_SET;
        bits.rw = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(40, 100);  /* maintain the strobe during 40 us */
+       udelay(40);     /* maintain the strobe during 40 us */
 
        bits.e = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(45, 100);  /* the shortest data takes at least 45 us */
+       udelay(45);     /* the shortest data takes at least 45 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
        spin_lock_irq(&pprt_lock);
        /* present the data to the control port */
        w_ctr(pprt, cmd);
-       usleep_range(60, 120);
+       udelay(60);
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, data);
-       usleep_range(60, 120);
+       udelay(60);
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
                lcd_send_serial(0x5F);  /* R/W=W, RS=1 */
                lcd_send_serial(' ' & 0x0F);
                lcd_send_serial((' ' >> 4) & 0x0F);
-               usleep_range(40, 100);  /* the shortest data takes at least 40 us */
+               udelay(40);     /* the shortest data takes at least 40 us */
        }
        spin_unlock_irq(&pprt_lock);
 
@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
                w_dtr(pprt, ' ');
 
                /* maintain the data during 20 us before the strobe */
-               usleep_range(20, 100);
+               udelay(20);
 
                bits.e = BIT_SET;
                bits.rs = BIT_SET;
@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
                set_ctrl_bits();
 
                /* maintain the strobe during 40 us */
-               usleep_range(40, 100);
+               udelay(40);
 
                bits.e = BIT_CLR;
                set_ctrl_bits();
 
                /* the shortest data takes at least 45 us */
-               usleep_range(45, 100);
+               udelay(45);
        }
        spin_unlock_irq(&pprt_lock);
 
@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
        for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
                /* present the data to the data port */
                w_dtr(pprt, ' ');
-               usleep_range(60, 120);
+               udelay(60);
        }
 
        spin_unlock_irq(&pprt_lock);
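
The panel hunks revert usleep_range() back to udelay() because every one of these waits runs under spin_lock_irq(), i.e. in atomic context where sleeping is not allowed; a busy-wait is the only legal delay there. A short kernel-style sketch of the constraint; the lock and callback names are illustrative, not the driver's.

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(port_lock);      /* illustrative; the driver uses pprt_lock */

static void strobe_byte(void (*write_byte)(int), int data)
{
        spin_lock_irq(&port_lock);      /* interrupts off: this is atomic context */
        write_byte(data);
        udelay(40);                     /* busy-wait is the only legal delay here */
        /* usleep_range(40, 100) could sleep and must not be called with the lock held */
        spin_unlock_irq(&port_lock);
}
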
index ba8765063174c4e05b517cdf45d33c6c5f57cf25..f1f3ecadf0fb0d0e1a0bb11d014d766e1187e55c 100644 (file)
@@ -22,12 +22,6 @@ menuconfig STAGING_RDMA
 # Please keep entries in alphabetic order
 if STAGING_RDMA
 
-source "drivers/staging/rdma/amso1100/Kconfig"
-
-source "drivers/staging/rdma/ehca/Kconfig"
-
 source "drivers/staging/rdma/hfi1/Kconfig"
 
-source "drivers/staging/rdma/ipath/Kconfig"
-
 endif
index 139d78ef2c24388b93ed6a008090190cff6de663..8c7fc1de48a7bf6f86f103ad120f9052ea399c58 100644 (file)
@@ -1,5 +1,2 @@
 # Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_AMSO1100)      += amso1100/
-obj-$(CONFIG_INFINIBAND_EHCA)  += ehca/
 obj-$(CONFIG_INFINIBAND_HFI1)  += hfi1/
-obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/staging/rdma/amso1100/Kbuild b/drivers/staging/rdma/amso1100/Kbuild
deleted file mode 100644 (file)
index 950dfab..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
-
-obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
-
-iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
-       c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
diff --git a/drivers/staging/rdma/amso1100/Kconfig b/drivers/staging/rdma/amso1100/Kconfig
deleted file mode 100644 (file)
index e6ce5f2..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-config INFINIBAND_AMSO1100
-       tristate "Ammasso 1100 HCA support"
-       depends on PCI && INET
-       ---help---
-         This is a low-level driver for the Ammasso 1100 host
-         channel adapter (HCA).
-
-config INFINIBAND_AMSO1100_DEBUG
-       bool "Verbose debugging output"
-       depends on INFINIBAND_AMSO1100
-       default n
-       ---help---
-         This option causes the amso1100 driver to produce a bunch of
-         debug messages.  Select this if you are developing the driver
-         or trying to diagnose a problem.
diff --git a/drivers/staging/rdma/amso1100/TODO b/drivers/staging/rdma/amso1100/TODO
deleted file mode 100644 (file)
index 18b00a5..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-7/2015
-
-The amso1100 driver has been deprecated and moved to drivers/staging.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/amso1100/c2.c b/drivers/staging/rdma/amso1100/c2.c
deleted file mode 100644 (file)
index b46ebd1..0000000
+++ /dev/null
@@ -1,1240 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_provider.h"
-
-MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
-MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
-    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
-
-static int debug = -1;         /* defaults above */
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-static int c2_up(struct net_device *netdev);
-static int c2_down(struct net_device *netdev);
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-static void c2_tx_interrupt(struct net_device *netdev);
-static void c2_rx_interrupt(struct net_device *netdev);
-static irqreturn_t c2_interrupt(int irq, void *dev_id);
-static void c2_tx_timeout(struct net_device *netdev);
-static int c2_change_mtu(struct net_device *netdev, int new_mtu);
-static void c2_reset(struct c2_port *c2_port);
-
-static struct pci_device_id c2_pci_table[] = {
-       { PCI_DEVICE(0x18b8, 0xb001) },
-       { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, c2_pci_table);
-
-static void c2_set_rxbufsize(struct c2_port *c2_port)
-{
-       struct net_device *netdev = c2_port->netdev;
-
-       if (netdev->mtu > RX_BUF_SIZE)
-               c2_port->rx_buf_size =
-                   netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
-                   NET_IP_ALIGN;
-       else
-               c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
-}
-
-/*
- * Allocate TX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
-                           dma_addr_t base, void __iomem * mmio_txp_ring)
-{
-       struct c2_tx_desc *tx_desc;
-       struct c2_txp_desc __iomem *txp_desc;
-       struct c2_element *elem;
-       int i;
-
-       tx_ring->start = kmalloc_array(tx_ring->count, sizeof(*elem),
-                                      GFP_KERNEL);
-       if (!tx_ring->start)
-               return -ENOMEM;
-
-       elem = tx_ring->start;
-       tx_desc = vaddr;
-       txp_desc = mmio_txp_ring;
-       for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
-               tx_desc->len = 0;
-               tx_desc->status = 0;
-
-               /* Set TXP_HTXD_UNINIT */
-               __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
-                            (void __iomem *) txp_desc + C2_TXP_ADDR);
-               __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
-               __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
-                            (void __iomem *) txp_desc + C2_TXP_FLAGS);
-
-               elem->skb = NULL;
-               elem->ht_desc = tx_desc;
-               elem->hw_desc = txp_desc;
-
-               if (i == tx_ring->count - 1) {
-                       elem->next = tx_ring->start;
-                       tx_desc->next_offset = base;
-               } else {
-                       elem->next = elem + 1;
-                       tx_desc->next_offset =
-                           base + (i + 1) * sizeof(*tx_desc);
-               }
-       }
-
-       tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
-
-       return 0;
-}
-
-/*
- * Allocate RX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
-                           dma_addr_t base, void __iomem * mmio_rxp_ring)
-{
-       struct c2_rx_desc *rx_desc;
-       struct c2_rxp_desc __iomem *rxp_desc;
-       struct c2_element *elem;
-       int i;
-
-       rx_ring->start = kmalloc_array(rx_ring->count, sizeof(*elem),
-                                      GFP_KERNEL);
-       if (!rx_ring->start)
-               return -ENOMEM;
-
-       elem = rx_ring->start;
-       rx_desc = vaddr;
-       rxp_desc = mmio_rxp_ring;
-       for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
-               rx_desc->len = 0;
-               rx_desc->status = 0;
-
-               /* Set RXP_HRXD_UNINIT */
-               __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
-                      (void __iomem *) rxp_desc + C2_RXP_STATUS);
-               __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
-               __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
-               __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
-                            (void __iomem *) rxp_desc + C2_RXP_ADDR);
-               __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
-                            (void __iomem *) rxp_desc + C2_RXP_FLAGS);
-
-               elem->skb = NULL;
-               elem->ht_desc = rx_desc;
-               elem->hw_desc = rxp_desc;
-
-               if (i == rx_ring->count - 1) {
-                       elem->next = rx_ring->start;
-                       rx_desc->next_offset = base;
-               } else {
-                       elem->next = elem + 1;
-                       rx_desc->next_offset =
-                           base + (i + 1) * sizeof(*rx_desc);
-               }
-       }
-
-       rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
-
-       return 0;
-}
-
-/* Setup buffer for receiving */
-static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
-{
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_rx_desc *rx_desc = elem->ht_desc;
-       struct sk_buff *skb;
-       dma_addr_t mapaddr;
-       u32 maplen;
-       struct c2_rxp_hdr *rxp_hdr;
-
-       skb = dev_alloc_skb(c2_port->rx_buf_size);
-       if (unlikely(!skb)) {
-               pr_debug("%s: out of memory for receive\n",
-                       c2_port->netdev->name);
-               return -ENOMEM;
-       }
-
-       /* Zero out the rxp hdr in the sk_buff */
-       memset(skb->data, 0, sizeof(*rxp_hdr));
-
-       skb->dev = c2_port->netdev;
-
-       maplen = c2_port->rx_buf_size;
-       mapaddr =
-           pci_map_single(c2dev->pcidev, skb->data, maplen,
-                          PCI_DMA_FROMDEVICE);
-
-       /* Set the sk_buff RXP_header to RXP_HRXD_READY */
-       rxp_hdr = (struct c2_rxp_hdr *) skb->data;
-       rxp_hdr->flags = RXP_HRXD_READY;
-
-       __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-       __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
-                    elem->hw_desc + C2_RXP_LEN);
-       __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
-       __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-                    elem->hw_desc + C2_RXP_FLAGS);
-
-       elem->skb = skb;
-       elem->mapaddr = mapaddr;
-       elem->maplen = maplen;
-       rx_desc->len = maplen;
-
-       return 0;
-}
-
-/*
- * Allocate buffers for the Rx ring
- * For receive:  rx_ring.to_clean is next received frame
- */
-static int c2_rx_fill(struct c2_port *c2_port)
-{
-       struct c2_ring *rx_ring = &c2_port->rx_ring;
-       struct c2_element *elem;
-       int ret = 0;
-
-       elem = rx_ring->start;
-       do {
-               if (c2_rx_alloc(c2_port, elem)) {
-                       ret = 1;
-                       break;
-               }
-       } while ((elem = elem->next) != rx_ring->start);
-
-       rx_ring->to_clean = rx_ring->start;
-       return ret;
-}
-
-/* Free all buffers in RX ring, assumes receiver stopped */
-static void c2_rx_clean(struct c2_port *c2_port)
-{
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_ring *rx_ring = &c2_port->rx_ring;
-       struct c2_element *elem;
-       struct c2_rx_desc *rx_desc;
-
-       elem = rx_ring->start;
-       do {
-               rx_desc = elem->ht_desc;
-               rx_desc->len = 0;
-
-               __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-               __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-               __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
-               __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
-                            elem->hw_desc + C2_RXP_ADDR);
-               __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
-                            elem->hw_desc + C2_RXP_FLAGS);
-
-               if (elem->skb) {
-                       pci_unmap_single(c2dev->pcidev, elem->mapaddr,
-                                        elem->maplen, PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb(elem->skb);
-                       elem->skb = NULL;
-               }
-       } while ((elem = elem->next) != rx_ring->start);
-}
-
-static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
-{
-       struct c2_tx_desc *tx_desc = elem->ht_desc;
-
-       tx_desc->len = 0;
-
-       pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
-                        PCI_DMA_TODEVICE);
-
-       if (elem->skb) {
-               dev_kfree_skb_any(elem->skb);
-               elem->skb = NULL;
-       }
-
-       return 0;
-}
-
-/* Free all buffers in TX ring, assumes transmitter stopped */
-static void c2_tx_clean(struct c2_port *c2_port)
-{
-       struct c2_ring *tx_ring = &c2_port->tx_ring;
-       struct c2_element *elem;
-       struct c2_txp_desc txp_htxd;
-       int retry;
-       unsigned long flags;
-
-       spin_lock_irqsave(&c2_port->tx_lock, flags);
-
-       elem = tx_ring->start;
-
-       do {
-               retry = 0;
-               do {
-                       txp_htxd.flags =
-                           readw(elem->hw_desc + C2_TXP_FLAGS);
-
-                       if (txp_htxd.flags == TXP_HTXD_READY) {
-                               retry = 1;
-                               __raw_writew(0,
-                                            elem->hw_desc + C2_TXP_LEN);
-                               __raw_writeq(0,
-                                            elem->hw_desc + C2_TXP_ADDR);
-                               __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
-                                            elem->hw_desc + C2_TXP_FLAGS);
-                               c2_port->netdev->stats.tx_dropped++;
-                               break;
-                       } else {
-                               __raw_writew(0,
-                                            elem->hw_desc + C2_TXP_LEN);
-                               __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
-                                            elem->hw_desc + C2_TXP_ADDR);
-                               __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
-                                            elem->hw_desc + C2_TXP_FLAGS);
-                       }
-
-                       c2_tx_free(c2_port->c2dev, elem);
-
-               } while ((elem = elem->next) != tx_ring->start);
-       } while (retry);
-
-       c2_port->tx_avail = c2_port->tx_ring.count - 1;
-       c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
-
-       if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
-               netif_wake_queue(c2_port->netdev);
-
-       spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-}
-
-/*
- * Process transmit descriptors marked 'DONE' by the firmware,
- * freeing up their unneeded sk_buffs.
- */
-static void c2_tx_interrupt(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_ring *tx_ring = &c2_port->tx_ring;
-       struct c2_element *elem;
-       struct c2_txp_desc txp_htxd;
-
-       spin_lock(&c2_port->tx_lock);
-
-       for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
-            elem = elem->next) {
-               txp_htxd.flags =
-                   be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
-
-               if (txp_htxd.flags != TXP_HTXD_DONE)
-                       break;
-
-               if (netif_msg_tx_done(c2_port)) {
-                       /* PCI reads are expensive in fast path */
-                       txp_htxd.len =
-                           be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
-                       pr_debug("%s: tx done slot %3Zu status 0x%x len "
-                               "%5u bytes\n",
-                               netdev->name, elem - tx_ring->start,
-                               txp_htxd.flags, txp_htxd.len);
-               }
-
-               c2_tx_free(c2dev, elem);
-               ++(c2_port->tx_avail);
-       }
-
-       tx_ring->to_clean = elem;
-
-       if (netif_queue_stopped(netdev)
-           && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
-               netif_wake_queue(netdev);
-
-       spin_unlock(&c2_port->tx_lock);
-}
-
-static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
-{
-       struct c2_rx_desc *rx_desc = elem->ht_desc;
-       struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
-
-       if (rxp_hdr->status != RXP_HRXD_OK ||
-           rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
-               pr_debug("BAD RXP_HRXD\n");
-               pr_debug("  rx_desc : %p\n", rx_desc);
-               pr_debug("    index : %Zu\n",
-                       elem - c2_port->rx_ring.start);
-               pr_debug("    len   : %u\n", rx_desc->len);
-               pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
-                       (void *) __pa((unsigned long) rxp_hdr));
-               pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
-               pr_debug("    status: 0x%x\n", rxp_hdr->status);
-               pr_debug("    len   : %u\n", rxp_hdr->len);
-               pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
-       }
-
-       /* Setup the skb for reuse since we're dropping this pkt */
-       elem->skb->data = elem->skb->head;
-       skb_reset_tail_pointer(elem->skb);
-
-       /* Zero out the rxp hdr in the sk_buff */
-       memset(elem->skb->data, 0, sizeof(*rxp_hdr));
-
-       /* Write the descriptor to the adapter's rx ring */
-       __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-       __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-       __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
-                    elem->hw_desc + C2_RXP_LEN);
-       __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
-                    elem->hw_desc + C2_RXP_ADDR);
-       __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-                    elem->hw_desc + C2_RXP_FLAGS);
-
-       pr_debug("packet dropped\n");
-       c2_port->netdev->stats.rx_dropped++;
-}
-
-static void c2_rx_interrupt(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_ring *rx_ring = &c2_port->rx_ring;
-       struct c2_element *elem;
-       struct c2_rx_desc *rx_desc;
-       struct c2_rxp_hdr *rxp_hdr;
-       struct sk_buff *skb;
-       dma_addr_t mapaddr;
-       u32 maplen, buflen;
-       unsigned long flags;
-
-       spin_lock_irqsave(&c2dev->lock, flags);
-
-       /* Begin where we left off */
-       rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
-
-       for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
-            elem = elem->next) {
-               rx_desc = elem->ht_desc;
-               mapaddr = elem->mapaddr;
-               maplen = elem->maplen;
-               skb = elem->skb;
-               rxp_hdr = (struct c2_rxp_hdr *) skb->data;
-
-               if (rxp_hdr->flags != RXP_HRXD_DONE)
-                       break;
-               buflen = rxp_hdr->len;
-
-               /* Sanity check the RXP header */
-               if (rxp_hdr->status != RXP_HRXD_OK ||
-                   buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
-                       c2_rx_error(c2_port, elem);
-                       continue;
-               }
-
-               /*
-                * Allocate and map a new skb for replenishing the host
-                * RX desc
-                */
-               if (c2_rx_alloc(c2_port, elem)) {
-                       c2_rx_error(c2_port, elem);
-                       continue;
-               }
-
-               /* Unmap the old skb */
-               pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
-                                PCI_DMA_FROMDEVICE);
-
-               prefetch(skb->data);
-
-               /*
-                * Skip past the leading 8 bytes comprising of the
-                * "struct c2_rxp_hdr", prepended by the adapter
-                * to the usual Ethernet header ("struct ethhdr"),
-                * to the start of the raw Ethernet packet.
-                *
-                * Fix up the various fields in the sk_buff before
-                * passing it up to netif_rx(). The transfer size
-                * (in bytes) specified by the adapter len field of
-                * the "struct c2_rxp_hdr" does NOT include the
-                * "sizeof(struct c2_rxp_hdr)".
-                */
-               skb->data += sizeof(*rxp_hdr);
-               skb_set_tail_pointer(skb, buflen);
-               skb->len = buflen;
-               skb->protocol = eth_type_trans(skb, netdev);
-
-               netif_rx(skb);
-
-               netdev->stats.rx_packets++;
-               netdev->stats.rx_bytes += buflen;
-       }
-
-       /* Save where we left off */
-       rx_ring->to_clean = elem;
-       c2dev->cur_rx = elem - rx_ring->start;
-       C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
-       spin_unlock_irqrestore(&c2dev->lock, flags);
-}
-
-/*
- * Handle netisr0 TX & RX interrupts.
- */
-static irqreturn_t c2_interrupt(int irq, void *dev_id)
-{
-       unsigned int netisr0, dmaisr;
-       int handled = 0;
-       struct c2_dev *c2dev = dev_id;
-
-       /* Process CCILNET interrupts */
-       netisr0 = readl(c2dev->regs + C2_NISR0);
-       if (netisr0) {
-
-               /*
-                * There is an issue with the firmware that always
-                * provides the status of RX for both TX & RX
-                * interrupts.  So process both queues here.
-                */
-               c2_rx_interrupt(c2dev->netdev);
-               c2_tx_interrupt(c2dev->netdev);
-
-               /* Clear the interrupt */
-               writel(netisr0, c2dev->regs + C2_NISR0);
-               handled++;
-       }
-
-       /* Process RNIC interrupts */
-       dmaisr = readl(c2dev->regs + C2_DISR);
-       if (dmaisr) {
-               writel(dmaisr, c2dev->regs + C2_DISR);
-               c2_rnic_interrupt(c2dev);
-               handled++;
-       }
-
-       if (handled) {
-               return IRQ_HANDLED;
-       } else {
-               return IRQ_NONE;
-       }
-}
-
-static int c2_up(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_element *elem;
-       struct c2_rxp_hdr *rxp_hdr;
-       struct in_device *in_dev;
-       size_t rx_size, tx_size;
-       int ret, i;
-       unsigned int netimr0;
-
-       if (netif_msg_ifup(c2_port))
-               pr_debug("%s: enabling interface\n", netdev->name);
-
-       /* Set the Rx buffer size based on MTU */
-       c2_set_rxbufsize(c2_port);
-
-       /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
-       rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
-       tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
-
-       c2_port->mem_size = tx_size + rx_size;
-       c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
-                                            &c2_port->dma);
-       if (c2_port->mem == NULL) {
-               pr_debug("Unable to allocate memory for "
-                       "host descriptor rings\n");
-               return -ENOMEM;
-       }
-
-       /* Create the Rx host descriptor ring */
-       if ((ret =
-            c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
-                             c2dev->mmio_rxp_ring))) {
-               pr_debug("Unable to create RX ring\n");
-               goto bail0;
-       }
-
-       /* Allocate Rx buffers for the host descriptor ring */
-       if (c2_rx_fill(c2_port)) {
-               pr_debug("Unable to fill RX ring\n");
-               goto bail1;
-       }
-
-       /* Create the Tx host descriptor ring */
-       if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
-                                   c2_port->dma + rx_size,
-                                   c2dev->mmio_txp_ring))) {
-               pr_debug("Unable to create TX ring\n");
-               goto bail1;
-       }
-
-       /* Set the TX pointer to where we left off */
-       c2_port->tx_avail = c2_port->tx_ring.count - 1;
-       c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
-           c2_port->tx_ring.start + c2dev->cur_tx;
-
-       /* missing: Initialize MAC */
-
-       BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
-
-       /* Reset the adapter, ensures the driver is in sync with the RXP */
-       c2_reset(c2_port);
-
-       /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
-       for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
-            i++, elem++) {
-               rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
-               rxp_hdr->flags = 0;
-               __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-                            elem->hw_desc + C2_RXP_FLAGS);
-       }
-
-       /* Enable network packets */
-       netif_start_queue(netdev);
-
-       /* Enable IRQ */
-       writel(0, c2dev->regs + C2_IDIS);
-       netimr0 = readl(c2dev->regs + C2_NIMR0);
-       netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
-       writel(netimr0, c2dev->regs + C2_NIMR0);
-
-       /* Tell the stack to ignore arp requests for ipaddrs bound to
-        * other interfaces.  This is needed to prevent the host stack
-        * from responding to arp requests to the ipaddr bound on the
-        * rdma interface.
-        */
-       in_dev = in_dev_get(netdev);
-       IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
-       in_dev_put(in_dev);
-
-       return 0;
-
-bail1:
-       c2_rx_clean(c2_port);
-       kfree(c2_port->rx_ring.start);
-
-bail0:
-       pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
-                           c2_port->dma);
-
-       return ret;
-}
-
-static int c2_down(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-
-       if (netif_msg_ifdown(c2_port))
-               pr_debug("%s: disabling interface\n",
-                       netdev->name);
-
-       /* Wait for all the queued packets to get sent */
-       c2_tx_interrupt(netdev);
-
-       /* Disable network packets */
-       netif_stop_queue(netdev);
-
-       /* Disable IRQs by clearing the interrupt mask */
-       writel(1, c2dev->regs + C2_IDIS);
-       writel(0, c2dev->regs + C2_NIMR0);
-
-       /* missing: Stop transmitter */
-
-       /* missing: Stop receiver */
-
-       /* Reset the adapter, ensures the driver is in sync with the RXP */
-       c2_reset(c2_port);
-
-       /* missing: Turn off LEDs here */
-
-       /* Free all buffers in the host descriptor rings */
-       c2_tx_clean(c2_port);
-       c2_rx_clean(c2_port);
-
-       /* Free the host descriptor rings */
-       kfree(c2_port->rx_ring.start);
-       kfree(c2_port->tx_ring.start);
-       pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
-                           c2_port->dma);
-
-       return 0;
-}
-
-static void c2_reset(struct c2_port *c2_port)
-{
-       struct c2_dev *c2dev = c2_port->c2dev;
-       unsigned int cur_rx = c2dev->cur_rx;
-
-       /* Tell the hardware to quiesce */
-       C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
-
-       /*
-        * The hardware will reset the C2_PCI_HRX_QUI bit once
-        * the RXP is quiesced.  Wait 2 seconds for this.
-        */
-       ssleep(2);
-
-       cur_rx = C2_GET_CUR_RX(c2dev);
-
-       if (cur_rx & C2_PCI_HRX_QUI)
-               pr_debug("c2_reset: failed to quiesce the hardware!\n");
-
-       cur_rx &= ~C2_PCI_HRX_QUI;
-
-       c2dev->cur_rx = cur_rx;
-
-       pr_debug("Current RX: %u\n", c2dev->cur_rx);
-}
-
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_ring *tx_ring = &c2_port->tx_ring;
-       struct c2_element *elem;
-       dma_addr_t mapaddr;
-       u32 maplen;
-       unsigned long flags;
-       unsigned int i;
-
-       spin_lock_irqsave(&c2_port->tx_lock, flags);
-
-       if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
-               netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
-               pr_debug("%s: Tx ring full when queue awake!\n",
-                       netdev->name);
-               return NETDEV_TX_BUSY;
-       }
-
-       maplen = skb_headlen(skb);
-       mapaddr =
-           pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
-
-       elem = tx_ring->to_use;
-       elem->skb = skb;
-       elem->mapaddr = mapaddr;
-       elem->maplen = maplen;
-
-       /* Tell HW to xmit */
-       __raw_writeq((__force u64) cpu_to_be64(mapaddr),
-                    elem->hw_desc + C2_TXP_ADDR);
-       __raw_writew((__force u16) cpu_to_be16(maplen),
-                    elem->hw_desc + C2_TXP_LEN);
-       __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
-                    elem->hw_desc + C2_TXP_FLAGS);
-
-       netdev->stats.tx_packets++;
-       netdev->stats.tx_bytes += maplen;
-
-       /* Loop thru additional data fragments and queue them */
-       if (skb_shinfo(skb)->nr_frags) {
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-                       maplen = skb_frag_size(frag);
-                       mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
-                                                  0, maplen, DMA_TO_DEVICE);
-                       elem = elem->next;
-                       elem->skb = NULL;
-                       elem->mapaddr = mapaddr;
-                       elem->maplen = maplen;
-
-                       /* Tell HW to xmit */
-                       __raw_writeq((__force u64) cpu_to_be64(mapaddr),
-                                    elem->hw_desc + C2_TXP_ADDR);
-                       __raw_writew((__force u16) cpu_to_be16(maplen),
-                                    elem->hw_desc + C2_TXP_LEN);
-                       __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
-                                    elem->hw_desc + C2_TXP_FLAGS);
-
-                       netdev->stats.tx_packets++;
-                       netdev->stats.tx_bytes += maplen;
-               }
-       }
-
-       tx_ring->to_use = elem->next;
-       c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
-
-       if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
-               netif_stop_queue(netdev);
-               if (netif_msg_tx_queued(c2_port))
-                       pr_debug("%s: transmit queue full\n",
-                               netdev->name);
-       }
-
-       spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
-       netdev->trans_start = jiffies;
-
-       return NETDEV_TX_OK;
-}
-
-static void c2_tx_timeout(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-
-       if (netif_msg_timer(c2_port))
-               pr_debug("%s: tx timeout\n", netdev->name);
-
-       c2_tx_clean(c2_port);
-}
-
-static int c2_change_mtu(struct net_device *netdev, int new_mtu)
-{
-       int ret = 0;
-
-       if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
-               return -EINVAL;
-
-       netdev->mtu = new_mtu;
-
-       if (netif_running(netdev)) {
-               c2_down(netdev);
-
-               c2_up(netdev);
-       }
-
-       return ret;
-}
-
-static const struct net_device_ops c2_netdev = {
-       .ndo_open               = c2_up,
-       .ndo_stop               = c2_down,
-       .ndo_start_xmit         = c2_xmit_frame,
-       .ndo_tx_timeout         = c2_tx_timeout,
-       .ndo_change_mtu         = c2_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
-};
-
-/* Initialize network device */
-static struct net_device *c2_devinit(struct c2_dev *c2dev,
-                                    void __iomem * mmio_addr)
-{
-       struct c2_port *c2_port = NULL;
-       struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
-
-       if (!netdev) {
-               pr_debug("c2_port etherdev alloc failed\n");
-               return NULL;
-       }
-
-       SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
-       netdev->netdev_ops = &c2_netdev;
-       netdev->watchdog_timeo = C2_TX_TIMEOUT;
-       netdev->irq = c2dev->pcidev->irq;
-
-       c2_port = netdev_priv(netdev);
-       c2_port->netdev = netdev;
-       c2_port->c2dev = c2dev;
-       c2_port->msg_enable = netif_msg_init(debug, default_msg);
-       c2_port->tx_ring.count = C2_NUM_TX_DESC;
-       c2_port->rx_ring.count = C2_NUM_RX_DESC;
-
-       spin_lock_init(&c2_port->tx_lock);
-
-       /* Copy our 48-bit ethernet hardware address */
-       memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
-
-       /* Validate the MAC address */
-       if (!is_valid_ether_addr(netdev->dev_addr)) {
-               pr_debug("Invalid MAC Address\n");
-               pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name,
-                        netdev->dev_addr, netdev->irq);
-               free_netdev(netdev);
-               return NULL;
-       }
-
-       c2dev->netdev = netdev;
-
-       return netdev;
-}
-
-static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
-{
-       int ret = 0, i;
-       unsigned long reg0_start, reg0_flags, reg0_len;
-       unsigned long reg2_start, reg2_flags, reg2_len;
-       unsigned long reg4_start, reg4_flags, reg4_len;
-       unsigned kva_map_size;
-       struct net_device *netdev = NULL;
-       struct c2_dev *c2dev = NULL;
-       void __iomem *mmio_regs = NULL;
-
-       printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
-               DRV_VERSION);
-
-       /* Enable PCI device */
-       ret = pci_enable_device(pcidev);
-       if (ret) {
-               printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
-                       pci_name(pcidev));
-               goto bail0;
-       }
-
-       reg0_start = pci_resource_start(pcidev, BAR_0);
-       reg0_len = pci_resource_len(pcidev, BAR_0);
-       reg0_flags = pci_resource_flags(pcidev, BAR_0);
-
-       reg2_start = pci_resource_start(pcidev, BAR_2);
-       reg2_len = pci_resource_len(pcidev, BAR_2);
-       reg2_flags = pci_resource_flags(pcidev, BAR_2);
-
-       reg4_start = pci_resource_start(pcidev, BAR_4);
-       reg4_len = pci_resource_len(pcidev, BAR_4);
-       reg4_flags = pci_resource_flags(pcidev, BAR_4);
-
-       pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
-       pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
-       pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
-
-       /* Make sure PCI base addr are MMIO */
-       if (!(reg0_flags & IORESOURCE_MEM) ||
-           !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
-               printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
-               ret = -ENODEV;
-               goto bail1;
-       }
-
-       /* Check for weird/broken PCI region reporting */
-       if ((reg0_len < C2_REG0_SIZE) ||
-           (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
-               printk(KERN_ERR PFX "Invalid PCI region sizes\n");
-               ret = -ENODEV;
-               goto bail1;
-       }
-
-       /* Reserve PCI I/O and memory resources */
-       ret = pci_request_regions(pcidev, DRV_NAME);
-       if (ret) {
-               printk(KERN_ERR PFX "%s: Unable to request regions\n",
-                       pci_name(pcidev));
-               goto bail1;
-       }
-
-       if ((sizeof(dma_addr_t) > 4)) {
-               ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
-               if (ret < 0) {
-                       printk(KERN_ERR PFX "64b DMA configuration failed\n");
-                       goto bail2;
-               }
-       } else {
-               ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
-               if (ret < 0) {
-                       printk(KERN_ERR PFX "32b DMA configuration failed\n");
-                       goto bail2;
-               }
-       }
-
-       /* Enables bus-mastering on the device */
-       pci_set_master(pcidev);
-
-       /* Remap the adapter PCI registers in BAR4 */
-       mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
-                                   sizeof(struct c2_adapter_pci_regs));
-       if (!mmio_regs) {
-               printk(KERN_ERR PFX
-                       "Unable to remap adapter PCI registers in BAR4\n");
-               ret = -EIO;
-               goto bail2;
-       }
-
-       /* Validate PCI regs magic */
-       for (i = 0; i < sizeof(c2_magic); i++) {
-               if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
-                       printk(KERN_ERR PFX "Downlevel Firmware boot loader "
-                               "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
-                              "utility to update your boot loader\n",
-                               i + 1, sizeof(c2_magic),
-                               readb(mmio_regs + C2_REGS_MAGIC + i),
-                               c2_magic[i]);
-                       printk(KERN_ERR PFX "Adapter not claimed\n");
-                       iounmap(mmio_regs);
-                       ret = -EIO;
-                       goto bail2;
-               }
-       }
-
-       /* Validate the adapter version */
-       if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
-               printk(KERN_ERR PFX "Version mismatch "
-                       "[fw=%u, c2=%u], Adapter not claimed\n",
-                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
-                       C2_VERSION);
-               ret = -EINVAL;
-               iounmap(mmio_regs);
-               goto bail2;
-       }
-
-       /* Validate the adapter IVN */
-       if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
-               printk(KERN_ERR PFX "Downlevel firmware level. You should be using "
-                      "the OpenIB device support kit. "
-                      "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
-                      C2_IVN);
-               ret = -EINVAL;
-               iounmap(mmio_regs);
-               goto bail2;
-       }
-
-       /* Allocate hardware structure */
-       c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
-       if (!c2dev) {
-               printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
-                       pci_name(pcidev));
-               ret = -ENOMEM;
-               iounmap(mmio_regs);
-               goto bail2;
-       }
-
-       memset(c2dev, 0, sizeof(*c2dev));
-       spin_lock_init(&c2dev->lock);
-       c2dev->pcidev = pcidev;
-       c2dev->cur_tx = 0;
-
-       /* Get the last RX index */
-       c2dev->cur_rx =
-           (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
-            0xffffc000) / sizeof(struct c2_rxp_desc);
-
-       /* Request an interrupt line for the driver */
-       ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
-       if (ret) {
-               printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
-                       pci_name(pcidev), pcidev->irq);
-               iounmap(mmio_regs);
-               goto bail3;
-       }
-
-       /* Set driver specific data */
-       pci_set_drvdata(pcidev, c2dev);
-
-       /* Initialize network device */
-       if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
-               ret = -ENOMEM;
-               iounmap(mmio_regs);
-               goto bail4;
-       }
-
-       /* Save off the actual size prior to unmapping mmio_regs */
-       kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
-
-       /* Unmap the adapter PCI registers in BAR4 */
-       iounmap(mmio_regs);
-
-       /* Register network device */
-       ret = register_netdev(netdev);
-       if (ret) {
-               printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
-                       ret);
-               goto bail5;
-       }
-
-       /* Disable network packets */
-       netif_stop_queue(netdev);
-
-       /* Remap the adapter HRXDQ PA space to kernel VA space */
-       c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
-                                              C2_RXP_HRXDQ_SIZE);
-       if (!c2dev->mmio_rxp_ring) {
-               printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
-               ret = -EIO;
-               goto bail6;
-       }
-
-       /* Remap the adapter HTXDQ PA space to kernel VA space */
-       c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
-                                              C2_TXP_HTXDQ_SIZE);
-       if (!c2dev->mmio_txp_ring) {
-               printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
-               ret = -EIO;
-               goto bail7;
-       }
-
-       /* Save off the current RX index in the last 4 bytes of the TXP Ring */
-       C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
-       /* Remap the PCI registers in adapter BAR0 to kernel VA space */
-       c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
-       if (!c2dev->regs) {
-               printk(KERN_ERR PFX "Unable to remap BAR0\n");
-               ret = -EIO;
-               goto bail8;
-       }
-
-       /* Remap the PCI registers in adapter BAR4 to kernel VA space */
-       c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
-       c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
-                                    kva_map_size);
-       if (!c2dev->kva) {
-               printk(KERN_ERR PFX "Unable to remap BAR4\n");
-               ret = -EIO;
-               goto bail9;
-       }
-
-       /* Print out the MAC address */
-       pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr,
-                netdev->irq);
-
-       ret = c2_rnic_init(c2dev);
-       if (ret) {
-               printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
-               goto bail10;
-       }
-
-       ret = c2_register_device(c2dev);
-       if (ret)
-               goto bail10;
-
-       return 0;
-
- bail10:
-       iounmap(c2dev->kva);
-
- bail9:
-       iounmap(c2dev->regs);
-
- bail8:
-       iounmap(c2dev->mmio_txp_ring);
-
- bail7:
-       iounmap(c2dev->mmio_rxp_ring);
-
- bail6:
-       unregister_netdev(netdev);
-
- bail5:
-       free_netdev(netdev);
-
- bail4:
-       free_irq(pcidev->irq, c2dev);
-
- bail3:
-       ib_dealloc_device(&c2dev->ibdev);
-
- bail2:
-       pci_release_regions(pcidev);
-
- bail1:
-       pci_disable_device(pcidev);
-
- bail0:
-       return ret;
-}
-
-static void c2_remove(struct pci_dev *pcidev)
-{
-       struct c2_dev *c2dev = pci_get_drvdata(pcidev);
-       struct net_device *netdev = c2dev->netdev;
-
-       /* Unregister with OpenIB */
-       c2_unregister_device(c2dev);
-
-       /* Clean up the RNIC resources */
-       c2_rnic_term(c2dev);
-
-       /* Remove network device from the kernel */
-       unregister_netdev(netdev);
-
-       /* Free network device */
-       free_netdev(netdev);
-
-       /* Free the interrupt line */
-       free_irq(pcidev->irq, c2dev);
-
-       /* missing: Turn LEDs off here */
-
-       /* Unmap adapter PA space */
-       iounmap(c2dev->kva);
-       iounmap(c2dev->regs);
-       iounmap(c2dev->mmio_txp_ring);
-       iounmap(c2dev->mmio_rxp_ring);
-
-       /* Free the hardware structure */
-       ib_dealloc_device(&c2dev->ibdev);
-
-       /* Release reserved PCI I/O and memory resources */
-       pci_release_regions(pcidev);
-
-       /* Disable PCI device */
-       pci_disable_device(pcidev);
-
-       /* Clear driver specific data */
-       pci_set_drvdata(pcidev, NULL);
-}
-
-static struct pci_driver c2_pci_driver = {
-       .name = DRV_NAME,
-       .id_table = c2_pci_table,
-       .probe = c2_probe,
-       .remove = c2_remove,
-};
-
-module_pci_driver(c2_pci_driver);
diff --git a/drivers/staging/rdma/amso1100/c2.h b/drivers/staging/rdma/amso1100/c2.h
deleted file mode 100644 (file)
index 21b565a..0000000
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __C2_H
-#define __C2_H
-
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-
-#include "c2_provider.h"
-#include "c2_mq.h"
-#include "c2_status.h"
-
-#define DRV_NAME     "c2"
-#define DRV_VERSION  "1.1"
-#define PFX          DRV_NAME ": "
-
-#define BAR_0                0
-#define BAR_2                2
-#define BAR_4                4
-
-#define RX_BUF_SIZE         (1536 + 8)
-#define ETH_JUMBO_MTU        9000
-#define C2_MAGIC            "CEPHEUS"
-#define C2_VERSION           4
-#define C2_IVN              (18 & 0x7fffffff)
-
-#define C2_REG0_SIZE        (16 * 1024)
-#define C2_REG2_SIZE        (2 * 1024 * 1024)
-#define C2_REG4_SIZE        (256 * 1024 * 1024)
-#define C2_NUM_TX_DESC       341
-#define C2_NUM_RX_DESC       256
-#define C2_PCI_REGS_OFFSET  (0x10000)
-#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
-#define C2_RXP_HRXDQ_SIZE   (4096)
-#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
-#define C2_TXP_HTXDQ_SIZE   (4096)
-#define C2_TX_TIMEOUT      (6*HZ)
-
-/* CEPHEUS */
-static const u8 c2_magic[] = {
-       0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
-};
-
-enum adapter_pci_regs {
-       C2_REGS_MAGIC = 0x0000,
-       C2_REGS_VERS = 0x0008,
-       C2_REGS_IVN = 0x000C,
-       C2_REGS_PCI_WINSIZE = 0x0010,
-       C2_REGS_Q0_QSIZE = 0x0014,
-       C2_REGS_Q0_MSGSIZE = 0x0018,
-       C2_REGS_Q0_POOLSTART = 0x001C,
-       C2_REGS_Q0_SHARED = 0x0020,
-       C2_REGS_Q1_QSIZE = 0x0024,
-       C2_REGS_Q1_MSGSIZE = 0x0028,
-       C2_REGS_Q1_SHARED = 0x0030,
-       C2_REGS_Q2_QSIZE = 0x0034,
-       C2_REGS_Q2_MSGSIZE = 0x0038,
-       C2_REGS_Q2_SHARED = 0x0040,
-       C2_REGS_ENADDR = 0x004C,
-       C2_REGS_RDMA_ENADDR = 0x0054,
-       C2_REGS_HRX_CUR = 0x006C,
-};
-
-struct c2_adapter_pci_regs {
-       char reg_magic[8];
-       u32 version;
-       u32 ivn;
-       u32 pci_window_size;
-       u32 q0_q_size;
-       u32 q0_msg_size;
-       u32 q0_pool_start;
-       u32 q0_shared;
-       u32 q1_q_size;
-       u32 q1_msg_size;
-       u32 q1_pool_start;
-       u32 q1_shared;
-       u32 q2_q_size;
-       u32 q2_msg_size;
-       u32 q2_pool_start;
-       u32 q2_shared;
-       u32 log_start;
-       u32 log_size;
-       u8 host_enaddr[8];
-       u8 rdma_enaddr[8];
-       u32 crash_entry;
-       u32 crash_ready[2];
-       u32 fw_txd_cur;
-       u32 fw_hrxd_cur;
-       u32 fw_rxd_cur;
-};
-
-enum pci_regs {
-       C2_HISR = 0x0000,
-       C2_DISR = 0x0004,
-       C2_HIMR = 0x0008,
-       C2_DIMR = 0x000C,
-       C2_NISR0 = 0x0010,
-       C2_NISR1 = 0x0014,
-       C2_NIMR0 = 0x0018,
-       C2_NIMR1 = 0x001C,
-       C2_IDIS = 0x0020,
-};
-
-enum {
-       C2_PCI_HRX_INT = 1 << 8,
-       C2_PCI_HTX_INT = 1 << 17,
-       C2_PCI_HRX_QUI = 1 << 31,
-};
-
-/*
- * Cepheus registers in BAR0.
- */
-struct c2_pci_regs {
-       u32 hostisr;
-       u32 dmaisr;
-       u32 hostimr;
-       u32 dmaimr;
-       u32 netisr0;
-       u32 netisr1;
-       u32 netimr0;
-       u32 netimr1;
-       u32 int_disable;
-};
-
-/* TXP flags */
-enum c2_txp_flags {
-       TXP_HTXD_DONE = 0,
-       TXP_HTXD_READY = 1 << 0,
-       TXP_HTXD_UNINIT = 1 << 1,
-};
-
-/* RXP flags */
-enum c2_rxp_flags {
-       RXP_HRXD_UNINIT = 0,
-       RXP_HRXD_READY = 1 << 0,
-       RXP_HRXD_DONE = 1 << 1,
-};
-
-/* RXP status */
-enum c2_rxp_status {
-       RXP_HRXD_ZERO = 0,
-       RXP_HRXD_OK = 1 << 0,
-       RXP_HRXD_BUF_OV = 1 << 1,
-};
-
-/* TXP descriptor fields */
-enum txp_desc {
-       C2_TXP_FLAGS = 0x0000,
-       C2_TXP_LEN = 0x0002,
-       C2_TXP_ADDR = 0x0004,
-};
-
-/* RXP descriptor fields */
-enum rxp_desc {
-       C2_RXP_FLAGS = 0x0000,
-       C2_RXP_STATUS = 0x0002,
-       C2_RXP_COUNT = 0x0004,
-       C2_RXP_LEN = 0x0006,
-       C2_RXP_ADDR = 0x0008,
-};
-
-struct c2_txp_desc {
-       u16 flags;
-       u16 len;
-       u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_desc {
-       u16 flags;
-       u16 status;
-       u16 count;
-       u16 len;
-       u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_hdr {
-       u16 flags;
-       u16 status;
-       u16 len;
-       u16 rsvd;
-} __attribute__ ((packed));
-
-struct c2_tx_desc {
-       u32 len;
-       u32 status;
-       dma_addr_t next_offset;
-};
-
-struct c2_rx_desc {
-       u32 len;
-       u32 status;
-       dma_addr_t next_offset;
-};
-
-struct c2_alloc {
-       u32 last;
-       u32 max;
-       spinlock_t lock;
-       unsigned long *table;
-};
-
-struct c2_array {
-       struct {
-               void **page;
-               int used;
-       } *page_list;
-};
-
-/*
- * The MQ shared pointer pool is organized as a linked list of
- * chunks. Each chunk contains a linked list of free shared pointers
- * that can be allocated to a given user mode client.
- *
- */
-struct sp_chunk {
-       struct sp_chunk *next;
-       dma_addr_t dma_addr;
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-       u16 head;
-       u16 shared_ptr[0];
-};
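The sp_chunk layout above pairs the head index with the trailing shared_ptr[] array, which the comment describes as a linked list of free shared pointers; the initialisation loop in c2_alloc.c further below stores, in each slot, the index of the next free slot. A minimal sketch of how such an intrusive free list would be consumed and replenished — an assumption for illustration, not the driver's exact allocation path:

	/* Hedged sketch only: the real c2_alloc_mqsp()/c2_free_mqsp() differ in detail. */
	static u16 sp_chunk_take(struct sp_chunk *chunk)
	{
		u16 slot = chunk->head;                 /* first free slot */

		chunk->head = chunk->shared_ptr[slot];  /* unlink it from the free list */
		return slot;                            /* caller must know the chunk is not empty */
	}

	static void sp_chunk_put(struct sp_chunk *chunk, u16 slot)
	{
		chunk->shared_ptr[slot] = chunk->head;  /* push the slot back onto the list */
		chunk->head = slot;
	}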
-
-struct c2_pd_table {
-       u32 last;
-       u32 max;
-       spinlock_t lock;
-       unsigned long *table;
-};
-
-struct c2_qp_table {
-       struct idr idr;
-       spinlock_t lock;
-};
-
-struct c2_element {
-       struct c2_element *next;
-       void *ht_desc;          /* host     descriptor */
-       void __iomem *hw_desc;  /* hardware descriptor */
-       struct sk_buff *skb;
-       dma_addr_t mapaddr;
-       u32 maplen;
-};
-
-struct c2_ring {
-       struct c2_element *to_clean;
-       struct c2_element *to_use;
-       struct c2_element *start;
-       unsigned long count;
-};
-
-struct c2_dev {
-       struct ib_device ibdev;
-       void __iomem *regs;
-       void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
-       void __iomem *mmio_rxp_ring;
-       spinlock_t lock;
-       struct pci_dev *pcidev;
-       struct net_device *netdev;
-       struct net_device *pseudo_netdev;
-       unsigned int cur_tx;
-       unsigned int cur_rx;
-       u32 adapter_handle;
-       int device_cap_flags;
-       void __iomem *kva;      /* KVA device memory */
-       unsigned long pa;       /* PA device memory */
-       void **qptr_array;
-
-       struct kmem_cache *host_msg_cache;
-
-       struct list_head cca_link;              /* adapter list */
-       struct list_head eh_wakeup_list;        /* event wakeup list */
-       wait_queue_head_t req_vq_wo;
-
-       /* Cached RNIC properties */
-       struct ib_device_attr props;
-
-       struct c2_pd_table pd_table;
-       struct c2_qp_table qp_table;
-       int ports;              /* num of GigE ports */
-       int devnum;
-       spinlock_t vqlock;      /* sync vbs req MQ */
-
-       /* Verbs Queues */
-       struct c2_mq req_vq;    /* Verbs Request MQ */
-       struct c2_mq rep_vq;    /* Verbs Reply MQ */
-       struct c2_mq aeq;       /* Async Events MQ */
-
-       /* Kernel client MQs */
-       struct sp_chunk *kern_mqsp_pool;
-
-       /* Device updates these values when posting messages to a host
-        * target queue */
-       u16 req_vq_shared;
-       u16 rep_vq_shared;
-       u16 aeq_shared;
-       u16 irq_claimed;
-
-       /*
-        * Shared host target pages for user-accessible MQs.
-        */
-       int hthead;             /* index of first free entry */
-       void *htpages;          /* kernel vaddr */
-       int htlen;              /* length of htpages memory */
-       void *htuva;            /* user mapped vaddr */
-       spinlock_t htlock;      /* serialize allocation */
-
-       u64 adapter_hint_uva;   /* access to the activity FIFO */
-
-       //      spinlock_t aeq_lock;
-       //      spinlock_t rnic_lock;
-
-       __be16 *hint_count;
-       dma_addr_t hint_count_dma;
-       u16 hints_read;
-
-       int init;               /* TRUE if it's ready */
-       char ae_cache_name[16];
-       char vq_cache_name[16];
-};
-
-struct c2_port {
-       u32 msg_enable;
-       struct c2_dev *c2dev;
-       struct net_device *netdev;
-
-       spinlock_t tx_lock;
-       u32 tx_avail;
-       struct c2_ring tx_ring;
-       struct c2_ring rx_ring;
-
-       void *mem;              /* PCI memory for host rings */
-       dma_addr_t dma;
-       unsigned long mem_size;
-
-       u32 rx_buf_size;
-};
-
-/*
- * Activity FIFO registers in BAR0.
- */
-#define PCI_BAR0_HOST_HINT     0x100
-#define PCI_BAR0_ADAPTER_HINT  0x2000
-
-/*
- * Completion queue (CQ) arming flags.
- */
-#define CQ_ARMED       0x01
-#define CQ_WAIT_FOR_DMA        0x80
-
-/*
- * The format of a hint is as follows:
- * Lower 16 bits are the count of hints for the queue.
- * Next 15 bits are the qp_index
- * Upper most bit depends on who reads it:
- *    If read by producer, then it means Full (1) or Not-Full (0)
- *    If read by consumer, then it means Empty (1) or Not-Empty (0)
- */
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
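As a worked illustration of the hint layout described in the comment above (illustrative only, not part of the removed driver), packing a queue index of 3 with a hint count of 5 and unpacking it again with these macros gives:

	/* Illustrative example of the C2_HINT_* bit layout (not original driver code). */
	u32 hint  = C2_HINT_MAKE(3, 5);        /* (3 << 16) | 5 == 0x00030005 */
	u32 index = C2_HINT_GET_INDEX(hint);   /* bits 30..16 -> 3 */
	u32 count = C2_HINT_GET_COUNT(hint);   /* bits 15..0  -> 5 */
	/* Bit 31 (Full/Empty, depending on the reader) is left clear by C2_HINT_MAKE(). */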
-
-
-/*
- * The following defines the offset in SDRAM of the
- * struct c2_adapter_pci_regs layout.
- */
-#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
-
-#ifndef readq
-static inline u64 readq(const void __iomem * addr)
-{
-       u64 ret = readl(addr + 4);
-       ret <<= 32;
-       ret |= readl(addr);
-
-       return ret;
-}
-#endif
-
-#ifndef writeq
-static inline void __raw_writeq(u64 val, void __iomem * addr)
-{
-       __raw_writel((u32) (val), addr);
-       __raw_writel((u32) (val >> 32), (addr + 4));
-}
-#endif
-
-#define C2_SET_CUR_RX(c2dev, cur_rx) \
-       __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
-
-#define C2_GET_CUR_RX(c2dev) \
-       be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
-
-static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
-{
-       return container_of(ibdev, struct c2_dev, ibdev);
-}
-
-static inline int c2_errno(void *reply)
-{
-       switch (c2_wr_get_result(reply)) {
-       case C2_OK:
-               return 0;
-       case CCERR_NO_BUFS:
-       case CCERR_INSUFFICIENT_RESOURCES:
-       case CCERR_ZERO_RDMA_READ_RESOURCES:
-               return -ENOMEM;
-       case CCERR_MR_IN_USE:
-       case CCERR_QP_IN_USE:
-               return -EBUSY;
-       case CCERR_ADDR_IN_USE:
-               return -EADDRINUSE;
-       case CCERR_ADDR_NOT_AVAIL:
-               return -EADDRNOTAVAIL;
-       case CCERR_CONN_RESET:
-               return -ECONNRESET;
-       case CCERR_NOT_IMPLEMENTED:
-       case CCERR_INVALID_WQE:
-               return -ENOSYS;
-       case CCERR_QP_NOT_PRIVILEGED:
-               return -EPERM;
-       case CCERR_STACK_ERROR:
-               return -EPROTO;
-       case CCERR_ACCESS_VIOLATION:
-       case CCERR_BASE_AND_BOUNDS_VIOLATION:
-               return -EFAULT;
-       case CCERR_STAG_STATE_NOT_INVALID:
-       case CCERR_INVALID_ADDRESS:
-       case CCERR_INVALID_CQ:
-       case CCERR_INVALID_EP:
-       case CCERR_INVALID_MODIFIER:
-       case CCERR_INVALID_MTU:
-       case CCERR_INVALID_PD_ID:
-       case CCERR_INVALID_QP:
-       case CCERR_INVALID_RNIC:
-       case CCERR_INVALID_STAG:
-               return -EINVAL;
-       default:
-               return -EAGAIN;
-       }
-}
-
-/* Device */
-int c2_register_device(struct c2_dev *c2dev);
-void c2_unregister_device(struct c2_dev *c2dev);
-int c2_rnic_init(struct c2_dev *c2dev);
-void c2_rnic_term(struct c2_dev *c2dev);
-void c2_rnic_interrupt(struct c2_dev *c2dev);
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-
-/* QPs */
-int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
-                      struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
-                       struct ib_qp_attr *attr, int attr_mask);
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
-                                int ord, int ird);
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
-                       struct ib_send_wr **bad_wr);
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
-                          struct ib_recv_wr **bad_wr);
-void c2_init_qp_table(struct c2_dev *c2dev);
-void c2_cleanup_qp_table(struct c2_dev *c2dev);
-void c2_set_qp_state(struct c2_qp *, int);
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
-
-/* PDs */
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-int c2_init_pd_table(struct c2_dev *c2dev);
-void c2_cleanup_pd_table(struct c2_dev *c2dev);
-
-/* CQs */
-int c2_init_cq(struct c2_dev *c2dev, int entries,
-                     struct c2_ucontext *ctx, struct c2_cq *cq);
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-
-/* CM */
-int c2_llp_connect(struct iw_cm_id *cm_id,
-                         struct iw_cm_conn_param *iw_param);
-int c2_llp_accept(struct iw_cm_id *cm_id,
-                        struct iw_cm_conn_param *iw_param);
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
-                        u8 pdata_len);
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
-int c2_llp_service_destroy(struct iw_cm_id *cm_id);
-
-/* MM */
-int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
-                                     int page_size, int pbl_depth, u32 length,
-                                     u32 off, u64 *va, enum c2_acf acf,
-                                     struct c2_mr *mr);
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
-
-/* AE */
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
-
-/* MQSP Allocator */
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
-                            struct sp_chunk **root);
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-                            dma_addr_t *dma_addr, gfp_t gfp_mask);
-void c2_free_mqsp(__be16* mqsp);
-#endif
diff --git a/drivers/staging/rdma/amso1100/c2_ae.c b/drivers/staging/rdma/amso1100/c2_ae.c
deleted file mode 100644 (file)
index eb7a92b..0000000
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_status.h"
-#include "c2_ae.h"
-
-static int c2_convert_cm_status(u32 c2_status)
-{
-       switch (c2_status) {
-       case C2_CONN_STATUS_SUCCESS:
-               return 0;
-       case C2_CONN_STATUS_REJECTED:
-               return -ENETRESET;
-       case C2_CONN_STATUS_REFUSED:
-               return -ECONNREFUSED;
-       case C2_CONN_STATUS_TIMEDOUT:
-               return -ETIMEDOUT;
-       case C2_CONN_STATUS_NETUNREACH:
-               return -ENETUNREACH;
-       case C2_CONN_STATUS_HOSTUNREACH:
-               return -EHOSTUNREACH;
-       case C2_CONN_STATUS_INVALID_RNIC:
-               return -EINVAL;
-       case C2_CONN_STATUS_INVALID_QP:
-               return -EINVAL;
-       case C2_CONN_STATUS_INVALID_QP_STATE:
-               return -EINVAL;
-       case C2_CONN_STATUS_ADDR_NOT_AVAIL:
-               return -EADDRNOTAVAIL;
-       default:
-               printk(KERN_ERR PFX
-                      "%s - Unable to convert CM status: %d\n",
-                      __func__, c2_status);
-               return -EIO;
-       }
-}
-
-static const char* to_event_str(int event)
-{
-       static const char* event_str[] = {
-               "CCAE_REMOTE_SHUTDOWN",
-               "CCAE_ACTIVE_CONNECT_RESULTS",
-               "CCAE_CONNECTION_REQUEST",
-               "CCAE_LLP_CLOSE_COMPLETE",
-               "CCAE_TERMINATE_MESSAGE_RECEIVED",
-               "CCAE_LLP_CONNECTION_RESET",
-               "CCAE_LLP_CONNECTION_LOST",
-               "CCAE_LLP_SEGMENT_SIZE_INVALID",
-               "CCAE_LLP_INVALID_CRC",
-               "CCAE_LLP_BAD_FPDU",
-               "CCAE_INVALID_DDP_VERSION",
-               "CCAE_INVALID_RDMA_VERSION",
-               "CCAE_UNEXPECTED_OPCODE",
-               "CCAE_INVALID_DDP_QUEUE_NUMBER",
-               "CCAE_RDMA_READ_NOT_ENABLED",
-               "CCAE_RDMA_WRITE_NOT_ENABLED",
-               "CCAE_RDMA_READ_TOO_SMALL",
-               "CCAE_NO_L_BIT",
-               "CCAE_TAGGED_INVALID_STAG",
-               "CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
-               "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
-               "CCAE_TAGGED_INVALID_PD",
-               "CCAE_WRAP_ERROR",
-               "CCAE_BAD_CLOSE",
-               "CCAE_BAD_LLP_CLOSE",
-               "CCAE_INVALID_MSN_RANGE",
-               "CCAE_INVALID_MSN_GAP",
-               "CCAE_IRRQ_OVERFLOW",
-               "CCAE_IRRQ_MSN_GAP",
-               "CCAE_IRRQ_MSN_RANGE",
-               "CCAE_IRRQ_INVALID_STAG",
-               "CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
-               "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
-               "CCAE_IRRQ_INVALID_PD",
-               "CCAE_IRRQ_WRAP_ERROR",
-               "CCAE_CQ_SQ_COMPLETION_OVERFLOW",
-               "CCAE_CQ_RQ_COMPLETION_ERROR",
-               "CCAE_QP_SRQ_WQE_ERROR",
-               "CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
-               "CCAE_CQ_OVERFLOW",
-               "CCAE_CQ_OPERATION_ERROR",
-               "CCAE_SRQ_LIMIT_REACHED",
-               "CCAE_QP_RQ_LIMIT_REACHED",
-               "CCAE_SRQ_CATASTROPHIC_ERROR",
-               "CCAE_RNIC_CATASTROPHIC_ERROR"
-       };
-
-       if (event < CCAE_REMOTE_SHUTDOWN ||
-           event > CCAE_RNIC_CATASTROPHIC_ERROR)
-               return "<invalid event>";
-
-       event -= CCAE_REMOTE_SHUTDOWN;
-       return event_str[event];
-}
-
-static const char *to_qp_state_str(int state)
-{
-       switch (state) {
-       case C2_QP_STATE_IDLE:
-               return "C2_QP_STATE_IDLE";
-       case C2_QP_STATE_CONNECTING:
-               return "C2_QP_STATE_CONNECTING";
-       case C2_QP_STATE_RTS:
-               return "C2_QP_STATE_RTS";
-       case C2_QP_STATE_CLOSING:
-               return "C2_QP_STATE_CLOSING";
-       case C2_QP_STATE_TERMINATE:
-               return "C2_QP_STATE_TERMINATE";
-       case C2_QP_STATE_ERROR:
-               return "C2_QP_STATE_ERROR";
-       default:
-               return "<invalid QP state>";
-       }
-}
-
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
-{
-       struct c2_mq *mq = c2dev->qptr_array[mq_index];
-       union c2wr *wr;
-       void *resource_user_context;
-       struct iw_cm_event cm_event;
-       struct ib_event ib_event;
-       enum c2_resource_indicator resource_indicator;
-       enum c2_event_id event_id;
-       unsigned long flags;
-       int status;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
-
-       /*
-        * retrieve the message
-        */
-       wr = c2_mq_consume(mq);
-       if (!wr)
-               return;
-
-       memset(&ib_event, 0, sizeof(ib_event));
-       memset(&cm_event, 0, sizeof(cm_event));
-
-       event_id = c2_wr_get_id(wr);
-       resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
-       resource_user_context =
-           (void *) (unsigned long) wr->ae.ae_generic.user_context;
-
-       status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
-
-       pr_debug("event received c2_dev=%p, event_id=%d, "
-               "resource_indicator=%d, user_context=%p, status = %d\n",
-               c2dev, event_id, resource_indicator, resource_user_context,
-               status);
-
-       switch (resource_indicator) {
-       case C2_RES_IND_QP:{
-
-               struct c2_qp *qp = resource_user_context;
-               struct iw_cm_id *cm_id = qp->cm_id;
-               struct c2wr_ae_active_connect_results *res;
-
-               if (!cm_id) {
-                       pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
-                               qp);
-                       goto ignore_it;
-               }
-               pr_debug("%s: event = %s, user_context=%llx, "
-                       "resource_type=%x, "
-                       "resource=%x, qp_state=%s\n",
-                       __func__,
-                       to_event_str(event_id),
-                       (unsigned long long) wr->ae.ae_generic.user_context,
-                       be32_to_cpu(wr->ae.ae_generic.resource_type),
-                       be32_to_cpu(wr->ae.ae_generic.resource),
-                       to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
-
-               c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
-
-               switch (event_id) {
-               case CCAE_ACTIVE_CONNECT_RESULTS:
-                       res = &wr->ae.ae_active_connect_results;
-                       cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-                       laddr->sin_addr.s_addr = res->laddr;
-                       raddr->sin_addr.s_addr = res->raddr;
-                       laddr->sin_port = res->lport;
-                       raddr->sin_port = res->rport;
-                       if (status == 0) {
-                               cm_event.private_data_len =
-                                       be32_to_cpu(res->private_data_length);
-                               cm_event.private_data = res->private_data;
-                       } else {
-                               spin_lock_irqsave(&qp->lock, flags);
-                               if (qp->cm_id) {
-                                       qp->cm_id->rem_ref(qp->cm_id);
-                                       qp->cm_id = NULL;
-                               }
-                               spin_unlock_irqrestore(&qp->lock, flags);
-                               cm_event.private_data_len = 0;
-                               cm_event.private_data = NULL;
-                       }
-                       if (cm_id->event_handler)
-                               cm_id->event_handler(cm_id, &cm_event);
-                       break;
-               case CCAE_TERMINATE_MESSAGE_RECEIVED:
-               case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
-                       ib_event.device = &c2dev->ibdev;
-                       ib_event.element.qp = &qp->ibqp;
-                       ib_event.event = IB_EVENT_QP_REQ_ERR;
-
-                       if (qp->ibqp.event_handler)
-                               qp->ibqp.event_handler(&ib_event,
-                                                      qp->ibqp.qp_context);
-                       break;
-               case CCAE_BAD_CLOSE:
-               case CCAE_LLP_CLOSE_COMPLETE:
-               case CCAE_LLP_CONNECTION_RESET:
-               case CCAE_LLP_CONNECTION_LOST:
-                       BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
-
-                       spin_lock_irqsave(&qp->lock, flags);
-                       if (qp->cm_id) {
-                               qp->cm_id->rem_ref(qp->cm_id);
-                               qp->cm_id = NULL;
-                       }
-                       spin_unlock_irqrestore(&qp->lock, flags);
-                       cm_event.event = IW_CM_EVENT_CLOSE;
-                       cm_event.status = 0;
-                       if (cm_id->event_handler)
-                               cm_id->event_handler(cm_id, &cm_event);
-                       break;
-               default:
-                       BUG_ON(1);
-                       pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
-                               "CM_ID=%p\n",
-                               __func__, __LINE__,
-                               event_id, qp, cm_id);
-                       break;
-               }
-               break;
-       }
-
-       case C2_RES_IND_EP:{
-
-               struct c2wr_ae_connection_request *req =
-                       &wr->ae.ae_connection_request;
-               struct iw_cm_id *cm_id =
-                       resource_user_context;
-
-               pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
-               if (event_id != CCAE_CONNECTION_REQUEST) {
-                       pr_debug("%s: Invalid event_id: %d\n",
-                               __func__, event_id);
-                       break;
-               }
-               cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
-               cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
-               laddr->sin_addr.s_addr = req->laddr;
-               raddr->sin_addr.s_addr = req->raddr;
-               laddr->sin_port = req->lport;
-               raddr->sin_port = req->rport;
-               cm_event.private_data_len =
-                       be32_to_cpu(req->private_data_length);
-               cm_event.private_data = req->private_data;
-               /*
-                * Until ird/ord negotiation via MPAv2 support is added, send
-                * max supported values
-                */
-               cm_event.ird = cm_event.ord = 128;
-
-               if (cm_id->event_handler)
-                       cm_id->event_handler(cm_id, &cm_event);
-               break;
-       }
-
-       case C2_RES_IND_CQ:{
-               struct c2_cq *cq =
-                   resource_user_context;
-
-               pr_debug("IB_EVENT_CQ_ERR\n");
-               ib_event.device = &c2dev->ibdev;
-               ib_event.element.cq = &cq->ibcq;
-               ib_event.event = IB_EVENT_CQ_ERR;
-
-               if (cq->ibcq.event_handler)
-                       cq->ibcq.event_handler(&ib_event,
-                                              cq->ibcq.cq_context);
-               break;
-       }
-
-       default:
-               printk("Bad resource indicator = %d\n",
-                      resource_indicator);
-               break;
-       }
-
- ignore_it:
-       c2_mq_free(mq);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_ae.h b/drivers/staging/rdma/amso1100/c2_ae.h
deleted file mode 100644 (file)
index 3a065c3..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_AE_H_
-#define _C2_AE_H_
-
-/*
- * WARNING: If you change this file, also bump C2_IVN_BASE
- * in common/include/clustercore/c2_ivn.h.
- */
-
-/*
- * Asynchronous Event Identifiers
- *
- * These start at 0x80 only so it's obvious from inspection that
- * they are not work-request statuses.  This isn't critical.
- *
- * NOTE: these event id's must fit in eight bits.
- */
-enum c2_event_id {
-       CCAE_REMOTE_SHUTDOWN = 0x80,
-       CCAE_ACTIVE_CONNECT_RESULTS,
-       CCAE_CONNECTION_REQUEST,
-       CCAE_LLP_CLOSE_COMPLETE,
-       CCAE_TERMINATE_MESSAGE_RECEIVED,
-       CCAE_LLP_CONNECTION_RESET,
-       CCAE_LLP_CONNECTION_LOST,
-       CCAE_LLP_SEGMENT_SIZE_INVALID,
-       CCAE_LLP_INVALID_CRC,
-       CCAE_LLP_BAD_FPDU,
-       CCAE_INVALID_DDP_VERSION,
-       CCAE_INVALID_RDMA_VERSION,
-       CCAE_UNEXPECTED_OPCODE,
-       CCAE_INVALID_DDP_QUEUE_NUMBER,
-       CCAE_RDMA_READ_NOT_ENABLED,
-       CCAE_RDMA_WRITE_NOT_ENABLED,
-       CCAE_RDMA_READ_TOO_SMALL,
-       CCAE_NO_L_BIT,
-       CCAE_TAGGED_INVALID_STAG,
-       CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
-       CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
-       CCAE_TAGGED_INVALID_PD,
-       CCAE_WRAP_ERROR,
-       CCAE_BAD_CLOSE,
-       CCAE_BAD_LLP_CLOSE,
-       CCAE_INVALID_MSN_RANGE,
-       CCAE_INVALID_MSN_GAP,
-       CCAE_IRRQ_OVERFLOW,
-       CCAE_IRRQ_MSN_GAP,
-       CCAE_IRRQ_MSN_RANGE,
-       CCAE_IRRQ_INVALID_STAG,
-       CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
-       CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
-       CCAE_IRRQ_INVALID_PD,
-       CCAE_IRRQ_WRAP_ERROR,
-       CCAE_CQ_SQ_COMPLETION_OVERFLOW,
-       CCAE_CQ_RQ_COMPLETION_ERROR,
-       CCAE_QP_SRQ_WQE_ERROR,
-       CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
-       CCAE_CQ_OVERFLOW,
-       CCAE_CQ_OPERATION_ERROR,
-       CCAE_SRQ_LIMIT_REACHED,
-       CCAE_QP_RQ_LIMIT_REACHED,
-       CCAE_SRQ_CATASTROPHIC_ERROR,
-       CCAE_RNIC_CATASTROPHIC_ERROR
-/* WARNING If you add more id's, make sure their values fit in eight bits. */
-};
-
-/*
- * Resource Indicators and Identifiers
- */
-enum c2_resource_indicator {
-       C2_RES_IND_QP = 1,
-       C2_RES_IND_EP,
-       C2_RES_IND_CQ,
-       C2_RES_IND_SRQ,
-};
-
-#endif /* _C2_AE_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_alloc.c b/drivers/staging/rdma/amso1100/c2_alloc.c
deleted file mode 100644 (file)
index 039872d..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/bitmap.h>
-
-#include "c2.h"
-
-static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
-                              struct sp_chunk **head)
-{
-       int i;
-       struct sp_chunk *new_head;
-       dma_addr_t dma_addr;
-
-       new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
-                                     &dma_addr, gfp_mask);
-       if (new_head == NULL)
-               return -ENOMEM;
-
-       new_head->dma_addr = dma_addr;
-       dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
-
-       new_head->next = NULL;
-       new_head->head = 0;
-
-       /* build list where each index is the next free slot */
-       for (i = 0;
-            i < (PAGE_SIZE - sizeof(struct sp_chunk) -
-                 sizeof(u16)) / sizeof(u16) - 1;
-            i++) {
-               new_head->shared_ptr[i] = i + 1;
-       }
-       /* terminate list */
-       new_head->shared_ptr[i] = 0xFFFF;
-
-       *head = new_head;
-       return 0;
-}
-
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
-                     struct sp_chunk **root)
-{
-       return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
-}
-
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
-{
-       struct sp_chunk *next;
-
-       while (root) {
-               next = root->next;
-               dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
-                                 dma_unmap_addr(root, mapping));
-               root = next;
-       }
-}
-
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-                     dma_addr_t *dma_addr, gfp_t gfp_mask)
-{
-       u16 mqsp;
-
-       while (head) {
-               mqsp = head->head;
-               if (mqsp != 0xFFFF) {
-                       head->head = head->shared_ptr[mqsp];
-                       break;
-               } else if (head->next == NULL) {
-                       if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
-                           0) {
-                               head = head->next;
-                               mqsp = head->head;
-                               head->head = head->shared_ptr[mqsp];
-                               break;
-                       } else
-                               return NULL;
-               } else
-                       head = head->next;
-       }
-       if (head) {
-               *dma_addr = head->dma_addr +
-                           ((unsigned long) &(head->shared_ptr[mqsp]) -
-                            (unsigned long) head);
-               pr_debug("%s addr %p dma_addr %llx\n", __func__,
-                        &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
-               return (__force __be16 *) &(head->shared_ptr[mqsp]);
-       }
-       return NULL;
-}
-
-void c2_free_mqsp(__be16 *mqsp)
-{
-       struct sp_chunk *head;
-       u16 idx;
-
-       /* The chunk containing this ptr begins at the page boundary */
-       head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
-
-       /* Link head to new mqsp */
-       *mqsp = (__force __be16) head->head;
-
-       /* Compute the shared_ptr index */
-       idx = (offset_in_page(mqsp)) >> 1;
-       idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1;
-
-       /* Point this index at the head */
-       head->shared_ptr[idx] = head->head;
-
-       /* Point head at this index */
-       head->head = idx;
-}
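
The pool removed above is an intrusive free list: chunks are page sized, every free 16-bit slot stores the index of the next free slot, and c2_free_mqsp recovers a slot's index from the pointer's offset inside its page. A minimal userspace sketch of the same scheme, using malloc and a fixed chunk size in place of dma_alloc_coherent and PAGE_SIZE (all names below are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SLOTS 64          /* slots per chunk; the driver sizes this from PAGE_SIZE */
#define END_OF_LIST 0xFFFF

struct chunk {
    uint16_t head;              /* index of the first free slot */
    uint16_t slot[CHUNK_SLOTS];
};

static struct chunk *chunk_new(void)
{
    struct chunk *c = malloc(sizeof(*c));
    if (!c)
        return NULL;
    /* each free slot points at the next one; the last slot terminates the list */
    for (int i = 0; i < CHUNK_SLOTS - 1; i++)
        c->slot[i] = i + 1;
    c->slot[CHUNK_SLOTS - 1] = END_OF_LIST;
    c->head = 0;
    return c;
}

static uint16_t *slot_alloc(struct chunk *c)
{
    if (c->head == END_OF_LIST)
        return NULL;            /* the driver would chain another chunk here */
    uint16_t idx = c->head;
    c->head = c->slot[idx];     /* pop: head follows the embedded next index */
    return &c->slot[idx];
}

static void slot_free(struct chunk *c, uint16_t *p)
{
    uint16_t idx = (uint16_t)(p - c->slot);
    c->slot[idx] = c->head;     /* push: the freed slot now points at the old head */
    c->head = idx;
}

int main(void)
{
    struct chunk *c = chunk_new();
    uint16_t *a = slot_alloc(c), *b = slot_alloc(c);
    printf("allocated slots %td and %td\n", a - c->slot, b - c->slot);
    slot_free(c, a);
    printf("next alloc reuses slot %td\n", slot_alloc(c) - c->slot);
    free(c);
    return 0;
}
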
diff --git a/drivers/staging/rdma/amso1100/c2_cm.c b/drivers/staging/rdma/amso1100/c2_cm.c
deleted file mode 100644 (file)
index f8dbdb9..0000000
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_vq.h"
-#include <rdma/iw_cm.h>
-
-int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-       struct c2_dev *c2dev = to_c2dev(cm_id->device);
-       struct ib_qp *ibqp;
-       struct c2_qp *qp;
-       struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
-       struct c2_vq_req *vq_req;
-       int err;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-
-       if (cm_id->remote_addr.ss_family != AF_INET)
-               return -ENOSYS;
-
-       ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
-       if (!ibqp)
-               return -EINVAL;
-       qp = to_c2qp(ibqp);
-
-       /* Associate QP <--> CM_ID */
-       cm_id->provider_data = qp;
-       cm_id->add_ref(cm_id);
-       qp->cm_id = cm_id;
-
-       /*
-        * only support the max private_data length
-        */
-       if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
-               err = -EINVAL;
-               goto bail0;
-       }
-       /*
-        * Set the rdma read limits
-        */
-       err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
-       if (err)
-               goto bail0;
-
-       /*
-        * Create and send a WR_QP_CONNECT...
-        */
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       c2_wr_set_id(wr, CCWR_QP_CONNECT);
-       wr->hdr.context = 0;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->qp_handle = qp->adapter_handle;
-
-       wr->remote_addr = raddr->sin_addr.s_addr;
-       wr->remote_port = raddr->sin_port;
-
-       /*
-        * Move any private data from the caller's buf into
-        * the WR.
-        */
-       if (iw_param->private_data) {
-               wr->private_data_length =
-                       cpu_to_be32(iw_param->private_data_len);
-               memcpy(&wr->private_data[0], iw_param->private_data,
-                      iw_param->private_data_len);
-       } else
-               wr->private_data_length = 0;
-
-       /*
-        * Send WR to adapter.  NOTE: There is no synchronous reply from
-        * the adapter.
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       vq_req_free(c2dev, vq_req);
-
- bail1:
-       kfree(wr);
- bail0:
-       if (err) {
-               /*
-                * If we fail, release reference on QP and
-                * disassociate QP from CM_ID
-                */
-               cm_id->provider_data = NULL;
-               qp->cm_id = NULL;
-               cm_id->rem_ref(cm_id);
-       }
-       return err;
-}
-
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
-{
-       struct c2_dev *c2dev;
-       struct c2wr_ep_listen_create_req wr;
-       struct c2wr_ep_listen_create_rep *reply;
-       struct c2_vq_req *vq_req;
-       int err;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-
-       if (cm_id->local_addr.ss_family != AF_INET)
-               return -ENOSYS;
-
-       c2dev = to_c2dev(cm_id->device);
-       if (c2dev == NULL)
-               return -EINVAL;
-
-       /*
-        * Allocate verbs request.
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       /*
-        * Build the WR
-        */
-       c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
-       wr.hdr.context = (u64) (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.local_addr = laddr->sin_addr.s_addr;
-       wr.local_port = laddr->sin_port;
-       wr.backlog = cpu_to_be32(backlog);
-       wr.user_context = (u64) (unsigned long) cm_id;
-
-       /*
-        * Reference the request struct.  Dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       /*
-        * Process reply
-        */
-       reply =
-           (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       if ((err = c2_errno(reply)) != 0)
-               goto bail1;
-
-       /*
-        * Keep the adapter handle. Used in subsequent destroy
-        */
-       cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
-
-       /*
-        * free vq stuff
-        */
-       vq_repbuf_free(c2dev, reply);
-       vq_req_free(c2dev, vq_req);
-
-       return 0;
-
- bail1:
-       vq_repbuf_free(c2dev, reply);
- bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-
-int c2_llp_service_destroy(struct iw_cm_id *cm_id)
-{
-
-       struct c2_dev *c2dev;
-       struct c2wr_ep_listen_destroy_req wr;
-       struct c2wr_ep_listen_destroy_rep *reply;
-       struct c2_vq_req *vq_req;
-       int err;
-
-       c2dev = to_c2dev(cm_id->device);
-       if (c2dev == NULL)
-               return -EINVAL;
-
-       /*
-        * Allocate verbs request.
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       /*
-        * Build the WR
-        */
-       c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
-
-       /*
-        * reference the request struct.  dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       /*
-        * Process reply
-        */
-       reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       vq_repbuf_free(c2dev, reply);
- bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-       struct c2_dev *c2dev = to_c2dev(cm_id->device);
-       struct c2_qp *qp;
-       struct ib_qp *ibqp;
-       struct c2wr_cr_accept_req *wr;  /* variable length WR */
-       struct c2_vq_req *vq_req;
-       struct c2wr_cr_accept_rep *reply;       /* VQ Reply msg ptr. */
-       int err;
-
-       ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
-       if (!ibqp)
-               return -EINVAL;
-       qp = to_c2qp(ibqp);
-
-       /* Set the RDMA read limits */
-       err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
-       if (err)
-               goto bail0;
-
-       /* Allocate verbs request. */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-       vq_req->qp = qp;
-       vq_req->cm_id = cm_id;
-       vq_req->event = IW_CM_EVENT_ESTABLISHED;
-
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       /* Build the WR */
-       c2_wr_set_id(wr, CCWR_CR_ACCEPT);
-       wr->hdr.context = (unsigned long) vq_req;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
-       wr->qp_handle = qp->adapter_handle;
-
-       /* Replace the cr_handle with the QP after accept */
-       cm_id->provider_data = qp;
-       cm_id->add_ref(cm_id);
-       qp->cm_id = cm_id;
-
-       cm_id->provider_data = qp;
-
-       /* Validate private_data length */
-       if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
-               err = -EINVAL;
-               goto bail1;
-       }
-
-       if (iw_param->private_data) {
-               wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
-               memcpy(&wr->private_data[0],
-                      iw_param->private_data, iw_param->private_data_len);
-       } else
-               wr->private_data_length = 0;
-
-       /* Reference the request struct.  Dereferenced in the int handler. */
-       vq_req_get(c2dev, vq_req);
-
-       /* Send WR to adapter */
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       /* Wait for reply from adapter */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       /* Check that reply is present */
-       reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       err = c2_errno(reply);
-       vq_repbuf_free(c2dev, reply);
-
-       if (!err)
-               c2_set_qp_state(qp, C2_QP_STATE_RTS);
- bail1:
-       kfree(wr);
-       vq_req_free(c2dev, vq_req);
- bail0:
-       if (err) {
-               /*
-                * If we fail, release reference on QP and
-                * disassociate QP from CM_ID
-                */
-               cm_id->provider_data = NULL;
-               qp->cm_id = NULL;
-               cm_id->rem_ref(cm_id);
-       }
-       return err;
-}
-
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
-       struct c2_dev *c2dev;
-       struct c2wr_cr_reject_req wr;
-       struct c2_vq_req *vq_req;
-       struct c2wr_cr_reject_rep *reply;
-       int err;
-
-       c2dev = to_c2dev(cm_id->device);
-
-       /*
-        * Allocate verbs request.
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       /*
-        * Build the WR
-        */
-       c2_wr_set_id(&wr, CCWR_CR_REJECT);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
-
-       /*
-        * reference the request struct.  dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_cr_reject_rep *) (unsigned long)
-               vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-       err = c2_errno(reply);
-       /*
-        * free vq stuff
-        */
-       vq_repbuf_free(c2dev, reply);
-
- bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
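
Every CM call in the file removed above follows the same verbs-queue discipline: allocate a vq_req, build the WR, take a reference, post it with vq_send_wr(), block in vq_wait_for_reply(), then check and free the reply buffer. A rough userspace sketch of that post-and-wait pattern, with a pthread standing in for the adapter's reply interrupt (the types and helpers here are illustrative only):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct vq_req {
    pthread_mutex_t lock;
    pthread_cond_t  wait;
    int             reply_ready;
    int             reply_status;       /* stands in for the c2wr reply message */
};

/* "Interrupt handler": hand the reply to the waiter and wake it up. */
static void *adapter_reply(void *arg)
{
    struct vq_req *req = arg;
    pthread_mutex_lock(&req->lock);
    req->reply_status = 0;              /* pretend the adapter reported success */
    req->reply_ready = 1;
    pthread_cond_signal(&req->wait);
    pthread_mutex_unlock(&req->lock);
    return NULL;
}

/* vq_wait_for_reply() analogue: block until the reply has been posted. */
static int wait_for_reply(struct vq_req *req)
{
    pthread_mutex_lock(&req->lock);
    while (!req->reply_ready)
        pthread_cond_wait(&req->wait, &req->lock);
    pthread_mutex_unlock(&req->lock);
    return req->reply_status;
}

int main(void)
{
    struct vq_req req;
    pthread_t adapter;

    memset(&req, 0, sizeof(req));
    pthread_mutex_init(&req.lock, NULL);
    pthread_cond_init(&req.wait, NULL);

    /* "Post the work request": here that just means starting the reply thread. */
    pthread_create(&adapter, NULL, adapter_reply, &req);

    printf("reply status = %d\n", wait_for_reply(&req));
    pthread_join(adapter, NULL);
    return 0;
}
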
diff --git a/drivers/staging/rdma/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
deleted file mode 100644 (file)
index 7ad0c08..0000000
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
-
-static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
-{
-       struct c2_cq *cq;
-       unsigned long flags;
-
-       spin_lock_irqsave(&c2dev->lock, flags);
-       cq = c2dev->qptr_array[cqn];
-       if (!cq) {
-               spin_unlock_irqrestore(&c2dev->lock, flags);
-               return NULL;
-       }
-       atomic_inc(&cq->refcount);
-       spin_unlock_irqrestore(&c2dev->lock, flags);
-       return cq;
-}
-
-static void c2_cq_put(struct c2_cq *cq)
-{
-       if (atomic_dec_and_test(&cq->refcount))
-               wake_up(&cq->wait);
-}
-
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
-{
-       struct c2_cq *cq;
-
-       cq = c2_cq_get(c2dev, mq_index);
-       if (!cq) {
-               printk("discarding events on destroyed CQN=%d\n", mq_index);
-               return;
-       }
-
-       (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-       c2_cq_put(cq);
-}
-
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
-{
-       struct c2_cq *cq;
-       struct c2_mq *q;
-
-       cq = c2_cq_get(c2dev, mq_index);
-       if (!cq)
-               return;
-
-       spin_lock_irq(&cq->lock);
-       q = &cq->mq;
-       if (q && !c2_mq_empty(q)) {
-               u16 priv = q->priv;
-               struct c2wr_ce *msg;
-
-               while (priv != be16_to_cpu(*q->shared)) {
-                       msg = (struct c2wr_ce *)
-                               (q->msg_pool.host + priv * q->msg_size);
-                       if (msg->qp_user_context == (u64) (unsigned long) qp) {
-                               msg->qp_user_context = (u64) 0;
-                       }
-                       priv = (priv + 1) % q->q_size;
-               }
-       }
-       spin_unlock_irq(&cq->lock);
-       c2_cq_put(cq);
-}
-
-static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
-{
-       switch (status) {
-       case C2_OK:
-               return IB_WC_SUCCESS;
-       case CCERR_FLUSHED:
-               return IB_WC_WR_FLUSH_ERR;
-       case CCERR_BASE_AND_BOUNDS_VIOLATION:
-               return IB_WC_LOC_PROT_ERR;
-       case CCERR_ACCESS_VIOLATION:
-               return IB_WC_LOC_ACCESS_ERR;
-       case CCERR_TOTAL_LENGTH_TOO_BIG:
-               return IB_WC_LOC_LEN_ERR;
-       case CCERR_INVALID_WINDOW:
-               return IB_WC_MW_BIND_ERR;
-       default:
-               return IB_WC_GENERAL_ERR;
-       }
-}
-
-
-static inline int c2_poll_one(struct c2_dev *c2dev,
-                             struct c2_cq *cq, struct ib_wc *entry)
-{
-       struct c2wr_ce *ce;
-       struct c2_qp *qp;
-       int is_recv = 0;
-
-       ce = c2_mq_consume(&cq->mq);
-       if (!ce) {
-               return -EAGAIN;
-       }
-
-       /*
-        * if the qp returned is null then this qp has already
-        * been freed and we are unable to process the completion.
-        * try pulling the next message
-        */
-       while ((qp =
-               (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
-               c2_mq_free(&cq->mq);
-               ce = c2_mq_consume(&cq->mq);
-               if (!ce)
-                       return -EAGAIN;
-       }
-
-       entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
-       entry->wr_id = ce->hdr.context;
-       entry->qp = &qp->ibqp;
-       entry->wc_flags = 0;
-       entry->slid = 0;
-       entry->sl = 0;
-       entry->src_qp = 0;
-       entry->dlid_path_bits = 0;
-       entry->pkey_index = 0;
-
-       switch (c2_wr_get_id(ce)) {
-       case C2_WR_TYPE_SEND:
-               entry->opcode = IB_WC_SEND;
-               break;
-       case C2_WR_TYPE_RDMA_WRITE:
-               entry->opcode = IB_WC_RDMA_WRITE;
-               break;
-       case C2_WR_TYPE_RDMA_READ:
-               entry->opcode = IB_WC_RDMA_READ;
-               break;
-       case C2_WR_TYPE_RECV:
-               entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
-               entry->opcode = IB_WC_RECV;
-               is_recv = 1;
-               break;
-       default:
-               break;
-       }
-
-       /* consume the WQEs */
-       if (is_recv)
-               c2_mq_lconsume(&qp->rq_mq, 1);
-       else
-               c2_mq_lconsume(&qp->sq_mq,
-                              be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
-
-       /* free the message */
-       c2_mq_free(&cq->mq);
-
-       return 0;
-}
-
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
-       struct c2_dev *c2dev = to_c2dev(ibcq->device);
-       struct c2_cq *cq = to_c2cq(ibcq);
-       unsigned long flags;
-       int npolled, err;
-
-       spin_lock_irqsave(&cq->lock, flags);
-
-       for (npolled = 0; npolled < num_entries; ++npolled) {
-
-               err = c2_poll_one(c2dev, cq, entry + npolled);
-               if (err)
-                       break;
-       }
-
-       spin_unlock_irqrestore(&cq->lock, flags);
-
-       return npolled;
-}
-
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
-       struct c2_mq_shared __iomem *shared;
-       struct c2_cq *cq;
-       unsigned long flags;
-       int ret = 0;
-
-       cq = to_c2cq(ibcq);
-       shared = cq->mq.peer;
-
-       if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
-               writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
-       else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
-               writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
-       else
-               return -EINVAL;
-
-       writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
-
-       /*
-        * Now read back shared->armed to make the PCI
-        * write synchronous.  This is necessary for
-        * correct cq notification semantics.
-        */
-       readb(&shared->armed);
-
-       if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
-               spin_lock_irqsave(&cq->lock, flags);
-               ret = !c2_mq_empty(&cq->mq);
-               spin_unlock_irqrestore(&cq->lock, flags);
-       }
-
-       return ret;
-}
-
-static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
-{
-       dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
-                         mq->msg_pool.host, dma_unmap_addr(mq, mapping));
-}
-
-static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
-                          size_t q_size, size_t msg_size)
-{
-       u8 *pool_start;
-
-       if (q_size > SIZE_MAX / msg_size)
-               return -EINVAL;
-
-       pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
-                                       &mq->host_dma, GFP_KERNEL);
-       if (!pool_start)
-               return -ENOMEM;
-
-       c2_mq_rep_init(mq,
-                      0,               /* index (currently unknown) */
-                      q_size,
-                      msg_size,
-                      pool_start,
-                      NULL,    /* peer (currently unknown) */
-                      C2_MQ_HOST_TARGET);
-
-       dma_unmap_addr_set(mq, mapping, mq->host_dma);
-
-       return 0;
-}
-
-int c2_init_cq(struct c2_dev *c2dev, int entries,
-              struct c2_ucontext *ctx, struct c2_cq *cq)
-{
-       struct c2wr_cq_create_req wr;
-       struct c2wr_cq_create_rep *reply;
-       unsigned long peer_pa;
-       struct c2_vq_req *vq_req;
-       int err;
-
-       might_sleep();
-
-       cq->ibcq.cqe = entries - 1;
-       cq->is_kernel = !ctx;
-
-       /* Allocate a shared pointer */
-       cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                     &cq->mq.shared_dma, GFP_KERNEL);
-       if (!cq->mq.shared)
-               return -ENOMEM;
-
-       /* Allocate pages for the message pool */
-       err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
-       if (err)
-               goto bail0;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_CQ_CREATE);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.msg_size = cpu_to_be32(cq->mq.msg_size);
-       wr.depth = cpu_to_be32(cq->mq.q_size);
-       wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
-       wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
-       wr.user_context = (u64) (unsigned long) (cq);
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail2;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail2;
-
-       reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail2;
-       }
-
-       if ((err = c2_errno(reply)) != 0)
-               goto bail3;
-
-       cq->adapter_handle = reply->cq_handle;
-       cq->mq.index = be32_to_cpu(reply->mq_index);
-
-       peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
-       cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
-       if (!cq->mq.peer) {
-               err = -ENOMEM;
-               goto bail3;
-       }
-
-       vq_repbuf_free(c2dev, reply);
-       vq_req_free(c2dev, vq_req);
-
-       spin_lock_init(&cq->lock);
-       atomic_set(&cq->refcount, 1);
-       init_waitqueue_head(&cq->wait);
-
-       /*
-        * Use the MQ index allocated by the adapter to
-        * store the CQ in the qptr_array
-        */
-       cq->cqn = cq->mq.index;
-       c2dev->qptr_array[cq->cqn] = cq;
-
-       return 0;
-
-bail3:
-       vq_repbuf_free(c2dev, reply);
-bail2:
-       vq_req_free(c2dev, vq_req);
-bail1:
-       c2_free_cq_buf(c2dev, &cq->mq);
-bail0:
-       c2_free_mqsp(cq->mq.shared);
-
-       return err;
-}
-
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
-{
-       int err;
-       struct c2_vq_req *vq_req;
-       struct c2wr_cq_destroy_req wr;
-       struct c2wr_cq_destroy_rep *reply;
-
-       might_sleep();
-
-       /* Clear CQ from the qptr array */
-       spin_lock_irq(&c2dev->lock);
-       c2dev->qptr_array[cq->mq.index] = NULL;
-       atomic_dec(&cq->refcount);
-       spin_unlock_irq(&c2dev->lock);
-
-       wait_event(cq->wait, !atomic_read(&cq->refcount));
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               goto bail0;
-       }
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.cq_handle = cq->adapter_handle;
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-       if (reply)
-               vq_repbuf_free(c2dev, reply);
-bail1:
-       vq_req_free(c2dev, vq_req);
-bail0:
-       if (cq->is_kernel) {
-               c2_free_cq_buf(c2dev, &cq->mq);
-       }
-
-       return;
-}
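
c2_poll_cq() in the file removed above drains at most num_entries completions per call and maps the adapter's status codes onto the generic ib_wc_status values. A stripped-down sketch of that bounded poll-and-translate loop over a plain array (everything here is illustrative; the real ring and status sets live in the driver headers):

#include <stdio.h>

enum hw_status  { HW_OK, HW_FLUSHED, HW_ACCESS_ERR };
enum wc_status  { WC_SUCCESS, WC_FLUSH_ERR, WC_ACCESS_ERR, WC_GENERAL_ERR };

static enum wc_status translate(enum hw_status s)
{
    switch (s) {
    case HW_OK:         return WC_SUCCESS;
    case HW_FLUSHED:    return WC_FLUSH_ERR;
    case HW_ACCESS_ERR: return WC_ACCESS_ERR;
    default:            return WC_GENERAL_ERR;
    }
}

/* Poll at most num_entries completions; return how many were reaped. */
static int poll_cq(const enum hw_status *ring, int avail,
                   enum wc_status *out, int num_entries)
{
    int npolled;

    for (npolled = 0; npolled < num_entries; npolled++) {
        if (npolled >= avail)           /* ring empty: stop early, like -EAGAIN */
            break;
        out[npolled] = translate(ring[npolled]);
    }
    return npolled;
}

int main(void)
{
    enum hw_status ring[] = { HW_OK, HW_FLUSHED, HW_OK };
    enum wc_status wc[8];
    int n = poll_cq(ring, 3, wc, 8);

    for (int i = 0; i < n; i++)
        printf("completion %d -> status %d\n", i, wc[i]);
    return 0;
}
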
diff --git a/drivers/staging/rdma/amso1100/c2_intr.c b/drivers/staging/rdma/amso1100/c2_intr.c
deleted file mode 100644 (file)
index 74b32a9..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_vq.h"
-
-static void handle_mq(struct c2_dev *c2dev, u32 index);
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
-
-/*
- * Handle RNIC interrupts
- */
-void c2_rnic_interrupt(struct c2_dev *c2dev)
-{
-       unsigned int mq_index;
-
-       while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
-               mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
-               if (mq_index & 0x80000000) {
-                       break;
-               }
-
-               c2dev->hints_read++;
-               handle_mq(c2dev, mq_index);
-       }
-
-}
-
-/*
- * Top level MQ handler
- */
-static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
-{
-       if (c2dev->qptr_array[mq_index] == NULL) {
-               pr_debug("handle_mq: stray activity for mq_index=%d\n",
-                        mq_index);
-               return;
-       }
-
-       switch (mq_index) {
-       case (0):
-               /*
-                * An index of 0 in the activity queue
-                * indicates the req vq now has messages
-                * available...
-                *
-                * Wake up any waiters waiting on req VQ
-                * message availability.
-                */
-               wake_up(&c2dev->req_vq_wo);
-               break;
-       case (1):
-               handle_vq(c2dev, mq_index);
-               break;
-       case (2):
-               /* We have to purge the VQ in case there are pending
-                * accept reply requests that would result in the
-                * generation of an ESTABLISHED event. If we don't
-                * generate these first, a CLOSE event could end up
-                * being delivered before the ESTABLISHED event.
-                */
-               handle_vq(c2dev, 1);
-
-               c2_ae_event(c2dev, mq_index);
-               break;
-       default:
-               /* There is no event synchronization between CQ events
-                * and AE or CM events. In fact, CQE could be
-                * delivered for all of the I/O up to and including the
-                * FLUSH for a peer disconnect prior to the ESTABLISHED
-                * event being delivered to the app. The reason for this
-                * is that CM events are delivered on a thread, while AE
-                * and CM events are delivered on interrupt context.
-                */
-               c2_cq_event(c2dev, mq_index);
-               break;
-       }
-
-       return;
-}
-
-/*
- * Handles verbs WR replies.
- */
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
-{
-       void *adapter_msg, *reply_msg;
-       struct c2wr_hdr *host_msg;
-       struct c2wr_hdr tmp;
-       struct c2_mq *reply_vq;
-       struct c2_vq_req *req;
-       struct iw_cm_event cm_event;
-       int err;
-
-       reply_vq = c2dev->qptr_array[mq_index];
-
-       /*
-        * get next msg from mq_index into adapter_msg.
-        * don't free it yet.
-        */
-       adapter_msg = c2_mq_consume(reply_vq);
-       if (adapter_msg == NULL) {
-               return;
-       }
-
-       host_msg = vq_repbuf_alloc(c2dev);
-
-       /*
-        * If we can't get a host buffer, then we'll still
-        * wakeup the waiter, we just won't give him the msg.
-        * It is assumed the waiter will deal with this...
-        */
-       if (!host_msg) {
-               pr_debug("handle_vq: no repbufs!\n");
-
-               /*
-                * just copy the WR header into a local variable.
-                * this allows us to still demux on the context
-                */
-               host_msg = &tmp;
-               memcpy(host_msg, adapter_msg, sizeof(tmp));
-               reply_msg = NULL;
-       } else {
-               memcpy(host_msg, adapter_msg, reply_vq->msg_size);
-               reply_msg = host_msg;
-       }
-
-       /*
-        * consume the msg from the MQ
-        */
-       c2_mq_free(reply_vq);
-
-       /*
-        * wakeup the waiter.
-        */
-       req = (struct c2_vq_req *) (unsigned long) host_msg->context;
-       if (req == NULL) {
-               /*
-                * We should never get here, as the adapter should
-                * never send us a reply that we're not expecting.
-                */
-               if (reply_msg != NULL)
-                       vq_repbuf_free(c2dev, host_msg);
-               pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
-               return;
-       }
-
-       if (reply_msg)
-               err = c2_errno(reply_msg);
-       else
-               err = -ENOMEM;
-
-       if (!err) switch (req->event) {
-       case IW_CM_EVENT_ESTABLISHED:
-               c2_set_qp_state(req->qp,
-                               C2_QP_STATE_RTS);
-               /*
-                * Until ird/ord negotiation via MPAv2 support is added, send
-                * max supported values
-                */
-               cm_event.ird = cm_event.ord = 128;
-       case IW_CM_EVENT_CLOSE:
-
-               /*
-                * Deliver the CM event to the consumer.  This code is
-                * shared by the ESTABLISHED fall-through and CLOSE cases.
-                */
-               cm_event.event = req->event;
-               cm_event.status = 0;
-               cm_event.local_addr = req->cm_id->local_addr;
-               cm_event.remote_addr = req->cm_id->remote_addr;
-               cm_event.private_data = NULL;
-               cm_event.private_data_len = 0;
-               req->cm_id->event_handler(req->cm_id, &cm_event);
-               break;
-       default:
-               break;
-       }
-
-       req->reply_msg = (u64) (unsigned long) (reply_msg);
-       atomic_set(&req->reply_ready, 1);
-       wake_up(&req->wait_object);
-
-       /*
-        * If the request was cancelled, then this put will
-        * free the vq_req memory...and reply_msg!!!
-        */
-       vq_req_put(c2dev, req);
-}
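
The interrupt path removed above is a counter-driven dispatcher: while the locally cached hints_read lags the adapter's hint count, it reads the next MQ index and routes it (0 wakes request-queue waiters, 1 is the reply VQ, 2 drains the reply VQ before raising async events so ESTABLISHED precedes CLOSE, anything else is a CQ). A compact sketch of that routing, with stub handlers in place of the driver's (names are illustrative):

#include <stdio.h>

static void wake_req_waiters(void)      { printf("request VQ has room\n"); }
static void handle_reply_vq(void)       { printf("reply VQ message\n"); }
static void handle_async_event(void)    { printf("async event\n"); }
static void handle_cq(unsigned idx)     { printf("completion on CQ %u\n", idx); }

static void dispatch(unsigned mq_index)
{
    switch (mq_index) {
    case 0:
        wake_req_waiters();
        break;
    case 1:
        handle_reply_vq();
        break;
    case 2:
        /* drain pending replies first so ESTABLISHED is seen before CLOSE */
        handle_reply_vq();
        handle_async_event();
        break;
    default:
        handle_cq(mq_index);
        break;
    }
}

int main(void)
{
    unsigned pending[] = { 1, 2, 7 };   /* indices the "adapter" reported */
    unsigned hint_count = 3, hints_read = 0;

    while (hints_read != hint_count)
        dispatch(pending[hints_read++]);
    return 0;
}
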
diff --git a/drivers/staging/rdma/amso1100/c2_mm.c b/drivers/staging/rdma/amso1100/c2_mm.c
deleted file mode 100644 (file)
index 25081e2..0000000
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-
-#define PBL_VIRT 1
-#define PBL_PHYS 2
-
-/*
- * Send all the PBL messages to convey the remainder of the PBL
- * Wait for the adapter's reply on the last one.
- * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
- *
- * NOTE:  vq_req is _not_ freed by this function.  The VQ Host
- *       Reply buffer _is_ freed by this function.
- */
-static int
-send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
-                 unsigned long va, u32 pbl_depth,
-                 struct c2_vq_req *vq_req, int pbl_type)
-{
-       u32 pbe_count;          /* amt that fits in a PBL msg */
-       u32 count;              /* amt in this PBL MSG. */
-       struct c2wr_nsmr_pbl_req *wr;   /* PBL WR ptr */
-       struct c2wr_nsmr_pbl_rep *reply;        /* reply ptr */
-       int err, pbl_virt, pbl_index, i;
-
-       switch (pbl_type) {
-       case PBL_VIRT:
-               pbl_virt = 1;
-               break;
-       case PBL_PHYS:
-               pbl_virt = 0;
-               break;
-       default:
-               return -EINVAL;
-               break;
-       }
-
-       pbe_count = (c2dev->req_vq.msg_size -
-                    sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               return -ENOMEM;
-       }
-       c2_wr_set_id(wr, CCWR_NSMR_PBL);
-
-       /*
-        * Only the last PBL message will generate a reply from the verbs,
-        * so we set the context to 0 indicating there is no kernel verbs
-        * handler blocked awaiting this reply.
-        */
-       wr->hdr.context = 0;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->stag_index = stag_index;    /* already swapped */
-       wr->flags = 0;
-       pbl_index = 0;
-       while (pbl_depth) {
-               count = min(pbe_count, pbl_depth);
-               wr->addrs_length = cpu_to_be32(count);
-
-               /*
-                *  If this is the last message, then reference the
-                *  vq request struct because we are going to wait for a reply.
-                *  Also mark this PBL msg as the last one.
-                */
-               if (count == pbl_depth) {
-                       /*
-                        * reference the request struct.  dereferenced in the
-                        * int handler.
-                        */
-                       vq_req_get(c2dev, vq_req);
-                       wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
-
-                       /*
-                        * This is the last PBL message.
-                        * Set the context to our VQ Request Object so we can
-                        * wait for the reply.
-                        */
-                       wr->hdr.context = (unsigned long) vq_req;
-               }
-
-               /*
-                * If pbl_virt is set then va is a virtual address
-                * that describes a virtually contiguous memory
-                * allocation. The wr needs the start of each virtual page
-                * to be converted to the corresponding physical address
-                * of the page. If pbl_virt is not set then va is an array
-                * of physical addresses and there is no conversion to do.
-                * Just fill in the wr with what is in the array.
-                */
-               for (i = 0; i < count; i++) {
-                       if (pbl_virt) {
-                               va += PAGE_SIZE;
-                       } else {
-                               wr->paddrs[i] =
-                                   cpu_to_be64(((u64 *)va)[pbl_index + i]);
-                       }
-               }
-
-               /*
-                * Send WR to adapter
-                */
-               err = vq_send_wr(c2dev, (union c2wr *) wr);
-               if (err) {
-                       if (count <= pbe_count) {
-                               vq_req_put(c2dev, vq_req);
-                       }
-                       goto bail0;
-               }
-               pbl_depth -= count;
-               pbl_index += count;
-       }
-
-       /*
-        *  Now wait for the reply...
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       err = c2_errno(reply);
-
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       kfree(wr);
-       return err;
-}
-
-#define C2_PBL_MAX_DEPTH 131072
-int
-c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
-                          int page_size, int pbl_depth, u32 length,
-                          u32 offset, u64 *va, enum c2_acf acf,
-                          struct c2_mr *mr)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_nsmr_register_req *wr;
-       struct c2wr_nsmr_register_rep *reply;
-       u16 flags;
-       int i, pbe_count, count;
-       int err;
-
-       if (!va || !length || !addr_list || !pbl_depth)
-               return -EINTR;
-
-       /*
-        * Verify PBL depth is within rnic max
-        */
-       if (pbl_depth > C2_PBL_MAX_DEPTH) {
-               return -EINTR;
-       }
-
-       /*
-        * allocate verbs request object
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       /*
-        * build the WR
-        */
-       c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
-       wr->hdr.context = (unsigned long) vq_req;
-       wr->rnic_handle = c2dev->adapter_handle;
-
-       flags = (acf | MEM_VA_BASED | MEM_REMOTE);
-
-       /*
-        * compute how many pbes can fit in the message
-        */
-       pbe_count = (c2dev->req_vq.msg_size -
-                    sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
-
-       if (pbl_depth <= pbe_count) {
-               flags |= MEM_PBL_COMPLETE;
-       }
-       wr->flags = cpu_to_be16(flags);
-       wr->stag_key = 0;       //stag_key;
-       wr->va = cpu_to_be64(*va);
-       wr->pd_id = mr->pd->pd_id;
-       wr->pbe_size = cpu_to_be32(page_size);
-       wr->length = cpu_to_be32(length);
-       wr->pbl_depth = cpu_to_be32(pbl_depth);
-       wr->fbo = cpu_to_be32(offset);
-       count = min(pbl_depth, pbe_count);
-       wr->addrs_length = cpu_to_be32(count);
-
-       /*
-        * fill out the PBL for this message
-        */
-       for (i = 0; i < count; i++) {
-               wr->paddrs[i] = cpu_to_be64(addr_list[i]);
-       }
-
-       /*
-        * reference the request struct
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * send the WR to the adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       /*
-        * wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail1;
-       }
-
-       /*
-        * process reply
-        */
-       reply =
-           (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-       if ((err = c2_errno(reply))) {
-               goto bail2;
-       }
-       //*p_pb_entries = be32_to_cpu(reply->pbl_depth);
-       mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
-       vq_repbuf_free(c2dev, reply);
-
-       /*
-        * if there are still more PBEs we need to send them to
-        * the adapter and wait for a reply on the final one.
-        * reuse vq_req for this purpose.
-        */
-       pbl_depth -= count;
-       if (pbl_depth) {
-
-               vq_req->reply_msg = (unsigned long) NULL;
-               atomic_set(&vq_req->reply_ready, 0);
-               err = send_pbl_messages(c2dev,
-                                       cpu_to_be32(mr->ibmr.lkey),
-                                       (unsigned long) &addr_list[i],
-                                       pbl_depth, vq_req, PBL_PHYS);
-               if (err) {
-                       goto bail1;
-               }
-       }
-
-       vq_req_free(c2dev, vq_req);
-       kfree(wr);
-
-       return err;
-
-bail2:
-       vq_repbuf_free(c2dev, reply);
-bail1:
-       kfree(wr);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
-{
-       struct c2_vq_req *vq_req;       /* verbs request object */
-       struct c2wr_stag_dealloc_req wr;        /* work request */
-       struct c2wr_stag_dealloc_rep *reply;    /* WR reply  */
-       int err;
-
-
-       /*
-        * allocate verbs request object
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               return -ENOMEM;
-       }
-
-       /*
-        * Build the WR
-        */
-       c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
-       wr.hdr.context = (u64) (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.stag_index = cpu_to_be32(stag_index);
-
-       /*
-        * reference the request struct.  dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       err = c2_errno(reply);
-
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
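
Registration in the file removed above has to feed an arbitrarily long page-address list to the adapter in fixed-size messages: the first CCWR_NSMR_REGISTER carries as many PBEs as fit, and send_pbl_messages() streams the rest, flagging only the final message as MEM_PBL_COMPLETE so exactly one reply comes back. A small sketch of that chunking loop (the message layout and names are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define PBES_PER_MSG 4          /* the driver derives this from the VQ message size */

/* Emit one message worth of page addresses; 'last' mirrors MEM_PBL_COMPLETE. */
static void send_msg(const uint64_t *pbes, unsigned count, int last)
{
    printf("msg: %u entries%s:", count, last ? " (complete)" : "");
    for (unsigned i = 0; i < count; i++)
        printf(" %#llx", (unsigned long long)pbes[i]);
    printf("\n");
}

static void send_pbl(const uint64_t *pbes, unsigned depth)
{
    unsigned off = 0;

    while (depth) {
        unsigned count = depth < PBES_PER_MSG ? depth : PBES_PER_MSG;

        /* only the final chunk is marked complete, so only it draws a reply */
        send_msg(pbes + off, count, count == depth);
        off   += count;
        depth -= count;
    }
}

int main(void)
{
    uint64_t pages[] = { 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000 };

    send_pbl(pages, 6);
    return 0;
}
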
diff --git a/drivers/staging/rdma/amso1100/c2_mq.c b/drivers/staging/rdma/amso1100/c2_mq.c
deleted file mode 100644 (file)
index 7827fb8..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include "c2_mq.h"
-
-void *c2_mq_alloc(struct c2_mq *q)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-       if (c2_mq_full(q)) {
-               return NULL;
-       } else {
-#ifdef DEBUG
-               struct c2wr_hdr *m =
-                   (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
-               BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
-               m->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
-               return m;
-#else
-               return q->msg_pool.host + q->priv * q->msg_size;
-#endif
-       }
-}
-
-void c2_mq_produce(struct c2_mq *q)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-       if (!c2_mq_full(q)) {
-               q->priv = (q->priv + 1) % q->q_size;
-               q->hint_count++;
-               /* Update peer's offset. */
-               __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
-       }
-}
-
-void *c2_mq_consume(struct c2_mq *q)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
-       if (c2_mq_empty(q)) {
-               return NULL;
-       } else {
-#ifdef DEBUG
-               struct c2wr_hdr *m = (struct c2wr_hdr *)
-                   (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
-               BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
-#endif
-               return m;
-#else
-               return q->msg_pool.host + q->priv * q->msg_size;
-#endif
-       }
-}
-
-void c2_mq_free(struct c2_mq *q)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
-       if (!c2_mq_empty(q)) {
-
-#ifdef CCMSGMAGIC
-               {
-                       struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
-                           (q->msg_pool.adapter + q->priv * q->msg_size);
-                       __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
-               }
-#endif
-               q->priv = (q->priv + 1) % q->q_size;
-               /* Update peer's offset. */
-               __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
-       }
-}
-
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-       while (wqe_count--) {
-               BUG_ON(c2_mq_empty(q));
-               *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
-       }
-}
-
-#if 0
-u32 c2_mq_count(struct c2_mq *q)
-{
-       s32 count;
-
-       if (q->type == C2_MQ_HOST_TARGET)
-               count = be16_to_cpu(*q->shared) - q->priv;
-       else
-               count = q->priv - be16_to_cpu(*q->shared);
-
-       if (count < 0)
-               count += q->q_size;
-
-       return (u32) count;
-}
-#endif  /*  0  */
-
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-                   u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
-{
-       BUG_ON(!q->shared);
-
-       /* This code assumes the byte swapping has already been done! */
-       q->index = index;
-       q->q_size = q_size;
-       q->msg_size = msg_size;
-       q->msg_pool.adapter = pool_start;
-       q->peer = (struct c2_mq_shared __iomem *) peer;
-       q->magic = C2_MQ_MAGIC;
-       q->type = type;
-       q->priv = 0;
-       q->hint_count = 0;
-       return;
-}
-
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-                   u8 *pool_start, u16 __iomem *peer, u32 type)
-{
-       BUG_ON(!q->shared);
-
-       /* This code assumes the byte swapping has already been done! */
-       q->index = index;
-       q->q_size = q_size;
-       q->msg_size = msg_size;
-       q->msg_pool.host = pool_start;
-       q->peer = (struct c2_mq_shared __iomem *) peer;
-       q->magic = C2_MQ_MAGIC;
-       q->type = type;
-       q->priv = 0;
-       q->hint_count = 0;
-       return;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_mq.h b/drivers/staging/rdma/amso1100/c2_mq.h
deleted file mode 100644 (file)
index 8e1b4d1..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _C2_MQ_H_
-#define _C2_MQ_H_
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include "c2_wr.h"
-
-enum c2_shared_regs {
-
-       C2_SHARED_ARMED = 0x10,
-       C2_SHARED_NOTIFY = 0x18,
-       C2_SHARED_SHARED = 0x40,
-};
-
-struct c2_mq_shared {
-       u16 unused1;
-       u8 armed;
-       u8 notification_type;
-       u32 unused2;
-       u16 shared;
-       /* Pad to 64 bytes. */
-       u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
-};
-
-enum c2_mq_type {
-       C2_MQ_HOST_TARGET = 1,
-       C2_MQ_ADAPTER_TARGET = 2,
-};
-
-/*
- * c2_mq_t is for kernel-mode MQs like the VQs Cand the AEQ.
- * c2_user_mq_t (which is the same format) is for user-mode MQs...
- */
-#define C2_MQ_MAGIC 0x4d512020 /* 'MQ  ' */
-struct c2_mq {
-       u32 magic;
-       union {
-               u8 *host;
-               u8 __iomem *adapter;
-       } msg_pool;
-       dma_addr_t host_dma;
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-       u16 hint_count;
-       u16 priv;
-       struct c2_mq_shared __iomem *peer;
-       __be16 *shared;
-       dma_addr_t shared_dma;
-       u32 q_size;
-       u32 msg_size;
-       u32 index;
-       enum c2_mq_type type;
-};
-
-static __inline__ int c2_mq_empty(struct c2_mq *q)
-{
-       return q->priv == be16_to_cpu(*q->shared);
-}
-
-static __inline__ int c2_mq_full(struct c2_mq *q)
-{
-       return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
-}
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
-void *c2_mq_alloc(struct c2_mq *q);
-void c2_mq_produce(struct c2_mq *q);
-void *c2_mq_consume(struct c2_mq *q);
-void c2_mq_free(struct c2_mq *q);
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-                      u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-                          u8 *pool_start, u16 __iomem *peer, u32 type);
-
-#endif                         /* _C2_MQ_H_ */
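Aside, not part of the commit: the removed c2_mq helpers above treat each message queue as a circular buffer of q_size slots, with the local index kept in priv and the peer's index read through *shared; "full" deliberately leaves one slot unused so it can be told apart from "empty". Below is a minimal user-space sketch of that index arithmetic only (struct mq, mq_empty, mq_full and mq_produce are hypothetical names, not from the driver, and the byte-swapping/MMIO details are omitted):

#include <stdio.h>

struct mq {
        unsigned int priv;      /* local producer/consumer index */
        unsigned int shared;    /* index advanced by the peer */
        unsigned int q_size;    /* number of slots in the ring */
};

static int mq_empty(const struct mq *q)
{
        return q->priv == q->shared;
}

static int mq_full(const struct mq *q)
{
        /* one slot stays unused so full and empty are distinguishable */
        return q->priv == (q->shared + q->q_size - 1) % q->q_size;
}

static void mq_produce(struct mq *q)
{
        if (!mq_full(q))
                q->priv = (q->priv + 1) % q->q_size;
}

int main(void)
{
        struct mq q = { .priv = 0, .shared = 0, .q_size = 4 };
        unsigned int produced = 0;

        while (!mq_full(&q)) {
                mq_produce(&q);
                produced++;
        }
        printf("slots usable before full: %u\n", produced); /* prints 3 */
        return 0;
}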
diff --git a/drivers/staging/rdma/amso1100/c2_pd.c b/drivers/staging/rdma/amso1100/c2_pd.c
deleted file mode 100644 (file)
index f3e81dc..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-
-#include "c2.h"
-#include "c2_provider.h"
-
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
-{
-       u32 obj;
-       int ret = 0;
-
-       spin_lock(&c2dev->pd_table.lock);
-       obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
-                                c2dev->pd_table.last);
-       if (obj >= c2dev->pd_table.max)
-               obj = find_first_zero_bit(c2dev->pd_table.table,
-                                         c2dev->pd_table.max);
-       if (obj < c2dev->pd_table.max) {
-               pd->pd_id = obj;
-               __set_bit(obj, c2dev->pd_table.table);
-               c2dev->pd_table.last = obj+1;
-               if (c2dev->pd_table.last >= c2dev->pd_table.max)
-                       c2dev->pd_table.last = 0;
-       } else
-               ret = -ENOMEM;
-       spin_unlock(&c2dev->pd_table.lock);
-       return ret;
-}
-
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
-{
-       spin_lock(&c2dev->pd_table.lock);
-       __clear_bit(pd->pd_id, c2dev->pd_table.table);
-       spin_unlock(&c2dev->pd_table.lock);
-}
-
-int c2_init_pd_table(struct c2_dev *c2dev)
-{
-
-       c2dev->pd_table.last = 0;
-       c2dev->pd_table.max = c2dev->props.max_pd;
-       spin_lock_init(&c2dev->pd_table.lock);
-       c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
-                                       sizeof(long), GFP_KERNEL);
-       if (!c2dev->pd_table.table)
-               return -ENOMEM;
-       bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
-       return 0;
-}
-
-void c2_cleanup_pd_table(struct c2_dev *c2dev)
-{
-       kfree(c2dev->pd_table.table);
-}
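Aside, not part of the commit: c2_pd_alloc() above hands out protection-domain ids with a wrap-around bitmap scan -- search from the position after the last allocation, wrap to the start if nothing is free past it, mark the bit, and remember where to continue next time. A hedged stand-alone sketch of the same pattern follows (id_table and id_alloc are made-up names; the kernel version does this with find_next_zero_bit/find_first_zero_bit under a spinlock):

#include <stdio.h>

#define MAX_IDS 8

struct id_table {
        unsigned char used[MAX_IDS];   /* stand-in for the kernel bitmap */
        unsigned int last;             /* where the next search starts */
};

static int id_alloc(struct id_table *t)
{
        unsigned int i, idx;

        for (i = 0; i < MAX_IDS; i++) {
                idx = (t->last + i) % MAX_IDS;   /* wrap-around scan */
                if (!t->used[idx]) {
                        t->used[idx] = 1;
                        t->last = (idx + 1) % MAX_IDS;
                        return (int)idx;
                }
        }
        return -1;   /* table full; the driver returns -ENOMEM here */
}

int main(void)
{
        struct id_table t = { { 0 }, 0 };

        printf("first id:  %d\n", id_alloc(&t));   /* 0 */
        printf("second id: %d\n", id_alloc(&t));   /* 1 */
        return 0;
}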
diff --git a/drivers/staging/rdma/amso1100/c2_provider.c b/drivers/staging/rdma/amso1100/c2_provider.c
deleted file mode 100644 (file)
index de8d10e..0000000
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/if_arp.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include "c2.h"
-#include "c2_provider.h"
-#include "c2_user.h"
-
-static int c2_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-                          struct ib_udata *uhw)
-{
-       struct c2_dev *c2dev = to_c2dev(ibdev);
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       if (uhw->inlen || uhw->outlen)
-               return -EINVAL;
-
-       *props = c2dev->props;
-       return 0;
-}
-
-static int c2_query_port(struct ib_device *ibdev,
-                        u8 port, struct ib_port_attr *props)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       props->max_mtu = IB_MTU_4096;
-       props->lid = 0;
-       props->lmc = 0;
-       props->sm_lid = 0;
-       props->sm_sl = 0;
-       props->state = IB_PORT_ACTIVE;
-       props->phys_state = 0;
-       props->port_cap_flags =
-           IB_PORT_CM_SUP |
-           IB_PORT_REINIT_SUP |
-           IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
-       props->gid_tbl_len = 1;
-       props->pkey_tbl_len = 1;
-       props->qkey_viol_cntr = 0;
-       props->active_width = 1;
-       props->active_speed = IB_SPEED_SDR;
-
-       return 0;
-}
-
-static int c2_query_pkey(struct ib_device *ibdev,
-                        u8 port, u16 index, u16 * pkey)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       *pkey = 0;
-       return 0;
-}
-
-static int c2_query_gid(struct ib_device *ibdev, u8 port,
-                       int index, union ib_gid *gid)
-{
-       struct c2_dev *c2dev = to_c2dev(ibdev);
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       memset(&(gid->raw[0]), 0, sizeof(gid->raw));
-       memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
-
-       return 0;
-}
-
-/* Allocate the user context data structure. This keeps track
- * of all objects associated with a particular user-mode client.
- */
-static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
-                                            struct ib_udata *udata)
-{
-       struct c2_ucontext *context;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       context = kmalloc(sizeof(*context), GFP_KERNEL);
-       if (!context)
-               return ERR_PTR(-ENOMEM);
-
-       return &context->ibucontext;
-}
-
-static int c2_dealloc_ucontext(struct ib_ucontext *context)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       kfree(context);
-       return 0;
-}
-
-static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
-                                struct ib_ucontext *context,
-                                struct ib_udata *udata)
-{
-       struct c2_pd *pd;
-       int err;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       pd = kmalloc(sizeof(*pd), GFP_KERNEL);
-       if (!pd)
-               return ERR_PTR(-ENOMEM);
-
-       err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
-       if (err) {
-               kfree(pd);
-               return ERR_PTR(err);
-       }
-
-       if (context) {
-               if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
-                       c2_pd_free(to_c2dev(ibdev), pd);
-                       kfree(pd);
-                       return ERR_PTR(-EFAULT);
-               }
-       }
-
-       return &pd->ibpd;
-}
-
-static int c2_dealloc_pd(struct ib_pd *pd)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
-       kfree(pd);
-
-       return 0;
-}
-
-static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return ERR_PTR(-ENOSYS);
-}
-
-static int c2_ah_destroy(struct ib_ah *ah)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static void c2_add_ref(struct ib_qp *ibqp)
-{
-       struct c2_qp *qp;
-       BUG_ON(!ibqp);
-       qp = to_c2qp(ibqp);
-       atomic_inc(&qp->refcount);
-}
-
-static void c2_rem_ref(struct ib_qp *ibqp)
-{
-       struct c2_qp *qp;
-       BUG_ON(!ibqp);
-       qp = to_c2qp(ibqp);
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-}
-
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
-{
-       struct c2_dev* c2dev = to_c2dev(device);
-       struct c2_qp *qp;
-
-       qp = c2_find_qpn(c2dev, qpn);
-       pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
-               __func__, qp, qpn, device,
-               (qp?atomic_read(&qp->refcount):0));
-
-       return (qp?&qp->ibqp:NULL);
-}
-
-static struct ib_qp *c2_create_qp(struct ib_pd *pd,
-                                 struct ib_qp_init_attr *init_attr,
-                                 struct ib_udata *udata)
-{
-       struct c2_qp *qp;
-       int err;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       if (init_attr->create_flags)
-               return ERR_PTR(-EINVAL);
-
-       switch (init_attr->qp_type) {
-       case IB_QPT_RC:
-               qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-               if (!qp) {
-                       pr_debug("%s: Unable to allocate QP\n", __func__);
-                       return ERR_PTR(-ENOMEM);
-               }
-               spin_lock_init(&qp->lock);
-               if (pd->uobject) {
-                       /* userspace specific */
-               }
-
-               err = c2_alloc_qp(to_c2dev(pd->device),
-                                 to_c2pd(pd), init_attr, qp);
-
-               if (err && pd->uobject) {
-                       /* userspace specific */
-               }
-
-               break;
-       default:
-               pr_debug("%s: Invalid QP type: %d\n", __func__,
-                       init_attr->qp_type);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (err) {
-               kfree(qp);
-               return ERR_PTR(err);
-       }
-
-       return &qp->ibqp;
-}
-
-static int c2_destroy_qp(struct ib_qp *ib_qp)
-{
-       struct c2_qp *qp = to_c2qp(ib_qp);
-
-       pr_debug("%s:%u qp=%p,qp->state=%d\n",
-               __func__, __LINE__, ib_qp, qp->state);
-       c2_free_qp(to_c2dev(ib_qp->device), qp);
-       kfree(qp);
-       return 0;
-}
-
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev,
-                                 const struct ib_cq_init_attr *attr,
-                                 struct ib_ucontext *context,
-                                 struct ib_udata *udata)
-{
-       int entries = attr->cqe;
-       struct c2_cq *cq;
-       int err;
-
-       if (attr->flags)
-               return ERR_PTR(-EINVAL);
-
-       cq = kmalloc(sizeof(*cq), GFP_KERNEL);
-       if (!cq) {
-               pr_debug("%s: Unable to allocate CQ\n", __func__);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
-       if (err) {
-               pr_debug("%s: error initializing CQ\n", __func__);
-               kfree(cq);
-               return ERR_PTR(err);
-       }
-
-       return &cq->ibcq;
-}
-
-static int c2_destroy_cq(struct ib_cq *ib_cq)
-{
-       struct c2_cq *cq = to_c2cq(ib_cq);
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       c2_free_cq(to_c2dev(ib_cq->device), cq);
-       kfree(cq);
-
-       return 0;
-}
-
-static inline u32 c2_convert_access(int acc)
-{
-       return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
-           (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
-           (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
-           C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
-}
-
-static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
-{
-       struct c2_mr *mr;
-       u64 *page_list;
-       const u32 total_len = 0xffffffff;       /* AMSO1100 limit */
-       int err, page_shift, pbl_depth, i;
-       u64 kva = 0;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       /*
-        * This is a map of all phy mem...use a 32k page_shift.
-        */
-       page_shift = PAGE_SHIFT + 3;
-       pbl_depth = ALIGN(total_len, BIT(page_shift)) >> page_shift;
-
-       page_list = vmalloc(sizeof(u64) * pbl_depth);
-       if (!page_list) {
-               pr_debug("couldn't vmalloc page_list of size %zd\n",
-                       (sizeof(u64) * pbl_depth));
-               return ERR_PTR(-ENOMEM);
-       }
-
-       for (i = 0; i < pbl_depth; i++)
-               page_list[i] = (i << page_shift);
-
-       mr = kmalloc(sizeof(*mr), GFP_KERNEL);
-       if (!mr) {
-               vfree(page_list);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       mr->pd = to_c2pd(pd);
-       mr->umem = NULL;
-       pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
-               "*iova_start %llx, first pa %llx, last pa %llx\n",
-               __func__, page_shift, pbl_depth, total_len,
-               (unsigned long long) kva,
-               (unsigned long long) page_list[0],
-               (unsigned long long) page_list[pbl_depth-1]);
-       err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), page_list,
-                                        BIT(page_shift), pbl_depth,
-                                        total_len, 0, &kva,
-                                        c2_convert_access(acc), mr);
-       vfree(page_list);
-       if (err) {
-               kfree(mr);
-               return ERR_PTR(err);
-       }
-
-       return &mr->ibmr;
-}
-
-static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                                   u64 virt, int acc, struct ib_udata *udata)
-{
-       u64 *pages;
-       u64 kva = 0;
-       int shift, n, len;
-       int i, k, entry;
-       int err = 0;
-       struct scatterlist *sg;
-       struct c2_pd *c2pd = to_c2pd(pd);
-       struct c2_mr *c2mr;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
-       if (!c2mr)
-               return ERR_PTR(-ENOMEM);
-       c2mr->pd = c2pd;
-
-       c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
-       if (IS_ERR(c2mr->umem)) {
-               err = PTR_ERR(c2mr->umem);
-               kfree(c2mr);
-               return ERR_PTR(err);
-       }
-
-       shift = ffs(c2mr->umem->page_size) - 1;
-       n = c2mr->umem->nmap;
-
-       pages = kmalloc_array(n, sizeof(u64), GFP_KERNEL);
-       if (!pages) {
-               err = -ENOMEM;
-               goto err;
-       }
-
-       i = 0;
-       for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
-               len = sg_dma_len(sg) >> shift;
-               for (k = 0; k < len; ++k) {
-                       pages[i++] =
-                               sg_dma_address(sg) +
-                               (c2mr->umem->page_size * k);
-               }
-       }
-
-       kva = virt;
-       err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
-                                        pages,
-                                        c2mr->umem->page_size,
-                                        i,
-                                        length,
-                                        ib_umem_offset(c2mr->umem),
-                                        &kva,
-                                        c2_convert_access(acc),
-                                        c2mr);
-       kfree(pages);
-       if (err)
-               goto err;
-       return &c2mr->ibmr;
-
-err:
-       ib_umem_release(c2mr->umem);
-       kfree(c2mr);
-       return ERR_PTR(err);
-}
-
-static int c2_dereg_mr(struct ib_mr *ib_mr)
-{
-       struct c2_mr *mr = to_c2mr(ib_mr);
-       int err;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
-       if (err)
-               pr_debug("c2_stag_dealloc failed: %d\n", err);
-       else {
-               if (mr->umem)
-                       ib_umem_release(mr->umem);
-               kfree(mr);
-       }
-
-       return err;
-}
-
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
-                       char *buf)
-{
-       struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return sprintf(buf, "%x\n", c2dev->props.hw_ver);
-}
-
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
-                          char *buf)
-{
-       struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return sprintf(buf, "%x.%x.%x\n",
-                      (int) (c2dev->props.fw_ver >> 32),
-                      (int) (c2dev->props.fw_ver >> 16) & 0xffff,
-                      (int) (c2dev->props.fw_ver & 0xffff));
-}
-
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
-                       char *buf)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return sprintf(buf, "AMSO1100\n");
-}
-
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *c2_dev_attributes[] = {
-       &dev_attr_hw_rev,
-       &dev_attr_fw_ver,
-       &dev_attr_hca_type,
-       &dev_attr_board_id
-};
-
-static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                       int attr_mask, struct ib_udata *udata)
-{
-       int err;
-
-       err =
-           c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
-                        attr_mask);
-
-       return err;
-}
-
-static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static int c2_process_mad(struct ib_device *ibdev,
-                         int mad_flags,
-                         u8 port_num,
-                         const struct ib_wc *in_wc,
-                         const struct ib_grh *in_grh,
-                         const struct ib_mad_hdr *in_mad,
-                         size_t in_mad_size,
-                         struct ib_mad_hdr *out_mad,
-                         size_t *out_mad_size,
-                         u16 *out_mad_pkey_index)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       /* Request a connection */
-       return c2_llp_connect(cm_id, iw_param);
-}
-
-static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       /* Accept the new connection */
-       return c2_llp_accept(cm_id, iw_param);
-}
-
-static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       return c2_llp_reject(cm_id, pdata, pdata_len);
-}
-
-static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
-{
-       int err;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       err = c2_llp_service_create(cm_id, backlog);
-       pr_debug("%s:%u err=%d\n",
-               __func__, __LINE__,
-               err);
-       return err;
-}
-
-static int c2_service_destroy(struct iw_cm_id *cm_id)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       return c2_llp_service_destroy(cm_id);
-}
-
-static int c2_pseudo_up(struct net_device *netdev)
-{
-       struct in_device *ind;
-       struct c2_dev *c2dev = netdev->ml_priv;
-
-       ind = in_dev_get(netdev);
-       if (!ind)
-               return 0;
-
-       pr_debug("adding...\n");
-       for_ifa(ind) {
-#ifdef DEBUG
-               u8 *ip = (u8 *) & ifa->ifa_address;
-
-               pr_debug("%s: %d.%d.%d.%d\n",
-                      ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
-               c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
-       }
-       endfor_ifa(ind);
-       in_dev_put(ind);
-
-       return 0;
-}
-
-static int c2_pseudo_down(struct net_device *netdev)
-{
-       struct in_device *ind;
-       struct c2_dev *c2dev = netdev->ml_priv;
-
-       ind = in_dev_get(netdev);
-       if (!ind)
-               return 0;
-
-       pr_debug("deleting...\n");
-       for_ifa(ind) {
-#ifdef DEBUG
-               u8 *ip = (u8 *) & ifa->ifa_address;
-
-               pr_debug("%s: %d.%d.%d.%d\n",
-                      ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
-               c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
-       }
-       endfor_ifa(ind);
-       in_dev_put(ind);
-
-       return 0;
-}
-
-static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-       kfree_skb(skb);
-       return NETDEV_TX_OK;
-}
-
-static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
-{
-       if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
-               return -EINVAL;
-
-       netdev->mtu = new_mtu;
-
-       /* TODO: Tell rnic about new rdma interface mtu */
-       return 0;
-}
-
-static const struct net_device_ops c2_pseudo_netdev_ops = {
-       .ndo_open               = c2_pseudo_up,
-       .ndo_stop               = c2_pseudo_down,
-       .ndo_start_xmit         = c2_pseudo_xmit_frame,
-       .ndo_change_mtu         = c2_pseudo_change_mtu,
-       .ndo_validate_addr      = eth_validate_addr,
-};
-
-static void setup(struct net_device *netdev)
-{
-       netdev->netdev_ops = &c2_pseudo_netdev_ops;
-
-       netdev->watchdog_timeo = 0;
-       netdev->type = ARPHRD_ETHER;
-       netdev->mtu = 1500;
-       netdev->hard_header_len = ETH_HLEN;
-       netdev->addr_len = ETH_ALEN;
-       netdev->tx_queue_len = 0;
-       netdev->flags |= IFF_NOARP;
-}
-
-static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
-{
-       char name[IFNAMSIZ];
-       struct net_device *netdev;
-
-       /* change ethxxx to iwxxx */
-       strcpy(name, "iw");
-       strcat(name, &c2dev->netdev->name[3]);
-       netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
-       if (!netdev) {
-               printk(KERN_ERR PFX "%s -  etherdev alloc failed",
-                       __func__);
-               return NULL;
-       }
-
-       netdev->ml_priv = c2dev;
-
-       SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
-       memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
-
-       /* Print out the MAC address */
-       pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr);
-
-#if 0
-       /* Disable network packets */
-       netif_stop_queue(netdev);
-#endif
-       return netdev;
-}
-
-static int c2_port_immutable(struct ib_device *ibdev, u8 port_num,
-                            struct ib_port_immutable *immutable)
-{
-       struct ib_port_attr attr;
-       int err;
-
-       err = c2_query_port(ibdev, port_num, &attr);
-       if (err)
-               return err;
-
-       immutable->pkey_tbl_len = attr.pkey_tbl_len;
-       immutable->gid_tbl_len = attr.gid_tbl_len;
-       immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
-       return 0;
-}
-
-int c2_register_device(struct c2_dev *dev)
-{
-       int ret = -ENOMEM;
-       int i;
-
-       /* Register pseudo network device */
-       dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
-       if (!dev->pseudo_netdev)
-               goto out;
-
-       ret = register_netdev(dev->pseudo_netdev);
-       if (ret)
-               goto out_free_netdev;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
-       dev->ibdev.owner = THIS_MODULE;
-       dev->ibdev.uverbs_cmd_mask =
-           (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
-           (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
-           (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
-           (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
-           (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
-           (1ull << IB_USER_VERBS_CMD_REG_MR) |
-           (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
-           (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
-           (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
-           (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
-           (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
-           (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
-           (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
-           (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
-           (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
-           (1ull << IB_USER_VERBS_CMD_POST_SEND) |
-           (1ull << IB_USER_VERBS_CMD_POST_RECV);
-
-       dev->ibdev.node_type = RDMA_NODE_RNIC;
-       memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
-       memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
-       dev->ibdev.phys_port_cnt = 1;
-       dev->ibdev.num_comp_vectors = 1;
-       dev->ibdev.dma_device = &dev->pcidev->dev;
-       dev->ibdev.query_device = c2_query_device;
-       dev->ibdev.query_port = c2_query_port;
-       dev->ibdev.query_pkey = c2_query_pkey;
-       dev->ibdev.query_gid = c2_query_gid;
-       dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
-       dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
-       dev->ibdev.mmap = c2_mmap_uar;
-       dev->ibdev.alloc_pd = c2_alloc_pd;
-       dev->ibdev.dealloc_pd = c2_dealloc_pd;
-       dev->ibdev.create_ah = c2_ah_create;
-       dev->ibdev.destroy_ah = c2_ah_destroy;
-       dev->ibdev.create_qp = c2_create_qp;
-       dev->ibdev.modify_qp = c2_modify_qp;
-       dev->ibdev.destroy_qp = c2_destroy_qp;
-       dev->ibdev.create_cq = c2_create_cq;
-       dev->ibdev.destroy_cq = c2_destroy_cq;
-       dev->ibdev.poll_cq = c2_poll_cq;
-       dev->ibdev.get_dma_mr = c2_get_dma_mr;
-       dev->ibdev.reg_user_mr = c2_reg_user_mr;
-       dev->ibdev.dereg_mr = c2_dereg_mr;
-       dev->ibdev.get_port_immutable = c2_port_immutable;
-
-       dev->ibdev.alloc_fmr = NULL;
-       dev->ibdev.unmap_fmr = NULL;
-       dev->ibdev.dealloc_fmr = NULL;
-       dev->ibdev.map_phys_fmr = NULL;
-
-       dev->ibdev.attach_mcast = c2_multicast_attach;
-       dev->ibdev.detach_mcast = c2_multicast_detach;
-       dev->ibdev.process_mad = c2_process_mad;
-
-       dev->ibdev.req_notify_cq = c2_arm_cq;
-       dev->ibdev.post_send = c2_post_send;
-       dev->ibdev.post_recv = c2_post_receive;
-
-       dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
-       if (dev->ibdev.iwcm == NULL) {
-               ret = -ENOMEM;
-               goto out_unregister_netdev;
-       }
-       dev->ibdev.iwcm->add_ref = c2_add_ref;
-       dev->ibdev.iwcm->rem_ref = c2_rem_ref;
-       dev->ibdev.iwcm->get_qp = c2_get_qp;
-       dev->ibdev.iwcm->connect = c2_connect;
-       dev->ibdev.iwcm->accept = c2_accept;
-       dev->ibdev.iwcm->reject = c2_reject;
-       dev->ibdev.iwcm->create_listen = c2_service_create;
-       dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
-
-       ret = ib_register_device(&dev->ibdev, NULL);
-       if (ret)
-               goto out_free_iwcm;
-
-       for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
-               ret = device_create_file(&dev->ibdev.dev,
-                                              c2_dev_attributes[i]);
-               if (ret)
-                       goto out_unregister_ibdev;
-       }
-       goto out;
-
-out_unregister_ibdev:
-       ib_unregister_device(&dev->ibdev);
-out_free_iwcm:
-       kfree(dev->ibdev.iwcm);
-out_unregister_netdev:
-       unregister_netdev(dev->pseudo_netdev);
-out_free_netdev:
-       free_netdev(dev->pseudo_netdev);
-out:
-       pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
-       return ret;
-}
-
-void c2_unregister_device(struct c2_dev *dev)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       unregister_netdev(dev->pseudo_netdev);
-       free_netdev(dev->pseudo_netdev);
-       ib_unregister_device(&dev->ibdev);
-}
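Aside, not part of the commit: c2_register_device() above follows the usual kernel goto-unwind pattern -- every failure jumps to a label that releases only the resources already set up, in reverse order, so there is a single exit path. A small illustrative sketch with stub steps (step_a, step_b and undo_a are hypothetical, standing in for calls such as register_netdev()/ib_register_device() and their teardown):

#include <stdio.h>

/* step_b is made to fail so the unwind path is exercised */
static int step_a(void)  { puts("step_a done");   return 0; }
static void undo_a(void) { puts("step_a undone"); }
static int step_b(void)  { puts("step_b failed"); return -1; }

static int demo_register(void)
{
        int ret;

        ret = step_a();
        if (ret)
                goto out;

        ret = step_b();
        if (ret)
                goto out_undo_a;

        return 0;                /* success: keep everything allocated */

out_undo_a:
        undo_a();                /* undo only what already succeeded */
out:
        return ret;
}

int main(void)
{
        return demo_register() ? 1 : 0;
}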
diff --git a/drivers/staging/rdma/amso1100/c2_provider.h b/drivers/staging/rdma/amso1100/c2_provider.h
deleted file mode 100644 (file)
index bf18998..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_PROVIDER_H
-#define C2_PROVIDER_H
-#include <linux/inetdevice.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_pack.h>
-
-#include "c2_mq.h"
-#include <rdma/iw_cm.h>
-
-#define C2_MPT_FLAG_ATOMIC        (1 << 14)
-#define C2_MPT_FLAG_REMOTE_WRITE  (1 << 13)
-#define C2_MPT_FLAG_REMOTE_READ   (1 << 12)
-#define C2_MPT_FLAG_LOCAL_WRITE   (1 << 11)
-#define C2_MPT_FLAG_LOCAL_READ    (1 << 10)
-
-struct c2_buf_list {
-       void *buf;
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-};
-
-
-/* The user context keeps track of objects allocated for a
- * particular user-mode client. */
-struct c2_ucontext {
-       struct ib_ucontext ibucontext;
-};
-
-struct c2_mtt;
-
-/* All objects associated with a PD are kept in the
- * associated user context if present.
- */
-struct c2_pd {
-       struct ib_pd ibpd;
-       u32 pd_id;
-};
-
-struct c2_mr {
-       struct ib_mr ibmr;
-       struct c2_pd *pd;
-       struct ib_umem *umem;
-};
-
-struct c2_av;
-
-enum c2_ah_type {
-       C2_AH_ON_HCA,
-       C2_AH_PCI_POOL,
-       C2_AH_KMALLOC
-};
-
-struct c2_ah {
-       struct ib_ah ibah;
-};
-
-struct c2_cq {
-       struct ib_cq ibcq;
-       spinlock_t lock;
-       atomic_t refcount;
-       int cqn;
-       int is_kernel;
-       wait_queue_head_t wait;
-
-       u32 adapter_handle;
-       struct c2_mq mq;
-};
-
-struct c2_wq {
-       spinlock_t lock;
-};
-struct iw_cm_id;
-struct c2_qp {
-       struct ib_qp ibqp;
-       struct iw_cm_id *cm_id;
-       spinlock_t lock;
-       atomic_t refcount;
-       wait_queue_head_t wait;
-       int qpn;
-
-       u32 adapter_handle;
-       u32 send_sgl_depth;
-       u32 recv_sgl_depth;
-       u32 rdma_write_sgl_depth;
-       u8 state;
-
-       struct c2_mq sq_mq;
-       struct c2_mq rq_mq;
-};
-
-struct c2_cr_query_attrs {
-       u32 local_addr;
-       u32 remote_addr;
-       u16 local_port;
-       u16 remote_port;
-};
-
-static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
-{
-       return container_of(ibpd, struct c2_pd, ibpd);
-}
-
-static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
-{
-       return container_of(ibucontext, struct c2_ucontext, ibucontext);
-}
-
-static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
-{
-       return container_of(ibmr, struct c2_mr, ibmr);
-}
-
-
-static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
-{
-       return container_of(ibah, struct c2_ah, ibah);
-}
-
-static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
-{
-       return container_of(ibcq, struct c2_cq, ibcq);
-}
-
-static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
-{
-       return container_of(ibqp, struct c2_qp, ibqp);
-}
-
-static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
-{
-       struct in_device *ind;
-       int ret = 0;
-
-       ind = in_dev_get(netdev);
-       if (!ind)
-               return 0;
-
-       for_ifa(ind) {
-               if (ifa->ifa_address == addr) {
-                       ret = 1;
-                       break;
-               }
-       }
-       endfor_ifa(ind);
-       in_dev_put(ind);
-       return ret;
-}
-#endif                         /* C2_PROVIDER_H */
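Aside, not part of the commit: the to_c2pd()/to_c2cq()/to_c2qp() helpers above are the standard container_of idiom -- given a pointer to the embedded ib_* member, recover the enclosing driver structure by subtracting the member's offset. A user-space sketch under that assumption (struct c2_pd_demo and to_c2pd_demo are illustrative names only, and this simplified container_of omits the kernel's type checking):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_pd { int handle; };

struct c2_pd_demo {
        struct ib_pd ibpd;   /* embedded core object */
        unsigned int pd_id;  /* driver-private field */
};

static struct c2_pd_demo *to_c2pd_demo(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c2_pd_demo, ibpd);
}

int main(void)
{
        struct c2_pd_demo pd = { .ibpd = { .handle = 7 }, .pd_id = 42 };
        struct ib_pd *core = &pd.ibpd;

        printf("pd_id recovered: %u\n", to_c2pd_demo(core)->pd_id);
        return 0;
}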
diff --git a/drivers/staging/rdma/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c
deleted file mode 100644 (file)
index ca364db..0000000
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_MAX_ORD_PER_QP 128
-#define C2_MAX_IRD_PER_QP 128
-
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
-
-#define NO_SUPPORT -1
-static const u8 c2_opcode[] = {
-       [IB_WR_SEND] = C2_WR_TYPE_SEND,
-       [IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
-       [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
-       [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
-       [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
-       [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
-       [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
-};
-
-static int to_c2_state(enum ib_qp_state ib_state)
-{
-       switch (ib_state) {
-       case IB_QPS_RESET:
-               return C2_QP_STATE_IDLE;
-       case IB_QPS_RTS:
-               return C2_QP_STATE_RTS;
-       case IB_QPS_SQD:
-               return C2_QP_STATE_CLOSING;
-       case IB_QPS_SQE:
-               return C2_QP_STATE_CLOSING;
-       case IB_QPS_ERR:
-               return C2_QP_STATE_ERROR;
-       default:
-               return -1;
-       }
-}
-
-static int to_ib_state(enum c2_qp_state c2_state)
-{
-       switch (c2_state) {
-       case C2_QP_STATE_IDLE:
-               return IB_QPS_RESET;
-       case C2_QP_STATE_CONNECTING:
-               return IB_QPS_RTR;
-       case C2_QP_STATE_RTS:
-               return IB_QPS_RTS;
-       case C2_QP_STATE_CLOSING:
-               return IB_QPS_SQD;
-       case C2_QP_STATE_ERROR:
-               return IB_QPS_ERR;
-       case C2_QP_STATE_TERMINATE:
-               return IB_QPS_SQE;
-       default:
-               return -1;
-       }
-}
-
-static const char *to_ib_state_str(int ib_state)
-{
-       static const char *state_str[] = {
-               "IB_QPS_RESET",
-               "IB_QPS_INIT",
-               "IB_QPS_RTR",
-               "IB_QPS_RTS",
-               "IB_QPS_SQD",
-               "IB_QPS_SQE",
-               "IB_QPS_ERR"
-       };
-       if (ib_state < IB_QPS_RESET ||
-           ib_state > IB_QPS_ERR)
-               return "<invalid IB QP state>";
-
-       ib_state -= IB_QPS_RESET;
-       return state_str[ib_state];
-}
-
-void c2_set_qp_state(struct c2_qp *qp, int c2_state)
-{
-       int new_state = to_ib_state(c2_state);
-
-       pr_debug("%s: qp[%p] state modify %s --> %s\n",
-              __func__,
-               qp,
-               to_ib_state_str(qp->state),
-               to_ib_state_str(new_state));
-       qp->state = new_state;
-}
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
-                struct ib_qp_attr *attr, int attr_mask)
-{
-       struct c2wr_qp_modify_req wr;
-       struct c2wr_qp_modify_rep *reply;
-       struct c2_vq_req *vq_req;
-       unsigned long flags;
-       u8 next_state;
-       int err;
-
-       pr_debug("%s:%d qp=%p, %s --> %s\n",
-               __func__, __LINE__,
-               qp,
-               to_ib_state_str(qp->state),
-               to_ib_state_str(attr->qp_state));
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       c2_wr_set_id(&wr, CCWR_QP_MODIFY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.qp_handle = qp->adapter_handle;
-       wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
-       if (attr_mask & IB_QP_STATE) {
-               /* Ensure the state is valid */
-               if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
-                       err = -EINVAL;
-                       goto bail0;
-               }
-
-               wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
-
-               if (attr->qp_state == IB_QPS_ERR) {
-                       spin_lock_irqsave(&qp->lock, flags);
-                       if (qp->cm_id && qp->state == IB_QPS_RTS) {
-                               pr_debug("Generating CLOSE event for QP-->ERR, "
-                                       "qp=%p, cm_id=%p\n",qp,qp->cm_id);
-                               /* Generate a CLOSE event */
-                               vq_req->cm_id = qp->cm_id;
-                               vq_req->event = IW_CM_EVENT_CLOSE;
-                       }
-                       spin_unlock_irqrestore(&qp->lock, flags);
-               }
-               next_state =  attr->qp_state;
-
-       } else if (attr_mask & IB_QP_CUR_STATE) {
-
-               if (attr->cur_qp_state != IB_QPS_RTR &&
-                   attr->cur_qp_state != IB_QPS_RTS &&
-                   attr->cur_qp_state != IB_QPS_SQD &&
-                   attr->cur_qp_state != IB_QPS_SQE) {
-                       err = -EINVAL;
-                       goto bail0;
-               } else
-                       wr.next_qp_state =
-                           cpu_to_be32(to_c2_state(attr->cur_qp_state));
-
-               next_state = attr->cur_qp_state;
-
-       } else {
-               err = 0;
-               goto bail0;
-       }
-
-       /* reference the request struct */
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       err = c2_errno(reply);
-       if (!err)
-               qp->state = next_state;
-#ifdef DEBUG
-       else
-               pr_debug("%s: c2_errno=%d\n", __func__, err);
-#endif
-       /*
-        * If we're erroring out and generating the close event here,
-        * then we need to remove the reference, because there will be
-        * no close event generated by the adapter.
-        */
-       spin_lock_irqsave(&qp->lock, flags);
-       if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
-               qp->cm_id->rem_ref(qp->cm_id);
-               qp->cm_id = NULL;
-       }
-       spin_unlock_irqrestore(&qp->lock, flags);
-
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-
-       pr_debug("%s:%d qp=%p, cur_state=%s\n",
-               __func__, __LINE__,
-               qp,
-               to_ib_state_str(qp->state));
-       return err;
-}
-
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
-                         int ord, int ird)
-{
-       struct c2wr_qp_modify_req wr;
-       struct c2wr_qp_modify_rep *reply;
-       struct c2_vq_req *vq_req;
-       int err;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       c2_wr_set_id(&wr, CCWR_QP_MODIFY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.qp_handle = qp->adapter_handle;
-       wr.ord = cpu_to_be32(ord);
-       wr.ird = cpu_to_be32(ird);
-       wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
-       /* reference the request struct */
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       reply = (struct c2wr_qp_modify_rep *) (unsigned long)
-               vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       err = c2_errno(reply);
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_qp_destroy_req wr;
-       struct c2wr_qp_destroy_rep *reply;
-       unsigned long flags;
-       int err;
-
-       /*
-        * Allocate a verb request message
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               return -ENOMEM;
-       }
-
-       /*
-        * Initialize the WR
-        */
-       c2_wr_set_id(&wr, CCWR_QP_DESTROY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.qp_handle = qp->adapter_handle;
-
-       /*
-        * reference the request struct.  dereferenced in the interrupt handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       spin_lock_irqsave(&qp->lock, flags);
-       if (qp->cm_id && qp->state == IB_QPS_RTS) {
-               pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
-                       "qp=%p, cm_id=%p\n",qp,qp->cm_id);
-                       /* Generate a CLOSE event */
-               vq_req->qp = qp;
-               vq_req->cm_id = qp->cm_id;
-               vq_req->event = IW_CM_EVENT_CLOSE;
-       }
-       spin_unlock_irqrestore(&qp->lock, flags);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       spin_lock_irqsave(&qp->lock, flags);
-       if (qp->cm_id) {
-               qp->cm_id->rem_ref(qp->cm_id);
-               qp->cm_id = NULL;
-       }
-       spin_unlock_irqrestore(&qp->lock, flags);
-
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-       int ret;
-
-       idr_preload(GFP_KERNEL);
-       spin_lock_irq(&c2dev->qp_table.lock);
-
-       ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
-       if (ret >= 0)
-               qp->qpn = ret;
-
-       spin_unlock_irq(&c2dev->qp_table.lock);
-       idr_preload_end();
-       return ret < 0 ? ret : 0;
-}
-
-static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
-{
-       spin_lock_irq(&c2dev->qp_table.lock);
-       idr_remove(&c2dev->qp_table.idr, qpn);
-       spin_unlock_irq(&c2dev->qp_table.lock);
-}
-
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
-{
-       unsigned long flags;
-       struct c2_qp *qp;
-
-       spin_lock_irqsave(&c2dev->qp_table.lock, flags);
-       qp = idr_find(&c2dev->qp_table.idr, qpn);
-       spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
-       return qp;
-}
-
-int c2_alloc_qp(struct c2_dev *c2dev,
-               struct c2_pd *pd,
-               struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
-{
-       struct c2wr_qp_create_req wr;
-       struct c2wr_qp_create_rep *reply;
-       struct c2_vq_req *vq_req;
-       struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
-       struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
-       unsigned long peer_pa;
-       u32 q_size, msg_size, mmap_size;
-       void __iomem *mmap;
-       int err;
-
-       err = c2_alloc_qpn(c2dev, qp);
-       if (err)
-               return err;
-       qp->ibqp.qp_num = qp->qpn;
-       qp->ibqp.qp_type = IB_QPT_RC;
-
-       /* Allocate the SQ and RQ shared pointers */
-       qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                        &qp->sq_mq.shared_dma, GFP_KERNEL);
-       if (!qp->sq_mq.shared) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                        &qp->rq_mq.shared_dma, GFP_KERNEL);
-       if (!qp->rq_mq.shared) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       /* Allocate the verbs request */
-       vq_req = vq_req_alloc(c2dev);
-       if (vq_req == NULL) {
-               err = -ENOMEM;
-               goto bail2;
-       }
-
-       /* Initialize the work request */
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_QP_CREATE);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.sq_cq_handle = send_cq->adapter_handle;
-       wr.rq_cq_handle = recv_cq->adapter_handle;
-       wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
-       wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
-       wr.srq_handle = 0;
-       wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
-                              QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
-       wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
-       wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
-       wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
-       wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
-       wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
-       wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
-       wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
-       wr.pd_id = pd->pd_id;
-       wr.user_context = (unsigned long) qp;
-
-       vq_req_get(c2dev, vq_req);
-
-       /* Send the WR to the adapter */
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail3;
-       }
-
-       /* Wait for the verb reply  */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail3;
-       }
-
-       /* Process the reply */
-       reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail3;
-       }
-
-       if ((err = c2_wr_get_result(reply)) != 0) {
-               goto bail4;
-       }
-
-       /* Fill in the kernel QP struct */
-       atomic_set(&qp->refcount, 1);
-       qp->adapter_handle = reply->qp_handle;
-       qp->state = IB_QPS_RESET;
-       qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
-       qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
-       qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
-       init_waitqueue_head(&qp->wait);
-
-       /* Initialize the SQ MQ */
-       q_size = be32_to_cpu(reply->sq_depth);
-       msg_size = be32_to_cpu(reply->sq_msg_size);
-       peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
-       mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
-       mmap = ioremap_nocache(peer_pa, mmap_size);
-       if (!mmap) {
-               err = -ENOMEM;
-               goto bail5;
-       }
-
-       c2_mq_req_init(&qp->sq_mq,
-                      be32_to_cpu(reply->sq_mq_index),
-                      q_size,
-                      msg_size,
-                      mmap + sizeof(struct c2_mq_shared),      /* pool start */
-                      mmap,                            /* peer */
-                      C2_MQ_ADAPTER_TARGET);
-
-       /* Initialize the RQ mq */
-       q_size = be32_to_cpu(reply->rq_depth);
-       msg_size = be32_to_cpu(reply->rq_msg_size);
-       peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
-       mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
-       mmap = ioremap_nocache(peer_pa, mmap_size);
-       if (!mmap) {
-               err = -ENOMEM;
-               goto bail6;
-       }
-
-       c2_mq_req_init(&qp->rq_mq,
-                      be32_to_cpu(reply->rq_mq_index),
-                      q_size,
-                      msg_size,
-                      mmap + sizeof(struct c2_mq_shared),      /* pool start */
-                      mmap,                            /* peer */
-                      C2_MQ_ADAPTER_TARGET);
-
-       vq_repbuf_free(c2dev, reply);
-       vq_req_free(c2dev, vq_req);
-
-       return 0;
-
-bail6:
-       iounmap(qp->sq_mq.peer);
-bail5:
-       destroy_qp(c2dev, qp);
-bail4:
-       vq_repbuf_free(c2dev, reply);
-bail3:
-       vq_req_free(c2dev, vq_req);
-bail2:
-       c2_free_mqsp(qp->rq_mq.shared);
-bail1:
-       c2_free_mqsp(qp->sq_mq.shared);
-bail0:
-       c2_free_qpn(c2dev, qp->qpn);
-       return err;
-}
-
-static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
-       if (send_cq == recv_cq)
-               spin_lock_irq(&send_cq->lock);
-       else if (send_cq > recv_cq) {
-               spin_lock_irq(&send_cq->lock);
-               spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
-       } else {
-               spin_lock_irq(&recv_cq->lock);
-               spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
-       }
-}
-
-static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
-       if (send_cq == recv_cq)
-               spin_unlock_irq(&send_cq->lock);
-       else if (send_cq > recv_cq) {
-               spin_unlock(&recv_cq->lock);
-               spin_unlock_irq(&send_cq->lock);
-       } else {
-               spin_unlock(&send_cq->lock);
-               spin_unlock_irq(&recv_cq->lock);
-       }
-}
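c2_lock_cqs() and c2_unlock_cqs() above take the two completion-queue locks in a fixed order based on pointer comparison, so any two paths that lock the same pair of CQs acquire them in the same order no matter which queue plays the send or receive role, ruling out an AB/BA deadlock. The SINGLE_DEPTH_NESTING annotation only tells lockdep that nesting one CQ lock inside another of the same lock class is intentional.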
-
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-       struct c2_cq *send_cq;
-       struct c2_cq *recv_cq;
-
-       send_cq = to_c2cq(qp->ibqp.send_cq);
-       recv_cq = to_c2cq(qp->ibqp.recv_cq);
-
-       /*
-        * Lock CQs here, so that CQ polling code can do QP lookup
-        * without taking a lock.
-        */
-       c2_lock_cqs(send_cq, recv_cq);
-       c2_free_qpn(c2dev, qp->qpn);
-       c2_unlock_cqs(send_cq, recv_cq);
-
-       /*
-        * Destroy qp in the rnic...
-        */
-       destroy_qp(c2dev, qp);
-
-       /*
-        * Mark any unreaped CQEs as null and void.
-        */
-       c2_cq_clean(c2dev, qp, send_cq->cqn);
-       if (send_cq != recv_cq)
-               c2_cq_clean(c2dev, qp, recv_cq->cqn);
-       /*
-        * Unmap the MQs and return the shared pointers
-        * to the message pool.
-        */
-       iounmap(qp->sq_mq.peer);
-       iounmap(qp->rq_mq.peer);
-       c2_free_mqsp(qp->sq_mq.shared);
-       c2_free_mqsp(qp->rq_mq.shared);
-
-       atomic_dec(&qp->refcount);
-       wait_event(qp->wait, !atomic_read(&qp->refcount));
-}
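The atomic_dec()/wait_event() pair at the end of c2_free_qp() drops the initial reference taken when the QP was created (the refcount is set to 1 in c2_alloc_qp()) and then blocks until any other code that still holds a reference, presumably the completion- and event-handling paths elsewhere in the driver (not shown in this hunk), releases it and the count reaches zero.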
-
-/*
- * Function: move_sgl
- *
- * Description:
- * Move an SGL from the user's work request struct into a CCIL Work Request
- * message, swapping to WR byte order and ensuring the total length doesn't
- * overflow.
- *
- * IN:
- * dst         - ptr to CCIL Work Request message SGL memory.
- * src         - ptr to the consumer's SGL memory.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int
-move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
-        u8 * actual_count)
-{
-       u32 tot = 0;            /* running total */
-       u8 acount = 0;          /* running count of non-zero-length SGEs */
-
-       while (count > 0) {
-               /*
-                * If the addition of this SGE causes the
-                * total SGL length to exceed 2^32-1, then
-                * fail-n-bail.
-                *
-                * If the current total plus the next element length
-                * wraps, the unsigned sum becomes smaller than the
-                * current total...
-                */
-               if ((tot + src->length) < tot) {
-                       return -EINVAL;
-               }
-               /*
-                * Bug: 1456 (as well as 1498 & 1643)
-                * Skip over any sge's supplied with len=0
-                */
-               if (src->length) {
-                       tot += src->length;
-                       dst->stag = cpu_to_be32(src->lkey);
-                       dst->to = cpu_to_be64(src->addr);
-                       dst->length = cpu_to_be32(src->length);
-                       dst++;
-                       acount++;
-               }
-               src++;
-               count--;
-       }
-
-       if (acount == 0) {
-               /*
-                * Bug: 1476 (as well as 1498, 1456 and 1643)
-                * Setup the SGL in the WR to make it easier for the RNIC.
-                * This way, the FW doesn't have to deal with special cases.
-                * Setting length=0 should be sufficient.
-                */
-               dst->stag = 0;
-               dst->to = 0;
-               dst->length = 0;
-       }
-
-       *p_len = tot;
-       *actual_count = acount;
-       return 0;
-}
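A quick worked example of the wrap check above: with tot = 0xFFFFFFF0 and src->length = 0x20, the 32-bit sum wraps around to 0x10, which is less than tot, so move_sgl() returns -EINVAL rather than silently reporting a truncated total SGL length.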
-
-/*
- * Function: c2_activity (private function)
- *
- * Description:
- * Post an mq index to the host->adapter activity fifo.
- *
- * IN:
- * c2dev       - ptr to c2dev structure
- * mq_index    - mq index to post
- * shared      - value most recently written to shared
- *
- * OUT:
- *
- * Return:
- * none
- */
-static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
-{
-       /*
-        * First read the register to see if the FIFO is full, and if so,
-        * spin until it's not.  This isn't perfect -- there is no
-        * synchronization among the clients of the register, but in
-        * practice it prevents multiple CPUs from hammering the bus
-        * with PCI RETRY. Note that when this does happen, the card
-        * cannot get on the bus and the card and system hang in a
-        * deadlock -- thus the need for this code. [TOT]
-        */
-       while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
-               udelay(10);
-
-       __raw_writel(C2_HINT_MAKE(mq_index, shared),
-                    c2dev->regs + PCI_BAR0_ADAPTER_HINT);
-}
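Note that c2_post_send() and c2_post_receive() below call c2_activity() while holding qp->lock with interrupts disabled, so the udelay(10) polling loop above runs under that spinlock; it is only expected to spin while the adapter's hint FIFO is momentarily full.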
-
-/*
- * Function: qp_wr_post
- *
- * Description:
- * This in-line function allocates an MQ msg, then moves the host-copy of
- * the completed WR into msg.  Then it posts the message.
- *
- * IN:
- * q           - ptr to user MQ.
- * wr          - ptr to host-copy of the WR.
- * qp          - ptr to user qp
- * size                - Number of bytes to post.  Assumed to be divisible by 4.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
-{
-       union c2wr *msg;
-
-       msg = c2_mq_alloc(q);
-       if (msg == NULL) {
-               return -EINVAL;
-       }
-#ifdef CCMSGMAGIC
-       ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
-
-       /*
-        * Since all header fields in the WR are the same as the
-        * CQE, set the following so the adapter need not.
-        */
-       c2_wr_set_result(wr, CCERR_PENDING);
-
-       /*
-        * Copy the wr down to the adapter
-        */
-       memcpy((void *) msg, (void *) wr, size);
-
-       c2_mq_produce(q);
-       return 0;
-}
-
-
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
-                struct ib_send_wr **bad_wr)
-{
-       struct c2_dev *c2dev = to_c2dev(ibqp->device);
-       struct c2_qp *qp = to_c2qp(ibqp);
-       union c2wr wr;
-       unsigned long lock_flags;
-       int err = 0;
-
-       u32 flags;
-       u32 tot_len;
-       u8 actual_sge_count;
-       u32 msg_size;
-
-       if (qp->state > IB_QPS_RTS) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       while (ib_wr) {
-
-               flags = 0;
-               wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
-               if (ib_wr->send_flags & IB_SEND_SIGNALED) {
-                       flags |= SQ_SIGNALED;
-               }
-
-               switch (ib_wr->opcode) {
-               case IB_WR_SEND:
-               case IB_WR_SEND_WITH_INV:
-                       if (ib_wr->opcode == IB_WR_SEND) {
-                               if (ib_wr->send_flags & IB_SEND_SOLICITED)
-                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
-                               else
-                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
-                               wr.sqwr.send.remote_stag = 0;
-                       } else {
-                               if (ib_wr->send_flags & IB_SEND_SOLICITED)
-                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
-                               else
-                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
-                               wr.sqwr.send.remote_stag =
-                                       cpu_to_be32(ib_wr->ex.invalidate_rkey);
-                       }
-
-                       msg_size = sizeof(struct c2wr_send_req) +
-                               sizeof(struct c2_data_addr) * ib_wr->num_sge;
-                       if (ib_wr->num_sge > qp->send_sgl_depth) {
-                               err = -EINVAL;
-                               break;
-                       }
-                       if (ib_wr->send_flags & IB_SEND_FENCE) {
-                               flags |= SQ_READ_FENCE;
-                       }
-                       err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
-                                      ib_wr->sg_list,
-                                      ib_wr->num_sge,
-                                      &tot_len, &actual_sge_count);
-                       wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
-                       c2_wr_set_sge_count(&wr, actual_sge_count);
-                       break;
-               case IB_WR_RDMA_WRITE:
-                       c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
-                       msg_size = sizeof(struct c2wr_rdma_write_req) +
-                           (sizeof(struct c2_data_addr) * ib_wr->num_sge);
-                       if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
-                               err = -EINVAL;
-                               break;
-                       }
-                       if (ib_wr->send_flags & IB_SEND_FENCE) {
-                               flags |= SQ_READ_FENCE;
-                       }
-                       wr.sqwr.rdma_write.remote_stag =
-                           cpu_to_be32(rdma_wr(ib_wr)->rkey);
-                       wr.sqwr.rdma_write.remote_to =
-                           cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
-                       err = move_sgl((struct c2_data_addr *)
-                                      & (wr.sqwr.rdma_write.data),
-                                      ib_wr->sg_list,
-                                      ib_wr->num_sge,
-                                      &tot_len, &actual_sge_count);
-                       wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
-                       c2_wr_set_sge_count(&wr, actual_sge_count);
-                       break;
-               case IB_WR_RDMA_READ:
-                       c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
-                       msg_size = sizeof(struct c2wr_rdma_read_req);
-
-                       /* iWARP only supports 1 SGE for RDMA reads */
-                       if (ib_wr->num_sge > 1) {
-                               err = -EINVAL;
-                               break;
-                       }
-
-                       /*
-                        * Move the local and remote stag/to/len into the WR.
-                        */
-                       wr.sqwr.rdma_read.local_stag =
-                           cpu_to_be32(ib_wr->sg_list->lkey);
-                       wr.sqwr.rdma_read.local_to =
-                           cpu_to_be64(ib_wr->sg_list->addr);
-                       wr.sqwr.rdma_read.remote_stag =
-                           cpu_to_be32(rdma_wr(ib_wr)->rkey);
-                       wr.sqwr.rdma_read.remote_to =
-                           cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
-                       wr.sqwr.rdma_read.length =
-                           cpu_to_be32(ib_wr->sg_list->length);
-                       break;
-               default:
-                       /* error */
-                       msg_size = 0;
-                       err = -EINVAL;
-                       break;
-               }
-
-               /*
-                * If we had an error on the last wr build, then
-                * break out.  Possible errors include bogus WR
-                * type, and a bogus SGL length...
-                */
-               if (err) {
-                       break;
-               }
-
-               /*
-                * Store flags
-                */
-               c2_wr_set_flags(&wr, flags);
-
-               /*
-                * Post the puppy!
-                */
-               spin_lock_irqsave(&qp->lock, lock_flags);
-               err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
-               if (err) {
-                       spin_unlock_irqrestore(&qp->lock, lock_flags);
-                       break;
-               }
-
-               /*
-                * Enqueue mq index to activity FIFO.
-                */
-               c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
-               spin_unlock_irqrestore(&qp->lock, lock_flags);
-
-               ib_wr = ib_wr->next;
-       }
-
-out:
-       if (err)
-               *bad_wr = ib_wr;
-       return err;
-}
-
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
-                   struct ib_recv_wr **bad_wr)
-{
-       struct c2_dev *c2dev = to_c2dev(ibqp->device);
-       struct c2_qp *qp = to_c2qp(ibqp);
-       union c2wr wr;
-       unsigned long lock_flags;
-       int err = 0;
-
-       if (qp->state > IB_QPS_RTS) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       /*
-        * Try to post each work request
-        */
-       while (ib_wr) {
-               u32 tot_len;
-               u8 actual_sge_count;
-
-               if (ib_wr->num_sge > qp->recv_sgl_depth) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               /*
-                * Create local host-copy of the WR
-                */
-               wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
-               c2_wr_set_id(&wr, CCWR_RECV);
-               c2_wr_set_flags(&wr, 0);
-
-               /* sge_count is limited to eight bits. */
-               BUG_ON(ib_wr->num_sge >= 256);
-               err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
-                              ib_wr->sg_list,
-                              ib_wr->num_sge, &tot_len, &actual_sge_count);
-               c2_wr_set_sge_count(&wr, actual_sge_count);
-
-               /*
-                * If we had an error on the last wr build, then
-                * break out.  Possible errors include bogus WR
-                * type, and a bogus SGL length...
-                */
-               if (err) {
-                       break;
-               }
-
-               spin_lock_irqsave(&qp->lock, lock_flags);
-               err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
-               if (err) {
-                       spin_unlock_irqrestore(&qp->lock, lock_flags);
-                       break;
-               }
-
-               /*
-                * Enqueue mq index to activity FIFO
-                */
-               c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
-               spin_unlock_irqrestore(&qp->lock, lock_flags);
-
-               ib_wr = ib_wr->next;
-       }
-
-out:
-       if (err)
-               *bad_wr = ib_wr;
-       return err;
-}
-
-void c2_init_qp_table(struct c2_dev *c2dev)
-{
-       spin_lock_init(&c2dev->qp_table.lock);
-       idr_init(&c2dev->qp_table.idr);
-}
-
-void c2_cleanup_qp_table(struct c2_dev *c2dev)
-{
-       idr_destroy(&c2dev->qp_table.idr);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_rnic.c b/drivers/staging/rdma/amso1100/c2_rnic.c
deleted file mode 100644 (file)
index 5e65c6d..0000000
+++ /dev/null
@@ -1,652 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/inet.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <linux/route.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_vq.h"
-
-/* Device capabilities */
-#define C2_MIN_PAGESIZE  1024
-
-#define C2_MAX_MRS       32768
-#define C2_MAX_QPS       16000
-#define C2_MAX_WQE_SZ    256
-#define C2_MAX_QP_WR     ((128*1024)/C2_MAX_WQE_SZ)
-#define C2_MAX_SGES      4
-#define C2_MAX_SGE_RD    1
-#define C2_MAX_CQS       32768
-#define C2_MAX_CQES      4096
-#define C2_MAX_PDS       16384
-
-/*
- * Send the adapter INIT message to the amso1100
- */
-static int c2_adapter_init(struct c2_dev *c2dev)
-{
-       struct c2wr_init_req wr;
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_INIT);
-       wr.hdr.context = 0;
-       wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
-       wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
-       wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
-       wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
-       wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
-       wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
-
-       /* Post the init message */
-       return vq_send_wr(c2dev, (union c2wr *) & wr);
-}
-
-/*
- * Send the adapter TERM message to the amso1100
- */
-static void c2_adapter_term(struct c2_dev *c2dev)
-{
-       struct c2wr_init_req wr;
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_TERM);
-       wr.hdr.context = 0;
-
-       /* Post the term message */
-       vq_send_wr(c2dev, (union c2wr *) & wr);
-       c2dev->init = 0;
-
-       return;
-}
-
-/*
- * Query the adapter
- */
-static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_rnic_query_req wr;
-       struct c2wr_rnic_query_rep *reply;
-       int err;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       reply =
-           (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply)
-               err = -ENOMEM;
-       else
-               err = c2_errno(reply);
-       if (err)
-               goto bail2;
-
-       props->fw_ver =
-               ((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
-               ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
-               (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
-       memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
-       props->max_mr_size         = 0xFFFFFFFF;
-       props->page_size_cap       = ~(C2_MIN_PAGESIZE-1);
-       props->vendor_id           = be32_to_cpu(reply->vendor_id);
-       props->vendor_part_id      = be32_to_cpu(reply->part_number);
-       props->hw_ver              = be32_to_cpu(reply->hw_version);
-       props->max_qp              = be32_to_cpu(reply->max_qps);
-       props->max_qp_wr           = be32_to_cpu(reply->max_qp_depth);
-       props->device_cap_flags    = c2dev->device_cap_flags;
-       props->max_sge             = C2_MAX_SGES;
-       props->max_sge_rd          = C2_MAX_SGE_RD;
-       props->max_cq              = be32_to_cpu(reply->max_cqs);
-       props->max_cqe             = be32_to_cpu(reply->max_cq_depth);
-       props->max_mr              = be32_to_cpu(reply->max_mrs);
-       props->max_pd              = be32_to_cpu(reply->max_pds);
-       props->max_qp_rd_atom      = be32_to_cpu(reply->max_qp_ird);
-       props->max_ee_rd_atom      = 0;
-       props->max_res_rd_atom     = be32_to_cpu(reply->max_global_ird);
-       props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
-       props->max_ee_init_rd_atom = 0;
-       props->atomic_cap          = IB_ATOMIC_NONE;
-       props->max_ee              = 0;
-       props->max_rdd             = 0;
-       props->max_mw              = be32_to_cpu(reply->max_mws);
-       props->max_raw_ipv6_qp     = 0;
-       props->max_raw_ethy_qp     = 0;
-       props->max_mcast_grp       = 0;
-       props->max_mcast_qp_attach = 0;
-       props->max_total_mcast_qp_attach = 0;
-       props->max_ah              = 0;
-       props->max_fmr             = 0;
-       props->max_map_per_fmr     = 0;
-       props->max_srq             = 0;
-       props->max_srq_wr          = 0;
-       props->max_srq_sge         = 0;
-       props->max_pkeys           = 0;
-       props->local_ca_ack_delay  = 0;
-
- bail2:
-       vq_repbuf_free(c2dev, reply);
-
- bail1:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
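The fw_ver value assembled above packs the firmware major number into bits 63:32, the minor number into bits 31:16 and the patch level into bits 15:0, so a consumer could unpack it along these lines (an illustrative sketch; the variable names are not from the driver):

	u32 major = (u32)(props->fw_ver >> 32);
	u32 minor = (u32)(props->fw_ver >> 16) & 0xffff;
	u32 patch = (u32)props->fw_ver & 0xffff;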
-
-/*
- * Add an IP address to the RNIC interface
- */
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_rnic_setconfig_req *wr;
-       struct c2wr_rnic_setconfig_rep *reply;
-       struct c2_netaddr netaddr;
-       int err, len;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       len = sizeof(struct c2_netaddr);
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
-       wr->hdr.context = (unsigned long) vq_req;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);
-
-       netaddr.ip_addr = inaddr;
-       netaddr.netmask = inmask;
-       netaddr.mtu = 0;
-
-       memcpy(wr->data, &netaddr, len);
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       reply =
-           (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       err = c2_errno(reply);
-       vq_repbuf_free(c2dev, reply);
-
-bail1:
-       kfree(wr);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Delete an IP address from the RNIC interface
- */
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_rnic_setconfig_req *wr;
-       struct c2wr_rnic_setconfig_rep *reply;
-       struct c2_netaddr netaddr;
-       int err, len;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       len = sizeof(struct c2_netaddr);
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
-       wr->hdr.context = (unsigned long) vq_req;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);
-
-       netaddr.ip_addr = inaddr;
-       netaddr.netmask = inmask;
-       netaddr.mtu = 0;
-
-       memcpy(wr->data, &netaddr, len);
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       reply =
-           (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       err = c2_errno(reply);
-       vq_repbuf_free(c2dev, reply);
-
-bail1:
-       kfree(wr);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Open a single RNIC instance to use with all
- * low-level OpenIB calls
- */
-static int c2_rnic_open(struct c2_dev *c2dev)
-{
-       struct c2_vq_req *vq_req;
-       union c2wr wr;
-       struct c2wr_rnic_open_rep *reply;
-       int err;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (vq_req == NULL) {
-               return -ENOMEM;
-       }
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
-       wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
-       wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
-       wr.rnic_open.req.port_num = cpu_to_be16(0);
-       wr.rnic_open.req.user_context = (unsigned long) c2dev;
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       if ((err = c2_errno(reply)) != 0) {
-               goto bail1;
-       }
-
-       c2dev->adapter_handle = reply->rnic_handle;
-
-bail1:
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Close the RNIC instance
- */
-static int c2_rnic_close(struct c2_dev *c2dev)
-{
-       struct c2_vq_req *vq_req;
-       union c2wr wr;
-       struct c2wr_rnic_close_rep *reply;
-       int err;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (vq_req == NULL) {
-               return -ENOMEM;
-       }
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
-       wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
-       wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       if ((err = c2_errno(reply)) != 0) {
-               goto bail1;
-       }
-
-       c2dev->adapter_handle = 0;
-
-bail1:
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Called by c2_probe to initialize the RNIC. This principally
- * involves initializing the various limits and resource pools that
- * comprise the RNIC instance.
- */
-int c2_rnic_init(struct c2_dev *c2dev)
-{
-       int err;
-       u32 qsize, msgsize;
-       void *q1_pages;
-       void *q2_pages;
-       void __iomem *mmio_regs;
-
-       /* Device capabilities */
-       c2dev->device_cap_flags =
-           (IB_DEVICE_RESIZE_MAX_WR |
-            IB_DEVICE_CURR_QP_STATE_MOD |
-            IB_DEVICE_SYS_IMAGE_GUID |
-            IB_DEVICE_LOCAL_DMA_LKEY |
-            IB_DEVICE_MEM_WINDOW);
-
-       /* Allocate the qptr_array */
-       c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
-       if (!c2dev->qptr_array) {
-               return -ENOMEM;
-       }
-
-       /* Initialize the qptr_array */
-       c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
-       c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
-       c2dev->qptr_array[2] = (void *) &c2dev->aeq;
-
-       /* Initialize data structures */
-       init_waitqueue_head(&c2dev->req_vq_wo);
-       spin_lock_init(&c2dev->vqlock);
-       spin_lock_init(&c2dev->lock);
-
-       /* Allocate MQ shared pointer pool for kernel clients. User
-        * mode client pools are hung off the user context
-        */
-       err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
-       if (err) {
-               goto bail0;
-       }
-
-       /* Allocate shared pointers for Q0, Q1, and Q2 from
-        * the shared pointer pool.
-        */
-
-       c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                            &c2dev->hint_count_dma,
-                                            GFP_KERNEL);
-       c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                            &c2dev->req_vq.shared_dma,
-                                            GFP_KERNEL);
-       c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                            &c2dev->rep_vq.shared_dma,
-                                            GFP_KERNEL);
-       c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                         &c2dev->aeq.shared_dma, GFP_KERNEL);
-       if (!c2dev->hint_count || !c2dev->req_vq.shared ||
-           !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       mmio_regs = c2dev->kva;
-       /* Initialize the Verbs Request Queue */
-       c2_mq_req_init(&c2dev->req_vq, 0,
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
-                      mmio_regs +
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
-                      mmio_regs +
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
-                      C2_MQ_ADAPTER_TARGET);
-
-       /* Initialize the Verbs Reply Queue */
-       qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
-       msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-       q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
-                                     &c2dev->rep_vq.host_dma, GFP_KERNEL);
-       if (!q1_pages) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-       dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
-       pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
-                (unsigned long long) c2dev->rep_vq.host_dma);
-       c2_mq_rep_init(&c2dev->rep_vq,
-                  1,
-                  qsize,
-                  msgsize,
-                  q1_pages,
-                  mmio_regs +
-                  be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
-                  C2_MQ_HOST_TARGET);
-
-       /* Initialize the Asynchronous Event Queue */
-       qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
-       msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-       q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
-                                     &c2dev->aeq.host_dma, GFP_KERNEL);
-       if (!q2_pages) {
-               err = -ENOMEM;
-               goto bail2;
-       }
-       dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
-       pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
-                (unsigned long long) c2dev->aeq.host_dma);
-       c2_mq_rep_init(&c2dev->aeq,
-                      2,
-                      qsize,
-                      msgsize,
-                      q2_pages,
-                      mmio_regs +
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
-                      C2_MQ_HOST_TARGET);
-
-       /* Initialize the verbs request allocator */
-       err = vq_init(c2dev);
-       if (err)
-               goto bail3;
-
-       /* Enable interrupts on the adapter */
-       writel(0, c2dev->regs + C2_IDIS);
-
-       /* create the WR init message */
-       err = c2_adapter_init(c2dev);
-       if (err)
-               goto bail4;
-       c2dev->init++;
-
-       /* open an adapter instance */
-       err = c2_rnic_open(c2dev);
-       if (err)
-               goto bail4;
-
-       /* Initialize the cached adapter limits */
-       err = c2_rnic_query(c2dev, &c2dev->props);
-       if (err)
-               goto bail5;
-
-       /* Initialize the PD pool */
-       err = c2_init_pd_table(c2dev);
-       if (err)
-               goto bail5;
-
-       /* Initialize the QP pool */
-       c2_init_qp_table(c2dev);
-       return 0;
-
-bail5:
-       c2_rnic_close(c2dev);
-bail4:
-       vq_term(c2dev);
-bail3:
-       dma_free_coherent(&c2dev->pcidev->dev,
-                         c2dev->aeq.q_size * c2dev->aeq.msg_size,
-                         q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
-bail2:
-       dma_free_coherent(&c2dev->pcidev->dev,
-                         c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-                         q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
-bail1:
-       c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-bail0:
-       vfree(c2dev->qptr_array);
-
-       return err;
-}
-
-/*
- * Called by c2_remove to cleanup the RNIC resources.
- */
-void c2_rnic_term(struct c2_dev *c2dev)
-{
-
-       /* Close the open adapter instance */
-       c2_rnic_close(c2dev);
-
-       /* Send the TERM message to the adapter */
-       c2_adapter_term(c2dev);
-
-       /* Disable interrupts on the adapter */
-       writel(1, c2dev->regs + C2_IDIS);
-
-       /* Free the QP pool */
-       c2_cleanup_qp_table(c2dev);
-
-       /* Free the PD pool */
-       c2_cleanup_pd_table(c2dev);
-
-       /* Free the verbs request allocator */
-       vq_term(c2dev);
-
-       /* Free the asynchronous event queue */
-       dma_free_coherent(&c2dev->pcidev->dev,
-                         c2dev->aeq.q_size * c2dev->aeq.msg_size,
-                         c2dev->aeq.msg_pool.host,
-                         dma_unmap_addr(&c2dev->aeq, mapping));
-
-       /* Free the verbs reply queue */
-       dma_free_coherent(&c2dev->pcidev->dev,
-                         c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-                         c2dev->rep_vq.msg_pool.host,
-                         dma_unmap_addr(&c2dev->rep_vq, mapping));
-
-       /* Free the MQ shared pointer pool */
-       c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-
-       /* Free the qptr_array */
-       vfree(c2dev->qptr_array);
-
-       return;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_status.h b/drivers/staging/rdma/amso1100/c2_status.h
deleted file mode 100644 (file)
index 6ee4aa9..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef        _C2_STATUS_H_
-#define _C2_STATUS_H_
-
-/*
- * Verbs Status Codes
- */
-enum c2_status {
-       C2_OK = 0,              /* This must be zero */
-       CCERR_INSUFFICIENT_RESOURCES = 1,
-       CCERR_INVALID_MODIFIER = 2,
-       CCERR_INVALID_MODE = 3,
-       CCERR_IN_USE = 4,
-       CCERR_INVALID_RNIC = 5,
-       CCERR_INTERRUPTED_OPERATION = 6,
-       CCERR_INVALID_EH = 7,
-       CCERR_INVALID_CQ = 8,
-       CCERR_CQ_EMPTY = 9,
-       CCERR_NOT_IMPLEMENTED = 10,
-       CCERR_CQ_DEPTH_TOO_SMALL = 11,
-       CCERR_PD_IN_USE = 12,
-       CCERR_INVALID_PD = 13,
-       CCERR_INVALID_SRQ = 14,
-       CCERR_INVALID_ADDRESS = 15,
-       CCERR_INVALID_NETMASK = 16,
-       CCERR_INVALID_QP = 17,
-       CCERR_INVALID_QP_STATE = 18,
-       CCERR_TOO_MANY_WRS_POSTED = 19,
-       CCERR_INVALID_WR_TYPE = 20,
-       CCERR_INVALID_SGL_LENGTH = 21,
-       CCERR_INVALID_SQ_DEPTH = 22,
-       CCERR_INVALID_RQ_DEPTH = 23,
-       CCERR_INVALID_ORD = 24,
-       CCERR_INVALID_IRD = 25,
-       CCERR_QP_ATTR_CANNOT_CHANGE = 26,
-       CCERR_INVALID_STAG = 27,
-       CCERR_QP_IN_USE = 28,
-       CCERR_OUTSTANDING_WRS = 29,
-       CCERR_STAG_IN_USE = 30,
-       CCERR_INVALID_STAG_INDEX = 31,
-       CCERR_INVALID_SGL_FORMAT = 32,
-       CCERR_ADAPTER_TIMEOUT = 33,
-       CCERR_INVALID_CQ_DEPTH = 34,
-       CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
-       CCERR_INVALID_EP = 36,
-       CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
-       CCERR_FLUSHED = 38,
-       CCERR_INVALID_WQE = 39,
-       CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
-       CCERR_REMOTE_TERMINATION_ERROR = 41,
-       CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
-       CCERR_ACCESS_VIOLATION = 43,
-       CCERR_INVALID_PD_ID = 44,
-       CCERR_WRAP_ERROR = 45,
-       CCERR_INV_STAG_ACCESS_ERROR = 46,
-       CCERR_ZERO_RDMA_READ_RESOURCES = 47,
-       CCERR_QP_NOT_PRIVILEGED = 48,
-       CCERR_STAG_STATE_NOT_INVALID = 49,
-       CCERR_INVALID_PAGE_SIZE = 50,
-       CCERR_INVALID_BUFFER_SIZE = 51,
-       CCERR_INVALID_PBE = 52,
-       CCERR_INVALID_FBO = 53,
-       CCERR_INVALID_LENGTH = 54,
-       CCERR_INVALID_ACCESS_RIGHTS = 55,
-       CCERR_PBL_TOO_BIG = 56,
-       CCERR_INVALID_VA = 57,
-       CCERR_INVALID_REGION = 58,
-       CCERR_INVALID_WINDOW = 59,
-       CCERR_TOTAL_LENGTH_TOO_BIG = 60,
-       CCERR_INVALID_QP_ID = 61,
-       CCERR_ADDR_IN_USE = 62,
-       CCERR_ADDR_NOT_AVAIL = 63,
-       CCERR_NET_DOWN = 64,
-       CCERR_NET_UNREACHABLE = 65,
-       CCERR_CONN_ABORTED = 66,
-       CCERR_CONN_RESET = 67,
-       CCERR_NO_BUFS = 68,
-       CCERR_CONN_TIMEDOUT = 69,
-       CCERR_CONN_REFUSED = 70,
-       CCERR_HOST_UNREACHABLE = 71,
-       CCERR_INVALID_SEND_SGL_DEPTH = 72,
-       CCERR_INVALID_RECV_SGL_DEPTH = 73,
-       CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
-       CCERR_INSUFFICIENT_PRIVILEGES = 75,
-       CCERR_STACK_ERROR = 76,
-       CCERR_INVALID_VERSION = 77,
-       CCERR_INVALID_MTU = 78,
-       CCERR_INVALID_IMAGE = 79,
-       CCERR_PENDING = 98,     /* not an error; used internally by adapter */
-       CCERR_DEFER = 99,       /* not an error; used internally by adapter */
-       CCERR_FAILED_WRITE = 100,
-       CCERR_FAILED_ERASE = 101,
-       CCERR_FAILED_VERIFICATION = 102,
-       CCERR_NOT_FOUND = 103,
-
-};
-
-/*
- * CCAE_ACTIVE_CONNECT_RESULTS status result codes.
- */
-enum c2_connect_status {
-       C2_CONN_STATUS_SUCCESS = C2_OK,
-       C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
-       C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
-       C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
-       C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
-       C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
-       C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
-       C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
-       C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
-       C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
-       C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
-};
-
-/*
- * Flash programming status codes.
- */
-enum c2_flash_status {
-       C2_FLASH_STATUS_SUCCESS = 0x0000,
-       C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
-       C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
-       C2_FLASH_STATUS_ECLBS = 0x0400,
-       C2_FLASH_STATUS_PSLBS = 0x0800,
-       C2_FLASH_STATUS_VPENS = 0x1000,
-};
-
-#endif                         /* _C2_STATUS_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_user.h b/drivers/staging/rdma/amso1100/c2_user.h
deleted file mode 100644 (file)
index 7e9e7ad..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_USER_H
-#define C2_USER_H
-
-#include <linux/types.h>
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct c2_alloc_ucontext_resp {
-       __u32 qp_tab_size;
-       __u32 uarc_size;
-};
-
-struct c2_alloc_pd_resp {
-       __u32 pdn;
-       __u32 reserved;
-};
-
-struct c2_create_cq {
-       __u32 lkey;
-       __u32 pdn;
-       __u64 arm_db_page;
-       __u64 set_db_page;
-       __u32 arm_db_index;
-       __u32 set_db_index;
-};
-
-struct c2_create_cq_resp {
-       __u32 cqn;
-       __u32 reserved;
-};
-
-struct c2_create_qp {
-       __u32 lkey;
-       __u32 reserved;
-       __u64 sq_db_page;
-       __u64 rq_db_page;
-       __u32 sq_db_index;
-       __u32 rq_db_index;
-};
-
-#endif                         /* C2_USER_H */
diff --git a/drivers/staging/rdma/amso1100/c2_vq.c b/drivers/staging/rdma/amso1100/c2_vq.c
deleted file mode 100644 (file)
index 2ec716f..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#include "c2_vq.h"
-#include "c2_provider.h"
-
-/*
- * Verbs Request Objects:
- *
- * VQ Request Objects are allocated by the kernel verbs handlers.
- * They contain a wait object, a refcnt, an atomic bool indicating that the
- * adapter has replied, and a copy of the verb reply work request.
- * A pointer to the VQ Request Object is passed down in the context
- * field of the work request message, and reflected back by the adapter
- * in the verbs reply message.  The function handle_vq() in the interrupt
- * path will use this pointer to:
- *     1) append a copy of the verbs reply message
- *     2) mark that the reply is ready
- *     3) wake up the kernel verbs handler blocked awaiting the reply.
- *
- *
- * The kernel verbs handlers do a "get" to put a 2nd reference on the
- * VQ Request object.  If the kernel verbs handler exits before the adapter
- * can respond, this extra reference will keep the VQ Request object around
- * until the adapter's reply can be processed.  The reason we need this is
- * because a pointer to this object is stuffed into the context field of
- * the verbs work request message, and reflected back in the reply message.
- * It is used in the interrupt handler (handle_vq()) to wake up the appropriate
- * kernel verb handler that is blocked awaiting the verb reply.
- * So handle_vq() will do a "put" on the object when it's done accessing it.
- * NOTE:  If we guarantee that the kernel verb handler will never bail before
- *        getting the reply, then we don't need these refcnts.
- *
- *
- * VQ Request objects are freed by the kernel verbs handlers only
- * after the verb has been processed, or when the adapter fails and
- * does not reply.
- *
- *
- * Verbs Reply Buffers:
- *
- * VQ Reply bufs are local host memory copies of an
- * outstanding Verb Request reply
- * message.  They are always allocated by the kernel verbs handlers, and _may_ be
- * freed by either the kernel verbs handler -or- the interrupt handler.  The
- * kernel verbs handler _must_ free the repbuf, then free the vq request object
- * in that order.
- */
-
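The kernel verbs handlers in c2_rnic.c and c2_qp.c all follow the request/reply discipline described above. A condensed sketch of that flow (illustrative only: the verb id, reply type and error label are placeholders rather than names from the driver, and local declarations of wr, reply and err are assumed):

	struct c2_vq_req *vq_req = vq_req_alloc(c2dev);		/* refcnt = 1 */
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_SOME_VERB);			/* placeholder verb id */
	wr.hdr.context = (unsigned long) vq_req;		/* reflected back by the adapter */

	vq_req_get(c2dev, vq_req);				/* 2nd ref survives a handler bail-out */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);			/* reply never coming: drop the 2nd ref */
		goto bail;
	}

	err = vq_wait_for_reply(c2dev, vq_req);			/* handle_vq() drops the 2nd ref */
	if (err)
		goto bail;

	reply = (struct c2wr_some_verb_rep *) (unsigned long) vq_req->reply_msg;
	err = reply ? c2_errno(reply) : -ENOMEM;
	if (reply)
		vq_repbuf_free(c2dev, reply);			/* free the reply buf before the request */
bail:
	vq_req_free(c2dev, vq_req);
	return err;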
-int vq_init(struct c2_dev *c2dev)
-{
-       sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
-               (char) ('0' + c2dev->devnum));
-       c2dev->host_msg_cache =
-           kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
-                             SLAB_HWCACHE_ALIGN, NULL);
-       if (c2dev->host_msg_cache == NULL) {
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-void vq_term(struct c2_dev *c2dev)
-{
-       kmem_cache_destroy(c2dev->host_msg_cache);
-}
-
-/* vq_req_alloc - allocate a VQ Request Object and initialize it.
- * The refcnt is set to 1.
- */
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
-{
-       struct c2_vq_req *r;
-
-       r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
-       if (r) {
-               init_waitqueue_head(&r->wait_object);
-               r->reply_msg = 0;
-               r->event = 0;
-               r->cm_id = NULL;
-               r->qp = NULL;
-               atomic_set(&r->refcnt, 1);
-               atomic_set(&r->reply_ready, 0);
-       }
-       return r;
-}
-
-
-/* vq_req_free - free the VQ Request Object.  It is assumed the verbs handler
- * has already freed the VQ Reply Buffer if it existed.
- */
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-       r->reply_msg = 0;
-       if (atomic_dec_and_test(&r->refcnt)) {
-               kfree(r);
-       }
-}
-
-/* vq_req_get - reference a VQ Request Object.  Done
- * only in the kernel verbs handlers.
- */
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-       atomic_inc(&r->refcnt);
-}
-
-
-/* vq_req_put - dereference and potentially free a VQ Request Object.
- *
- * This is only called by handle_vq() from the
- * interrupt handler when it is done processing
- * a verb reply message.  If the associated
- * kernel verbs handler has already bailed,
- * then this put will actually free the VQ
- * Request object _and_ the VQ Reply Buffer
- * if it exists.
- */
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-       if (atomic_dec_and_test(&r->refcnt)) {
-               if (r->reply_msg != 0)
-                       vq_repbuf_free(c2dev,
-                                      (void *) (unsigned long) r->reply_msg);
-               kfree(r);
-       }
-}
-
-
-/*
- * vq_repbuf_alloc - allocate a VQ Reply Buffer.
- */
-void *vq_repbuf_alloc(struct c2_dev *c2dev)
-{
-       return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
-}
-
-/*
- * vq_send_wr - post a verbs request message to the Verbs Request Queue.
- * If a message is not available in the MQ, then block until one is available.
- * NOTE: handle_mq() in interrupt context will wake up threads blocked here.
- * When the adapter drains the Verbs Request Queue,
- * it inserts MQ index 0 into the
- * adapter->host activity fifo and interrupts the host.
- */
-int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
-{
-       void *msg;
-       wait_queue_t __wait;
-
-       /*
-        * grab adapter vq lock
-        */
-       spin_lock(&c2dev->vqlock);
-
-       /*
-        * allocate msg
-        */
-       msg = c2_mq_alloc(&c2dev->req_vq);
-
-       /*
-        * If we cannot get a msg, then we'll wait
-        * When a message becomes available, the int handler will wake_up()
-        * any waiters.
-        */
-       while (msg == NULL) {
-               pr_debug("%s:%d no available msg in VQ, waiting...\n",
-                      __func__, __LINE__);
-               init_waitqueue_entry(&__wait, current);
-               add_wait_queue(&c2dev->req_vq_wo, &__wait);
-               spin_unlock(&c2dev->vqlock);
-               for (;;) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       if (!c2_mq_full(&c2dev->req_vq)) {
-                               break;
-                       }
-                       if (!signal_pending(current)) {
-                               schedule_timeout(1 * HZ);       /* 1 second... */
-                               continue;
-                       }
-                       set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&c2dev->req_vq_wo, &__wait);
-                       return -EINTR;
-               }
-               set_current_state(TASK_RUNNING);
-               remove_wait_queue(&c2dev->req_vq_wo, &__wait);
-               spin_lock(&c2dev->vqlock);
-               msg = c2_mq_alloc(&c2dev->req_vq);
-       }
-
-       /*
-        * copy wr into adapter msg
-        */
-       memcpy(msg, wr, c2dev->req_vq.msg_size);
-
-       /*
-        * post msg
-        */
-       c2_mq_produce(&c2dev->req_vq);
-
-       /*
-        * release adapter vq lock
-        */
-       spin_unlock(&c2dev->vqlock);
-       return 0;
-}
-
-
-/*
- * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
- */
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
-{
-       if (!wait_event_timeout(req->wait_object,
-                               atomic_read(&req->reply_ready),
-                               60*HZ))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
-/*
- * vq_repbuf_free - Free a Verbs Reply Buffer.
- */
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
-{
-       kmem_cache_free(c2dev->host_msg_cache, reply);
-}
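The reference counting in vq_req_put() above is what lets the interrupt path and a verbs handler that has already bailed race safely: whichever side drops the last reference frees the request and any reply buffer attached to it. The snippet below is not part of this patch; it is a minimal userspace analogue of that idea, with hypothetical names (struct vq_req, req_put()) and C11 atomics standing in for the kernel's atomic_t.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct vq_req {
	atomic_int refcnt;
	void *reply_buf;	/* NULL until a reply is attached */
};

static void req_put(struct vq_req *r)
{
	/* atomic_fetch_sub() returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&r->refcnt, 1) == 1) {
		free(r->reply_buf);	/* free(NULL) is a no-op */
		free(r);
		puts("request and reply buffer freed");
	}
}

int main(void)
{
	struct vq_req *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	atomic_store(&r->refcnt, 2);	/* one ref for the caller, one for the reply path */
	r->reply_buf = malloc(64);

	req_put(r);	/* caller gives up: object survives, reply path still holds a ref */
	req_put(r);	/* "reply" path drops the last ref: request and buffer are freed */
	return 0;
}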
diff --git a/drivers/staging/rdma/amso1100/c2_vq.h b/drivers/staging/rdma/amso1100/c2_vq.h
deleted file mode 100644 (file)
index c1f6cef..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_VQ_H_
-#define _C2_VQ_H_
-#include <linux/sched.h>
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_provider.h"
-
-struct c2_vq_req {
-       u64 reply_msg;          /* ptr to reply msg */
-       wait_queue_head_t wait_object;  /* wait object for vq reqs */
-       atomic_t reply_ready;   /* set when reply is ready */
-       atomic_t refcnt;        /* used to cancel WRs... */
-       int event;
-       struct iw_cm_id *cm_id;
-       struct c2_qp *qp;
-};
-
-int vq_init(struct c2_dev *c2dev);
-void vq_term(struct c2_dev *c2dev);
-
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
-int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
-
-void *vq_repbuf_alloc(struct c2_dev *c2dev);
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
-
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
-#endif                         /* _C2_VQ_H_ */
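The header above declares the complete verbs-request-queue API, and callers string these calls together in a fixed sequence: allocate a request, stamp its address into the WR context, take a guard reference, post the WR, wait for the reply, then consume and free the reply buffer. The sketch below is not from this patch; it is a simplified, illustrative caller (hypothetical name example_rnic_close(), error handling trimmed, adapter status mapped to a generic -EIO) that assumes the amso1100 headers removed in this series and uses the CCWR_RNIC_CLOSE request/reply pair from c2_wr.h, which follows.

static int example_rnic_close(struct c2_dev *c2dev, u32 rnic_handle)
{
	struct c2_vq_req *vq_req;
	struct c2wr_rnic_close_req wr;
	struct c2wr_rnic_close_rep *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
	wr.hdr.context = (unsigned long) vq_req;	/* echoed back with the reply */
	wr.rnic_handle = rnic_handle;

	vq_req_get(c2dev, vq_req);	/* guard ref: the reply may race with a bail-out */

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);	/* nothing will reply; drop the guard ref */
		goto out;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto out;	/* on timeout, a late reply is still cleaned up via vq_req_put() */

	reply = (struct c2wr_rnic_close_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto out;
	}

	if (c2_wr_get_result(reply))
		err = -EIO;	/* simplified: real callers map the adapter status to an errno */
	vq_repbuf_free(c2dev, reply);
out:
	vq_req_free(c2dev, vq_req);
	return err;
}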
diff --git a/drivers/staging/rdma/amso1100/c2_wr.h b/drivers/staging/rdma/amso1100/c2_wr.h
deleted file mode 100644 (file)
index 8d4b4ca..0000000
+++ /dev/null
@@ -1,1520 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_WR_H_
-#define _C2_WR_H_
-
-#ifdef CCDEBUG
-#define CCWR_MAGIC             0xb07700b0
-#endif
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-/* Maximum allowed size in bytes of private_data exchange
- * on connect.
- */
-#define C2_MAX_PRIVATE_DATA_SIZE 200
-
-/*
- * These types are shared among the adapter, host, and CCIL consumer.
- */
-enum c2_cq_notification_type {
-       C2_CQ_NOTIFICATION_TYPE_NONE = 1,
-       C2_CQ_NOTIFICATION_TYPE_NEXT,
-       C2_CQ_NOTIFICATION_TYPE_NEXT_SE
-};
-
-enum c2_setconfig_cmd {
-       C2_CFG_ADD_ADDR = 1,
-       C2_CFG_DEL_ADDR = 2,
-       C2_CFG_ADD_ROUTE = 3,
-       C2_CFG_DEL_ROUTE = 4
-};
-
-enum c2_getconfig_cmd {
-       C2_GETCONFIG_ROUTES = 1,
-       C2_GETCONFIG_ADDRS
-};
-
-/*
- *  CCIL Work Request Identifiers
- */
-enum c2wr_ids {
-       CCWR_RNIC_OPEN = 1,
-       CCWR_RNIC_QUERY,
-       CCWR_RNIC_SETCONFIG,
-       CCWR_RNIC_GETCONFIG,
-       CCWR_RNIC_CLOSE,
-       CCWR_CQ_CREATE,
-       CCWR_CQ_QUERY,
-       CCWR_CQ_MODIFY,
-       CCWR_CQ_DESTROY,
-       CCWR_QP_CONNECT,
-       CCWR_PD_ALLOC,
-       CCWR_PD_DEALLOC,
-       CCWR_SRQ_CREATE,
-       CCWR_SRQ_QUERY,
-       CCWR_SRQ_MODIFY,
-       CCWR_SRQ_DESTROY,
-       CCWR_QP_CREATE,
-       CCWR_QP_QUERY,
-       CCWR_QP_MODIFY,
-       CCWR_QP_DESTROY,
-       CCWR_NSMR_STAG_ALLOC,
-       CCWR_NSMR_REGISTER,
-       CCWR_NSMR_PBL,
-       CCWR_STAG_DEALLOC,
-       CCWR_NSMR_REREGISTER,
-       CCWR_SMR_REGISTER,
-       CCWR_MR_QUERY,
-       CCWR_MW_ALLOC,
-       CCWR_MW_QUERY,
-       CCWR_EP_CREATE,
-       CCWR_EP_GETOPT,
-       CCWR_EP_SETOPT,
-       CCWR_EP_DESTROY,
-       CCWR_EP_BIND,
-       CCWR_EP_CONNECT,
-       CCWR_EP_LISTEN,
-       CCWR_EP_SHUTDOWN,
-       CCWR_EP_LISTEN_CREATE,
-       CCWR_EP_LISTEN_DESTROY,
-       CCWR_EP_QUERY,
-       CCWR_CR_ACCEPT,
-       CCWR_CR_REJECT,
-       CCWR_CONSOLE,
-       CCWR_TERM,
-       CCWR_FLASH_INIT,
-       CCWR_FLASH,
-       CCWR_BUF_ALLOC,
-       CCWR_BUF_FREE,
-       CCWR_FLASH_WRITE,
-       CCWR_INIT,              /* WARNING: Don't move this ever again! */
-
-
-
-       /* Add new IDs here */
-
-
-
-       /*
-        * WARNING: CCWR_LAST must always be the last verbs id defined!
-        *          All the preceding IDs are fixed, and must not change.
-        *          You can add new IDs, but must not remove or reorder
-        *          any IDs. If you do, YOU will ruin any hope of
-        *          compatibility between versions.
-        */
-       CCWR_LAST,
-
-       /*
-        * Start over at 1 so that arrays indexed by user wr ids
-        * begin at 1.  This is OK since the verbs and user wr ids
-        * are always used on disjoint sets of queues.
-        */
-       /*
-        * The order of the CCWR_SEND_XX verbs must
-        * match the order of the RDMA_OPs
-        */
-       CCWR_SEND = 1,
-       CCWR_SEND_INV,
-       CCWR_SEND_SE,
-       CCWR_SEND_SE_INV,
-       CCWR_RDMA_WRITE,
-       CCWR_RDMA_READ,
-       CCWR_RDMA_READ_INV,
-       CCWR_MW_BIND,
-       CCWR_NSMR_FASTREG,
-       CCWR_STAG_INVALIDATE,
-       CCWR_RECV,
-       CCWR_NOP,
-       CCWR_UNIMPL,
-/* WARNING: This must always be the last user wr id defined! */
-};
-#define RDMA_SEND_OPCODE_FROM_WR_ID(x)   (x+2)
-
-/*
- * SQ/RQ Work Request Types
- */
-enum c2_wr_type {
-       C2_WR_TYPE_SEND = CCWR_SEND,
-       C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
-       C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
-       C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
-       C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
-       C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
-       C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
-       C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
-       C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
-       C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
-       C2_WR_TYPE_RECV = CCWR_RECV,
-       C2_WR_TYPE_NOP = CCWR_NOP,
-};
-
-struct c2_netaddr {
-       __be32 ip_addr;
-       __be32 netmask;
-       u32 mtu;
-};
-
-struct c2_route {
-       u32 ip_addr;            /* 0 indicates the default route */
-       u32 netmask;            /* netmask associated with dst */
-       u32 flags;
-       union {
-               u32 ipaddr;     /* address of the nexthop interface */
-               u8 enaddr[6];
-       } nexthop;
-};
-
-/*
- * A Scatter Gather Entry.
- */
-struct c2_data_addr {
-       __be32 stag;
-       __be32 length;
-       __be64 to;
-};
-
-/*
- * MR and MW flags used by the consumer, RI, and RNIC.
- */
-enum c2_mm_flags {
-       MEM_REMOTE = 0x0001,    /* allow mw binds with remote access. */
-       MEM_VA_BASED = 0x0002,  /* Not Zero-based */
-       MEM_PBL_COMPLETE = 0x0004,      /* PBL array is complete in this msg */
-       MEM_LOCAL_READ = 0x0008,        /* allow local reads */
-       MEM_LOCAL_WRITE = 0x0010,       /* allow local writes */
-       MEM_REMOTE_READ = 0x0020,       /* allow remote reads */
-       MEM_REMOTE_WRITE = 0x0040,      /* allow remote writes */
-       MEM_WINDOW_BIND = 0x0080,       /* binds allowed */
-       MEM_SHARED = 0x0100,    /* set if MR is shared */
-       MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */
-};
-
-/*
- * CCIL API ACF flags defined in terms of the low level mem flags.
- * This minimizes translation needed in the user API
- */
-enum c2_acf {
-       C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
-       C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
-       C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
-       C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
-       C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
-};
-
-/*
- * Image types of objects written to flash
- */
-#define C2_FLASH_IMG_BITFILE 1
-#define C2_FLASH_IMG_OPTION_ROM 2
-#define C2_FLASH_IMG_VPD 3
-
-/*
- *  To fix bug 1815 we define the maximum allowable size of the
- *  terminate message (per the IETF spec; refer to the IETF
- *  protocol specification, section 12.1.6, page 64).
- *  The message is prefixed by 20 bytes of DDP info.
- *
- *  Then the message has 6 bytes for the terminate control
- *  and DDP segment length info plus a DDP header (either
- *  14 or 18 bytes) plus 28 bytes for the RDMA header.
- *  Thus the max size is:
- *  20 + (6 + 18 + 28) = 72
- */
-#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
-
-/*
- * Build String Length.  It must be the same as C2_BUILD_STR_LEN in ccil_api.h
- */
-#define WR_BUILD_STR_LEN 64
-
-/*
- * WARNING:  All of these structs need to align any 64-bit types on
- * 64-bit boundaries!  64-bit types include u64 and __be64.
- */
-
-/*
- * Clustercore Work Request Header.  Be sensitive to field layout
- * and alignment.
- */
-struct c2wr_hdr {
-       /* wqe_count is part of the cqe.  It is put here so the
-        * adapter can write to it while the wr is pending without
-        * clobbering part of the wr.  This word need not be dma'd
-        * from the host to adapter by libccil, but we copy it anyway
-        * to make the memcpy to the adapter better aligned.
-        */
-       __be32 wqe_count;
-
-       /* Put these fields next so that later 32- and 64-bit
-        * quantities are naturally aligned.
-        */
-       u8 id;
-       u8 result;              /* adapter -> host */
-       u8 sge_count;           /* host -> adapter */
-       u8 flags;               /* host -> adapter */
-
-       u64 context;
-#ifdef CCMSGMAGIC
-       u32 magic;
-       u32 pad;
-#endif
-} __attribute__((packed));
-
-/*
- *------------------------ RNIC ------------------------
- */
-
-/*
- * WR_RNIC_OPEN
- */
-
-/*
- * Flags for the RNIC WRs
- */
-enum c2_rnic_flags {
-       RNIC_IRD_STATIC = 0x0001,
-       RNIC_ORD_STATIC = 0x0002,
-       RNIC_QP_STATIC = 0x0004,
-       RNIC_SRQ_SUPPORTED = 0x0008,
-       RNIC_PBL_BLOCK_MODE = 0x0010,
-       RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
-       RNIC_CQ_OVF_DETECTED = 0x0040,
-       RNIC_PRIV_MODE = 0x0080
-};
-
-struct c2wr_rnic_open_req {
-       struct c2wr_hdr hdr;
-       u64 user_context;
-       __be16 flags;           /* See enum c2_rnic_flags */
-       __be16 port_num;
-} __attribute__((packed));
-
-struct c2wr_rnic_open_rep {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-} __attribute__((packed));
-
-union c2wr_rnic_open {
-       struct c2wr_rnic_open_req req;
-       struct c2wr_rnic_open_rep rep;
-} __attribute__((packed));
-
-struct c2wr_rnic_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_QUERY
- */
-struct c2wr_rnic_query_rep {
-       struct c2wr_hdr hdr;
-       u64 user_context;
-       __be32 vendor_id;
-       __be32 part_number;
-       __be32 hw_version;
-       __be32 fw_ver_major;
-       __be32 fw_ver_minor;
-       __be32 fw_ver_patch;
-       char fw_ver_build_str[WR_BUILD_STR_LEN];
-       __be32 max_qps;
-       __be32 max_qp_depth;
-       u32 max_srq_depth;
-       u32 max_send_sgl_depth;
-       u32 max_rdma_sgl_depth;
-       __be32 max_cqs;
-       __be32 max_cq_depth;
-       u32 max_cq_event_handlers;
-       __be32 max_mrs;
-       u32 max_pbl_depth;
-       __be32 max_pds;
-       __be32 max_global_ird;
-       u32 max_global_ord;
-       __be32 max_qp_ird;
-       __be32 max_qp_ord;
-       u32 flags;
-       __be32 max_mws;
-       u32 pbe_range_low;
-       u32 pbe_range_high;
-       u32 max_srqs;
-       u32 page_size;
-} __attribute__((packed));
-
-union c2wr_rnic_query {
-       struct c2wr_rnic_query_req req;
-       struct c2wr_rnic_query_rep rep;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_GETCONFIG
- */
-
-struct c2wr_rnic_getconfig_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 option;             /* see c2_getconfig_cmd_t */
-       u64 reply_buf;
-       u32 reply_buf_len;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_getconfig_rep {
-       struct c2wr_hdr hdr;
-       u32 option;             /* see c2_getconfig_cmd_t */
-       u32 count_len;          /* length of the number of addresses configured */
-} __attribute__((packed)) ;
-
-union c2wr_rnic_getconfig {
-       struct c2wr_rnic_getconfig_req req;
-       struct c2wr_rnic_getconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_SETCONFIG
- */
-struct c2wr_rnic_setconfig_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       __be32 option;          /* See c2_setconfig_cmd_t */
-       /* variable data and pad. See c2_netaddr and c2_route */
-       u8 data[0];
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_setconfig_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_setconfig {
-       struct c2wr_rnic_setconfig_req req;
-       struct c2wr_rnic_setconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_CLOSE
- */
-struct c2wr_rnic_close_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_close_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_close {
-       struct c2wr_rnic_close_req req;
-       struct c2wr_rnic_close_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ CQ ------------------------
- */
-struct c2wr_cq_create_req {
-       struct c2wr_hdr hdr;
-       __be64 shared_ht;
-       u64 user_context;
-       __be64 msg_pool;
-       u32 rnic_handle;
-       __be32 msg_size;
-       __be32 depth;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_create_rep {
-       struct c2wr_hdr hdr;
-       __be32 mq_index;
-       __be32 adapter_shared;
-       u32 cq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_cq_create {
-       struct c2wr_cq_create_req req;
-       struct c2wr_cq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 cq_handle;
-       u32 new_depth;
-       u64 new_msg_pool;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_modify {
-       struct c2wr_cq_modify_req req;
-       struct c2wr_cq_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 cq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_destroy {
-       struct c2wr_cq_destroy_req req;
-       struct c2wr_cq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ PD ------------------------
- */
-struct c2wr_pd_alloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_alloc_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_alloc {
-       struct c2wr_pd_alloc_req req;
-       struct c2wr_pd_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_dealloc {
-       struct c2wr_pd_dealloc_req req;
-       struct c2wr_pd_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ SRQ ------------------------
- */
-struct c2wr_srq_create_req {
-       struct c2wr_hdr hdr;
-       u64 shared_ht;
-       u64 user_context;
-       u32 rnic_handle;
-       u32 srq_depth;
-       u32 srq_limit;
-       u32 sgl_depth;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_create_rep {
-       struct c2wr_hdr hdr;
-       u32 srq_depth;
-       u32 sgl_depth;
-       u32 msg_size;
-       u32 mq_index;
-       u32 mq_start;
-       u32 srq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_srq_create {
-       struct c2wr_srq_create_req req;
-       struct c2wr_srq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 srq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_srq_destroy {
-       struct c2wr_srq_destroy_req req;
-       struct c2wr_srq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ QP ------------------------
- */
-enum c2wr_qp_flags {
-       QP_RDMA_READ = 0x00000001,      /* RDMA read enabled? */
-       QP_RDMA_WRITE = 0x00000002,     /* RDMA write enabled? */
-       QP_MW_BIND = 0x00000004,        /* MWs enabled */
-       QP_ZERO_STAG = 0x00000008,      /* enabled? */
-       QP_REMOTE_TERMINATION = 0x00000010,     /* remote end terminated */
-       QP_RDMA_READ_RESPONSE = 0x00000020      /* Remote RDMA read  */
-           /* enabled? */
-};
-
-struct c2wr_qp_create_req {
-       struct c2wr_hdr hdr;
-       __be64 shared_sq_ht;
-       __be64 shared_rq_ht;
-       u64 user_context;
-       u32 rnic_handle;
-       u32 sq_cq_handle;
-       u32 rq_cq_handle;
-       __be32 sq_depth;
-       __be32 rq_depth;
-       u32 srq_handle;
-       u32 srq_limit;
-       __be32 flags;           /* see enum c2wr_qp_flags */
-       __be32 send_sgl_depth;
-       __be32 recv_sgl_depth;
-       __be32 rdma_write_sgl_depth;
-       __be32 ord;
-       __be32 ird;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_create_rep {
-       struct c2wr_hdr hdr;
-       __be32 sq_depth;
-       __be32 rq_depth;
-       u32 send_sgl_depth;
-       u32 recv_sgl_depth;
-       u32 rdma_write_sgl_depth;
-       u32 ord;
-       u32 ird;
-       __be32 sq_msg_size;
-       __be32 sq_mq_index;
-       __be32 sq_mq_start;
-       __be32 rq_msg_size;
-       __be32 rq_mq_index;
-       __be32 rq_mq_start;
-       u32 qp_handle;
-} __attribute__((packed)) ;
-
-union c2wr_qp_create {
-       struct c2wr_qp_create_req req;
-       struct c2wr_qp_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_rep {
-       struct c2wr_hdr hdr;
-       u64 user_context;
-       u32 rnic_handle;
-       u32 sq_depth;
-       u32 rq_depth;
-       u32 send_sgl_depth;
-       u32 rdma_write_sgl_depth;
-       u32 recv_sgl_depth;
-       u32 ord;
-       u32 ird;
-       u16 qp_state;
-       u16 flags;              /* see c2wr_qp_flags_t */
-       u32 qp_id;
-       u32 local_addr;
-       u32 remote_addr;
-       u16 local_port;
-       u16 remote_port;
-       u32 terminate_msg_length;       /* 0 if not present */
-       u8 data[0];
-       /* Terminate Message in-line here. */
-} __attribute__((packed)) ;
-
-union c2wr_qp_query {
-       struct c2wr_qp_query_req req;
-       struct c2wr_qp_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_req {
-       struct c2wr_hdr hdr;
-       u64 stream_msg;
-       u32 stream_msg_length;
-       u32 rnic_handle;
-       u32 qp_handle;
-       __be32 next_qp_state;
-       __be32 ord;
-       __be32 ird;
-       __be32 sq_depth;
-       __be32 rq_depth;
-       u32 llp_ep_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_rep {
-       struct c2wr_hdr hdr;
-       u32 ord;
-       u32 ird;
-       u32 sq_depth;
-       u32 rq_depth;
-       u32 sq_msg_size;
-       u32 sq_mq_index;
-       u32 sq_mq_start;
-       u32 rq_msg_size;
-       u32 rq_mq_index;
-       u32 rq_mq_start;
-} __attribute__((packed)) ;
-
-union c2wr_qp_modify {
-       struct c2wr_qp_modify_req req;
-       struct c2wr_qp_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_qp_destroy {
-       struct c2wr_qp_destroy_req req;
-       struct c2wr_qp_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * The CCWR_QP_CONNECT msg is posted on the verbs request queue.  It can
- * only be posted when a QP is in IDLE state.  After the connect request is
- * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
- * No synchronous reply is sent from the adapter to this WR.  The results of the
- * connection are passed back in an async event, CCAE_ACTIVE_CONNECT_RESULTS.
- * See c2wr_ae_active_connect_results_t.
- */
-struct c2wr_qp_connect_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 qp_handle;
-       __be32 remote_addr;
-       __be16 remote_port;
-       u16 pad;
-       __be32 private_data_length;
-       u8 private_data[0];     /* Private data in-line. */
-} __attribute__((packed)) ;
-
-struct c2wr_qp_connect {
-       struct c2wr_qp_connect_req req;
-       /* no synchronous reply.         */
-} __attribute__((packed)) ;
-
-
-/*
- *------------------------ MM ------------------------
- */
-
-struct c2wr_nsmr_stag_alloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 pbl_depth;
-       u32 pd_id;
-       u32 flags;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_stag_alloc_rep {
-       struct c2wr_hdr hdr;
-       u32 pbl_depth;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_stag_alloc {
-       struct c2wr_nsmr_stag_alloc_req req;
-       struct c2wr_nsmr_stag_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_req {
-       struct c2wr_hdr hdr;
-       __be64 va;
-       u32 rnic_handle;
-       __be16 flags;
-       u8 stag_key;
-       u8 pad;
-       u32 pd_id;
-       __be32 pbl_depth;
-       __be32 pbe_size;
-       __be32 fbo;
-       __be32 length;
-       __be32 addrs_length;
-       /* array of paddrs (must be aligned on a 64bit boundary) */
-       __be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_rep {
-       struct c2wr_hdr hdr;
-       u32 pbl_depth;
-       __be32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_register {
-       struct c2wr_nsmr_register_req req;
-       struct c2wr_nsmr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       __be32 flags;
-       __be32 stag_index;
-       __be32 addrs_length;
-       /* array of paddrs (must be aligned on a 64bit boundary) */
-       __be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_pbl {
-       struct c2wr_nsmr_pbl_req req;
-       struct c2wr_nsmr_pbl_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_rep {
-       struct c2wr_hdr hdr;
-       u8 stag_key;
-       u8 pad[3];
-       u32 pd_id;
-       u32 flags;
-       u32 pbl_depth;
-} __attribute__((packed)) ;
-
-union c2wr_mr_query {
-       struct c2wr_mr_query_req req;
-       struct c2wr_mr_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_rep {
-       struct c2wr_hdr hdr;
-       u8 stag_key;
-       u8 pad[3];
-       u32 pd_id;
-       u32 flags;
-} __attribute__((packed)) ;
-
-union c2wr_mw_query {
-       struct c2wr_mw_query_req req;
-       struct c2wr_mw_query_rep rep;
-} __attribute__((packed)) ;
-
-
-struct c2wr_stag_dealloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       __be32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_stag_dealloc_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_stag_dealloc {
-       struct c2wr_stag_dealloc_req req;
-       struct c2wr_stag_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_req {
-       struct c2wr_hdr hdr;
-       u64 va;
-       u32 rnic_handle;
-       u16 flags;
-       u8 stag_key;
-       u8 pad;
-       u32 stag_index;
-       u32 pd_id;
-       u32 pbl_depth;
-       u32 pbe_size;
-       u32 fbo;
-       u32 length;
-       u32 addrs_length;
-       u32 pad1;
-       /* array of paddrs (must be aligned on a 64bit boundary) */
-       u64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_rep {
-       struct c2wr_hdr hdr;
-       u32 pbl_depth;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_reregister {
-       struct c2wr_nsmr_reregister_req req;
-       struct c2wr_nsmr_reregister_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_req {
-       struct c2wr_hdr hdr;
-       u64 va;
-       u32 rnic_handle;
-       u16 flags;
-       u8 stag_key;
-       u8 pad;
-       u32 stag_index;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_rep {
-       struct c2wr_hdr hdr;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_smr_register {
-       struct c2wr_smr_register_req req;
-       struct c2wr_smr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_rep {
-       struct c2wr_hdr hdr;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_mw_alloc {
-       struct c2wr_mw_alloc_req req;
-       struct c2wr_mw_alloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ WRs -----------------------
- */
-
-struct c2wr_user_hdr {
-       struct c2wr_hdr hdr;            /* Has status and WR Type */
-} __attribute__((packed)) ;
-
-enum c2_qp_state {
-       C2_QP_STATE_IDLE = 0x01,
-       C2_QP_STATE_CONNECTING = 0x02,
-       C2_QP_STATE_RTS = 0x04,
-       C2_QP_STATE_CLOSING = 0x08,
-       C2_QP_STATE_TERMINATE = 0x10,
-       C2_QP_STATE_ERROR = 0x20,
-};
-
-/* Completion queue entry. */
-struct c2wr_ce {
-       struct c2wr_hdr hdr;            /* Has status and WR Type */
-       u64 qp_user_context;    /* c2_user_qp_t * */
-       u32 qp_state;           /* Current QP State */
-       u32 handle;             /* QPID or EP Handle */
-       __be32 bytes_rcvd;              /* valid for RECV WCs */
-       u32 stag;
-} __attribute__((packed)) ;
-
-
-/*
- * Flags used for all post-sq WRs.  These must fit in the flags
- * field of the struct c2wr_hdr (eight bits).
- */
-enum {
-       SQ_SIGNALED = 0x01,
-       SQ_READ_FENCE = 0x02,
-       SQ_FENCE = 0x04,
-};
-
-/*
- * Common fields for all post-sq WRs.  Namely the standard header and a
- * secondary header with fields common to all post-sq WRs.
- */
-struct c2_sq_hdr {
-       struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * Same as above but for post-rq WRs.
- */
-struct c2_rq_hdr {
-       struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * use the same struct for all sends.
- */
-struct c2wr_send_req {
-       struct c2_sq_hdr sq_hdr;
-       __be32 sge_len;
-       __be32 remote_stag;
-       u8 data[0];             /* SGE array */
-} __attribute__((packed));
-
-union c2wr_send {
-       struct c2wr_send_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_write_req {
-       struct c2_sq_hdr sq_hdr;
-       __be64 remote_to;
-       __be32 remote_stag;
-       __be32 sge_len;
-       u8 data[0];             /* SGE array */
-} __attribute__((packed));
-
-union c2wr_rdma_write {
-       struct c2wr_rdma_write_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_read_req {
-       struct c2_sq_hdr sq_hdr;
-       __be64 local_to;
-       __be64 remote_to;
-       __be32 local_stag;
-       __be32 remote_stag;
-       __be32 length;
-} __attribute__((packed));
-
-union c2wr_rdma_read {
-       struct c2wr_rdma_read_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_mw_bind_req {
-       struct c2_sq_hdr sq_hdr;
-       u64 va;
-       u8 stag_key;
-       u8 pad[3];
-       u32 mw_stag_index;
-       u32 mr_stag_index;
-       u32 length;
-       u32 flags;
-} __attribute__((packed));
-
-union c2wr_mw_bind {
-       struct c2wr_mw_bind_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_nsmr_fastreg_req {
-       struct c2_sq_hdr sq_hdr;
-       u64 va;
-       u8 stag_key;
-       u8 pad[3];
-       u32 stag_index;
-       u32 pbe_size;
-       u32 fbo;
-       u32 length;
-       u32 addrs_length;
-       /* array of paddrs (must be aligned on a 64bit boundary) */
-       u64 paddrs[0];
-} __attribute__((packed));
-
-union c2wr_nsmr_fastreg {
-       struct c2wr_nsmr_fastreg_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_stag_invalidate_req {
-       struct c2_sq_hdr sq_hdr;
-       u8 stag_key;
-       u8 pad[3];
-       u32 stag_index;
-} __attribute__((packed));
-
-union c2wr_stag_invalidate {
-       struct c2wr_stag_invalidate_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-union c2wr_sqwr {
-       struct c2_sq_hdr sq_hdr;
-       struct c2wr_send_req send;
-       struct c2wr_send_req send_se;
-       struct c2wr_send_req send_inv;
-       struct c2wr_send_req send_se_inv;
-       struct c2wr_rdma_write_req rdma_write;
-       struct c2wr_rdma_read_req rdma_read;
-       struct c2wr_mw_bind_req mw_bind;
-       struct c2wr_nsmr_fastreg_req nsmr_fastreg;
-       struct c2wr_stag_invalidate_req stag_inv;
-} __attribute__((packed));
-
-
-/*
- * RQ WRs
- */
-struct c2wr_rqwr {
-       struct c2_rq_hdr rq_hdr;
-       u8 data[0];             /* array of SGEs */
-} __attribute__((packed));
-
-union c2wr_recv {
-       struct c2wr_rqwr req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-/*
- * All AEs start with this header.  Most AEs only need to convey the
- * information in the header.  Some, like LLP connection events, need
- * more info.  The union typedef c2wr_ae_t has all the possible AEs.
- *
- * hdr.context is the user_context from the rnic_open WR.  NULL if this
- * is not affiliated with an rnic.
- *
- * hdr.id is the AE identifier (eg;  CCAE_REMOTE_SHUTDOWN,
- * CCAE_LLP_CLOSE_COMPLETE)
- *
- * resource_type is one of:  C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
- *
- * user_context is the context passed down when the host created the resource.
- */
-struct c2wr_ae_hdr {
-       struct c2wr_hdr hdr;
-       u64 user_context;       /* user context for this res. */
-       __be32 resource_type;   /* see enum c2_resource_indicator */
-       __be32 resource;        /* handle for resource */
-       __be32 qp_state;        /* current QP State */
-} __attribute__((packed));
-
-/*
- * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
- * the adapter moves the QP into RTS state
- */
-struct c2wr_ae_active_connect_results {
-       struct c2wr_ae_hdr ae_hdr;
-       __be32 laddr;
-       __be32 raddr;
-       __be16 lport;
-       __be16 rport;
-       __be32 private_data_length;
-       u8 private_data[0];     /* data is in-line in the msg. */
-} __attribute__((packed));
-
-/*
- * When connections are established by the stack (and the private data
- * MPA frame is received), the adapter will generate an event to the host.
- * The details of the connection, any private data, and the new connection
- * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the
- * AE queue:
- */
-struct c2wr_ae_connection_request {
-       struct c2wr_ae_hdr ae_hdr;
-       u32 cr_handle;          /* connreq handle (sock ptr) */
-       __be32 laddr;
-       __be32 raddr;
-       __be16 lport;
-       __be16 rport;
-       __be32 private_data_length;
-       u8 private_data[0];     /* data is in-line in the msg. */
-} __attribute__((packed));
-
-union c2wr_ae {
-       struct c2wr_ae_hdr ae_generic;
-       struct c2wr_ae_active_connect_results ae_active_connect_results;
-       struct c2wr_ae_connection_request ae_connection_request;
-} __attribute__((packed));
-
-struct c2wr_init_req {
-       struct c2wr_hdr hdr;
-       __be64 hint_count;
-       __be64 q0_host_shared;
-       __be64 q1_host_shared;
-       __be64 q1_host_msg_pool;
-       __be64 q2_host_shared;
-       __be64 q2_host_msg_pool;
-} __attribute__((packed));
-
-struct c2wr_init_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_init {
-       struct c2wr_init_req req;
-       struct c2wr_init_rep rep;
-} __attribute__((packed));
-
-/*
- * For upgrading flash.
- */
-
-struct c2wr_flash_init_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-} __attribute__((packed));
-
-struct c2wr_flash_init_rep {
-       struct c2wr_hdr hdr;
-       u32 adapter_flash_buf_offset;
-       u32 adapter_flash_len;
-} __attribute__((packed));
-
-union c2wr_flash_init {
-       struct c2wr_flash_init_req req;
-       struct c2wr_flash_init_rep rep;
-} __attribute__((packed));
-
-struct c2wr_flash_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 len;
-} __attribute__((packed));
-
-struct c2wr_flash_rep {
-       struct c2wr_hdr hdr;
-       u32 status;
-} __attribute__((packed));
-
-union c2wr_flash {
-       struct c2wr_flash_req req;
-       struct c2wr_flash_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 size;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_rep {
-       struct c2wr_hdr hdr;
-       u32 offset;             /* 0 if mem not available */
-       u32 size;               /* 0 if mem not available */
-} __attribute__((packed));
-
-union c2wr_buf_alloc {
-       struct c2wr_buf_alloc_req req;
-       struct c2wr_buf_alloc_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_free_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 offset;             /* Must match value from alloc */
-       u32 size;               /* Must match value from alloc */
-} __attribute__((packed));
-
-struct c2wr_buf_free_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_buf_free {
-       struct c2wr_buf_free_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_flash_write_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 offset;
-       u32 size;
-       u32 type;
-       u32 flags;
-} __attribute__((packed));
-
-struct c2wr_flash_write_rep {
-       struct c2wr_hdr hdr;
-       u32 status;
-} __attribute__((packed));
-
-union c2wr_flash_write {
-       struct c2wr_flash_write_req req;
-       struct c2wr_flash_write_rep rep;
-} __attribute__((packed));
-
-/*
- * Messages for LLP connection setup.
- */
-
-/*
- * Listen Request.  This allocates a listening endpoint to allow passive
- * connection setup.  Newly established LLP connections are passed up
- * via an AE.  See c2wr_ae_connection_request_t.
- */
-struct c2wr_ep_listen_create_req {
-       struct c2wr_hdr hdr;
-       u64 user_context;       /* returned in AEs. */
-       u32 rnic_handle;
-       __be32 local_addr;              /* local addr, or 0  */
-       __be16 local_port;              /* 0 means "pick one" */
-       u16 pad;
-       __be32 backlog;         /* traditional TCP listen backlog */
-} __attribute__((packed));
-
-struct c2wr_ep_listen_create_rep {
-       struct c2wr_hdr hdr;
-       u32 ep_handle;          /* handle to new listening ep */
-       u16 local_port;         /* resulting port... */
-       u16 pad;
-} __attribute__((packed));
-
-union c2wr_ep_listen_create {
-       struct c2wr_ep_listen_create_req req;
-       struct c2wr_ep_listen_create_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_ep_listen_destroy {
-       struct c2wr_ep_listen_destroy_req req;
-       struct c2wr_ep_listen_destroy_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_query_rep {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 local_addr;
-       u32 remote_addr;
-       u16 local_port;
-       u16 remote_port;
-} __attribute__((packed));
-
-union c2wr_ep_query {
-       struct c2wr_ep_query_req req;
-       struct c2wr_ep_query_rep rep;
-} __attribute__((packed));
-
-
-/*
- * The host passes this down to indicate acceptance of a pending iWARP
- * connection.  The cr_handle was obtained from the CONNECTION_REQUEST
- * AE passed up by the adapter.  See c2wr_ae_connection_request_t.
- */
-struct c2wr_cr_accept_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 qp_handle;          /* QP to bind to this LLP conn */
-       u32 ep_handle;          /* LLP  handle to accept */
-       __be32 private_data_length;
-       u8 private_data[0];     /* data in-line in msg. */
-} __attribute__((packed));
-
-/*
- * adapter sends reply when private data is successfully submitted to
- * the LLP.
- */
-struct c2wr_cr_accept_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_accept {
-       struct c2wr_cr_accept_req req;
-       struct c2wr_cr_accept_rep rep;
-} __attribute__((packed));
-
-/*
- * The host sends this down if a given iWARP connection request was
- * rejected by the consumer.  The cr_handle was obtained from a
- * previous c2wr_ae_connection_request_t AE sent by the adapter.
- */
-struct  c2wr_cr_reject_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 ep_handle;          /* LLP handle to reject */
-} __attribute__((packed));
-
-/*
- * Dunno if this is needed, but we'll add it for now.  The adapter will
- * send the reject_reply after the LLP endpoint has been destroyed.
- */
-struct  c2wr_cr_reject_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_reject {
-       struct c2wr_cr_reject_req req;
-       struct c2wr_cr_reject_rep rep;
-} __attribute__((packed));
-
-/*
- * console command.  Used to implement a debug console over the verbs
- * request and reply queues.
- */
-
-/*
- * Console request message.  It contains:
- *     - message hdr with id = CCWR_CONSOLE
- *     - the physaddr/len of host memory to be used for the reply.
- *     - the command string.  eg:  "netstat -s" or "zoneinfo"
- */
-struct c2wr_console_req {
-       struct c2wr_hdr hdr;            /* id = CCWR_CONSOLE */
-       u64 reply_buf;          /* pinned host buf for reply */
-       u32 reply_buf_len;      /* length of reply buffer */
-       u8 command[0];          /* NUL terminated ascii string */
-       /* containing the command req */
-} __attribute__((packed));
-
-/*
- * flags used in the console reply.
- */
-enum c2_console_flags {
-       CONS_REPLY_TRUNCATED = 0x00000001       /* reply was truncated */
-} __attribute__((packed));
-
-/*
- * Console reply message.
- * hdr.result contains the c2_status_t error if the reply was _not_ generated,
- * or C2_OK if the reply was generated.
- */
-struct c2wr_console_rep {
-       struct c2wr_hdr hdr;            /* id = CCWR_CONSOLE */
-       u32 flags;
-} __attribute__((packed));
-
-union c2wr_console {
-       struct c2wr_console_req req;
-       struct c2wr_console_rep rep;
-} __attribute__((packed));
-
-
-/*
- * Giant union with all WRs.  Makes life easier...
- */
-union c2wr {
-       struct c2wr_hdr hdr;
-       struct c2wr_user_hdr user_hdr;
-       union c2wr_rnic_open rnic_open;
-       union c2wr_rnic_query rnic_query;
-       union c2wr_rnic_getconfig rnic_getconfig;
-       union c2wr_rnic_setconfig rnic_setconfig;
-       union c2wr_rnic_close rnic_close;
-       union c2wr_cq_create cq_create;
-       union c2wr_cq_modify cq_modify;
-       union c2wr_cq_destroy cq_destroy;
-       union c2wr_pd_alloc pd_alloc;
-       union c2wr_pd_dealloc pd_dealloc;
-       union c2wr_srq_create srq_create;
-       union c2wr_srq_destroy srq_destroy;
-       union c2wr_qp_create qp_create;
-       union c2wr_qp_query qp_query;
-       union c2wr_qp_modify qp_modify;
-       union c2wr_qp_destroy qp_destroy;
-       struct c2wr_qp_connect qp_connect;
-       union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
-       union c2wr_nsmr_register nsmr_register;
-       union c2wr_nsmr_pbl nsmr_pbl;
-       union c2wr_mr_query mr_query;
-       union c2wr_mw_query mw_query;
-       union c2wr_stag_dealloc stag_dealloc;
-       union c2wr_sqwr sqwr;
-       struct c2wr_rqwr rqwr;
-       struct c2wr_ce ce;
-       union c2wr_ae ae;
-       union c2wr_init init;
-       union c2wr_ep_listen_create ep_listen_create;
-       union c2wr_ep_listen_destroy ep_listen_destroy;
-       union c2wr_cr_accept cr_accept;
-       union c2wr_cr_reject cr_reject;
-       union c2wr_console console;
-       union c2wr_flash_init flash_init;
-       union c2wr_flash flash;
-       union c2wr_buf_alloc buf_alloc;
-       union c2wr_buf_free buf_free;
-       union c2wr_flash_write flash_write;
-} __attribute__((packed));
-
-
-/*
- * Accessors for the wr fields that are packed together tightly to
- * reduce the wr message size.  The wr arguments are void* so that
- * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the types
- * in the struct c2wr union can be passed in.
- */
-static __inline__ u8 c2_wr_get_id(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->id;
-}
-static __inline__ void c2_wr_set_id(void *wr, u8 id)
-{
-       ((struct c2wr_hdr *) wr)->id = id;
-}
-static __inline__ u8 c2_wr_get_result(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->result;
-}
-static __inline__ void c2_wr_set_result(void *wr, u8 result)
-{
-       ((struct c2wr_hdr *) wr)->result = result;
-}
-static __inline__ u8 c2_wr_get_flags(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->flags;
-}
-static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
-{
-       ((struct c2wr_hdr *) wr)->flags = flags;
-}
-static __inline__ u8 c2_wr_get_sge_count(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->sge_count;
-}
-static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
-{
-       ((struct c2wr_hdr *) wr)->sge_count = sge_count;
-}
-static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->wqe_count;
-}
-static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
-{
-       ((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
-}
-
-#endif                         /* _C2_WR_H_ */
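Not part of the patch: the accessor block at the end of c2_wr.h above relies on one structural convention, namely that every work-request message embeds struct c2wr_hdr as its first member, so a void pointer to any message can be cast to the header type. The self-contained userspace sketch below, with hypothetical names (struct hdr, struct open_req, wr_get_id()/wr_set_id()), demonstrates that same pattern.

#include <stdint.h>
#include <stdio.h>

struct hdr {
	uint8_t id;
	uint8_t result;
};

struct open_req {
	struct hdr hdr;		/* the common header must be the first member */
	uint32_t handle;
};

static inline uint8_t wr_get_id(void *wr)
{
	return ((struct hdr *) wr)->id;
}

static inline void wr_set_id(void *wr, uint8_t id)
{
	((struct hdr *) wr)->id = id;
}

int main(void)
{
	struct open_req req = { .handle = 42 };

	/* the accessors work on any message type through the embedded header */
	wr_set_id(&req, 1);
	printf("id=%u handle=%u\n", (unsigned) wr_get_id(&req), (unsigned) req.handle);
	return 0;
}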
diff --git a/drivers/staging/rdma/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
deleted file mode 100644 (file)
index 3fadd2a..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-config INFINIBAND_EHCA
-       tristate "eHCA support"
-       depends on IBMEBUS
-       ---help---
-       This driver supports the deprecated IBM pSeries eHCA InfiniBand
-       adapter.
-
-       To compile the driver as a module, choose M here. The module
-       will be called ib_ehca.
-
diff --git a/drivers/staging/rdma/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
deleted file mode 100644 (file)
index 74d284e..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#  Authors: Heiko J Schick <schickhj@de.ibm.com>
-#           Christoph Raisch <raisch@de.ibm.com>
-#           Joachim Fenkes <fenkes@de.ibm.com>
-#
-#  Copyright (c) 2005 IBM Corporation
-#
-#  All rights reserved.
-#
-#  This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
-
-obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
-
-ib_ehca-objs  = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
-               ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
-               ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
-
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
deleted file mode 100644 (file)
index 199a4a6..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-9/2015
-
-The ehca driver has been deprecated and moved to drivers/staging/rdma.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
deleted file mode 100644 (file)
index 94e088c..0000000
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  address vector functions
- *
- *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Khadija Souissi <souissik@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *av_cache;
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
-                 enum ib_rate path_rate, u32 *ipd)
-{
-       int path = ib_rate_to_mult(path_rate);
-       int link, ret;
-       struct ib_port_attr pa;
-
-       if (path_rate == IB_RATE_PORT_CURRENT) {
-               *ipd = 0;
-               return 0;
-       }
-
-       if (unlikely(path < 0)) {
-               ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
-                        path_rate);
-               return -EINVAL;
-       }
-
-       ret = ehca_query_port(&shca->ib_device, port, &pa);
-       if (unlikely(ret < 0)) {
-               ehca_err(&shca->ib_device, "Failed to query port  ret=%i", ret);
-               return ret;
-       }
-
-       link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
-
-       if (path >= link)
-               /* no need to throttle if path faster than link */
-               *ipd = 0;
-       else
-               /* IPD = round((link / path) - 1) */
-               *ipd = ((link + (path >> 1)) / path) - 1;
-
-       return 0;
-}
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
-       int ret;
-       struct ehca_av *av;
-       struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-                                             ib_device);
-
-       av = kmem_cache_alloc(av_cache, GFP_KERNEL);
-       if (!av) {
-               ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
-                        pd, ah_attr);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       av->av.sl = ah_attr->sl;
-       av->av.dlid = ah_attr->dlid;
-       av->av.slid_path_bits = ah_attr->src_path_bits;
-
-       if (ehca_static_rate < 0) {
-               u32 ipd;
-
-               if (ehca_calc_ipd(shca, ah_attr->port_num,
-                                 ah_attr->static_rate, &ipd)) {
-                       ret = -EINVAL;
-                       goto create_ah_exit1;
-               }
-               av->av.ipd = ipd;
-       } else
-               av->av.ipd = ehca_static_rate;
-
-       av->av.lnh = ah_attr->ah_flags;
-       av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
-       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
-                                           ah_attr->grh.traffic_class);
-       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
-                                           ah_attr->grh.flow_label);
-       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
-                                           ah_attr->grh.hop_limit);
-       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
-       /* set sgid in grh.word_1 */
-       if (ah_attr->ah_flags & IB_AH_GRH) {
-               int rc;
-               struct ib_port_attr port_attr;
-               union ib_gid gid;
-
-               memset(&port_attr, 0, sizeof(port_attr));
-               rc = ehca_query_port(pd->device, ah_attr->port_num,
-                                    &port_attr);
-               if (rc) { /* invalid port number */
-                       ret = -EINVAL;
-                       ehca_err(pd->device, "Invalid port number "
-                                "ehca_query_port() returned %x "
-                                "pd=%p ah_attr=%p", rc, pd, ah_attr);
-                       goto create_ah_exit1;
-               }
-               memset(&gid, 0, sizeof(gid));
-               rc = ehca_query_gid(pd->device,
-                                   ah_attr->port_num,
-                                   ah_attr->grh.sgid_index, &gid);
-               if (rc) {
-                       ret = -EINVAL;
-                       ehca_err(pd->device, "Failed to retrieve sgid "
-                                "ehca_query_gid() returned %x "
-                                "pd=%p ah_attr=%p", rc, pd, ah_attr);
-                       goto create_ah_exit1;
-               }
-               memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
-       }
-       av->av.pmtu = shca->max_mtu;
-
-       /* dgid comes in grh.word_3 */
-       memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
-              sizeof(ah_attr->grh.dgid));
-
-       return &av->ib_ah;
-
-create_ah_exit1:
-       kmem_cache_free(av_cache, av);
-
-       return ERR_PTR(ret);
-}
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
-       struct ehca_av *av;
-       struct ehca_ud_av new_ehca_av;
-       struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
-                                             ib_device);
-
-       memset(&new_ehca_av, 0, sizeof(new_ehca_av));
-       new_ehca_av.sl = ah_attr->sl;
-       new_ehca_av.dlid = ah_attr->dlid;
-       new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
-       new_ehca_av.ipd = ah_attr->static_rate;
-       new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
-                                        (ah_attr->ah_flags & IB_AH_GRH) > 0);
-       new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
-                                               ah_attr->grh.traffic_class);
-       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
-                                                ah_attr->grh.flow_label);
-       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
-                                                ah_attr->grh.hop_limit);
-       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
-
-       /* set sgid in grh.word_1 */
-       if (ah_attr->ah_flags & IB_AH_GRH) {
-               int rc;
-               struct ib_port_attr port_attr;
-               union ib_gid gid;
-
-               memset(&port_attr, 0, sizeof(port_attr));
-               rc = ehca_query_port(ah->device, ah_attr->port_num,
-                                    &port_attr);
-               if (rc) { /* invalid port number */
-                       ehca_err(ah->device, "Invalid port number "
-                                "ehca_query_port() returned %x "
-                                "ah=%p ah_attr=%p port_num=%x",
-                                rc, ah, ah_attr, ah_attr->port_num);
-                       return -EINVAL;
-               }
-               memset(&gid, 0, sizeof(gid));
-               rc = ehca_query_gid(ah->device,
-                                   ah_attr->port_num,
-                                   ah_attr->grh.sgid_index, &gid);
-               if (rc) {
-                       ehca_err(ah->device, "Failed to retrieve sgid "
-                                "ehca_query_gid() returned %x "
-                                "ah=%p ah_attr=%p port_num=%x "
-                                "sgid_index=%x",
-                                rc, ah, ah_attr, ah_attr->port_num,
-                                ah_attr->grh.sgid_index);
-                       return -EINVAL;
-               }
-               memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
-       }
-
-       new_ehca_av.pmtu = shca->max_mtu;
-
-       memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
-              sizeof(ah_attr->grh.dgid));
-
-       av = container_of(ah, struct ehca_av, ib_ah);
-       av->av = new_ehca_av;
-
-       return 0;
-}
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
-       struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
-
-       memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
-              sizeof(ah_attr->grh.dgid));
-       ah_attr->sl = av->av.sl;
-
-       ah_attr->dlid = av->av.dlid;
-
-       ah_attr->src_path_bits = av->av.slid_path_bits;
-       ah_attr->static_rate = av->av.ipd;
-       ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
-       ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
-                                                   av->av.grh.word_0);
-       ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
-                                               av->av.grh.word_0);
-       ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
-                                                av->av.grh.word_0);
-
-       return 0;
-}
-
-int ehca_destroy_ah(struct ib_ah *ah)
-{
-       kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
-
-       return 0;
-}
-
-int ehca_init_av_cache(void)
-{
-       av_cache = kmem_cache_create("ehca_cache_av",
-                                  sizeof(struct ehca_av), 0,
-                                  SLAB_HWCACHE_ALIGN,
-                                  NULL);
-       if (!av_cache)
-               return -ENOMEM;
-       return 0;
-}
-
-void ehca_cleanup_av_cache(void)
-{
-       kmem_cache_destroy(av_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
deleted file mode 100644 (file)
index e8c3387..0000000
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Struct definition for eHCA internal structures
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_H__
-#define __EHCA_CLASSES_H__
-
-struct ehca_module;
-struct ehca_qp;
-struct ehca_cq;
-struct ehca_eq;
-struct ehca_mr;
-struct ehca_mw;
-struct ehca_pd;
-struct ehca_av;
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-
-#ifdef CONFIG_PPC64
-#include "ehca_classes_pSeries.h"
-#endif
-#include "ipz_pt_fn.h"
-#include "ehca_qes.h"
-#include "ehca_irq.h"
-
-#define EHCA_EQE_CACHE_SIZE 20
-#define EHCA_MAX_NUM_QUEUES 0xffff
-
-struct ehca_eqe_cache_entry {
-       struct ehca_eqe *eqe;
-       struct ehca_cq *cq;
-};
-
-struct ehca_eq {
-       u32 length;
-       struct ipz_queue ipz_queue;
-       struct ipz_eq_handle ipz_eq_handle;
-       struct work_struct work;
-       struct h_galpas galpas;
-       int is_initialized;
-       struct ehca_pfeq pf;
-       spinlock_t spinlock;
-       struct tasklet_struct interrupt_task;
-       u32 ist;
-       spinlock_t irq_spinlock;
-       struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
-};
-
-struct ehca_sma_attr {
-       u16 lid, lmc, sm_sl, sm_lid;
-       u16 pkey_tbl_len, pkeys[16];
-};
-
-struct ehca_sport {
-       struct ib_cq *ibcq_aqp1;
-       struct ib_qp *ibqp_sqp[2];
-       /* lock to serialize modify_qp() calls for sqp in normal
-        * and irq path (when event PORT_ACTIVE is received first time)
-        */
-       spinlock_t mod_sqp_lock;
-       enum ib_port_state port_state;
-       struct ehca_sma_attr saved_attr;
-       u32 pma_qp_nr;
-};
-
-#define HCA_CAP_MR_PGSIZE_4K  0x80000000
-#define HCA_CAP_MR_PGSIZE_64K 0x40000000
-#define HCA_CAP_MR_PGSIZE_1M  0x20000000
-#define HCA_CAP_MR_PGSIZE_16M 0x10000000
-
-struct ehca_shca {
-       struct ib_device ib_device;
-       struct platform_device *ofdev;
-       u8 num_ports;
-       int hw_level;
-       struct list_head shca_list;
-       struct ipz_adapter_handle ipz_hca_handle;
-       struct ehca_sport sport[2];
-       struct ehca_eq eq;
-       struct ehca_eq neq;
-       struct ehca_mr *maxmr;
-       struct ehca_pd *pd;
-       struct h_galpas galpas;
-       struct mutex modify_mutex;
-       u64 hca_cap;
-       /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
-       u32 hca_cap_mr_pgsize;
-       int max_mtu;
-       int max_num_qps;
-       int max_num_cqs;
-       atomic_t num_cqs;
-       atomic_t num_qps;
-};
-
-struct ehca_pd {
-       struct ib_pd ib_pd;
-       struct ipz_pd fw_pd;
-       /* small queue mgmt */
-       struct mutex lock;
-       struct list_head free[2];
-       struct list_head full[2];
-};
-
-enum ehca_ext_qp_type {
-       EQPT_NORMAL    = 0,
-       EQPT_LLQP      = 1,
-       EQPT_SRQBASE   = 2,
-       EQPT_SRQ       = 3,
-};
-
-/* struct to cache modify_qp()'s parms for GSI/SMI qp */
-struct ehca_mod_qp_parm {
-       int mask;
-       struct ib_qp_attr attr;
-};
-
-#define EHCA_MOD_QP_PARM_MAX 4
-
-#define QMAP_IDX_MASK 0xFFFFULL
-
-/* struct for tracking if cqes have been reported to the application */
-struct ehca_qmap_entry {
-       u16 app_wr_id;
-       u8 reported;
-       u8 cqe_req;
-};
-
-struct ehca_queue_map {
-       struct ehca_qmap_entry *map;
-       unsigned int entries;
-       unsigned int tail;
-       unsigned int left_to_poll;
-       unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
-};
-
-/* function to calculate the next index for the qmap */
-static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
-{
-       unsigned int temp = cur_index + 1;
-       return (temp == limit) ? 0 : temp;
-}
-
-struct ehca_qp {
-       union {
-               struct ib_qp ib_qp;
-               struct ib_srq ib_srq;
-       };
-       u32 qp_type;
-       enum ehca_ext_qp_type ext_type;
-       enum ib_qp_state state;
-       struct ipz_queue ipz_squeue;
-       struct ehca_queue_map sq_map;
-       struct ipz_queue ipz_rqueue;
-       struct ehca_queue_map rq_map;
-       struct h_galpas galpas;
-       u32 qkey;
-       u32 real_qp_num;
-       u32 token;
-       spinlock_t spinlock_s;
-       spinlock_t spinlock_r;
-       u32 sq_max_inline_data_size;
-       struct ipz_qp_handle ipz_qp_handle;
-       struct ehca_pfqp pf;
-       struct ib_qp_init_attr init_attr;
-       struct ehca_cq *send_cq;
-       struct ehca_cq *recv_cq;
-       unsigned int sqerr_purgeflag;
-       struct hlist_node list_entries;
-       /* array to cache modify_qp()'s parms for GSI/SMI qp */
-       struct ehca_mod_qp_parm *mod_qp_parm;
-       int mod_qp_parm_idx;
-       /* mmap counter for resources mapped into user space */
-       u32 mm_count_squeue;
-       u32 mm_count_rqueue;
-       u32 mm_count_galpa;
-       /* unsolicited ack circumvention */
-       int unsol_ack_circ;
-       int mtu_shift;
-       u32 message_count;
-       u32 packet_count;
-       atomic_t nr_events; /* events seen */
-       wait_queue_head_t wait_completion;
-       int mig_armed;
-       struct list_head sq_err_node;
-       struct list_head rq_err_node;
-};
-
-#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
-#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
-#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
-
-/* must be power of 2 */
-#define QP_HASHTAB_LEN 8
-
-struct ehca_cq {
-       struct ib_cq ib_cq;
-       struct ipz_queue ipz_queue;
-       struct h_galpas galpas;
-       spinlock_t spinlock;
-       u32 cq_number;
-       u32 token;
-       u32 nr_of_entries;
-       struct ipz_cq_handle ipz_cq_handle;
-       struct ehca_pfcq pf;
-       spinlock_t cb_lock;
-       struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
-       struct list_head entry;
-       u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
-       atomic_t nr_events; /* #events seen */
-       wait_queue_head_t wait_completion;
-       spinlock_t task_lock;
-       /* mmap counter for resources mapped into user space */
-       u32 mm_count_queue;
-       u32 mm_count_galpa;
-       struct list_head sqp_err_list;
-       struct list_head rqp_err_list;
-};
-
-enum ehca_mr_flag {
-       EHCA_MR_FLAG_FMR = 0x80000000,   /* FMR, created with ehca_alloc_fmr */
-       EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR                           */
-};
-
-struct ehca_mr {
-       union {
-               struct ib_mr ib_mr;     /* must always be first in ehca_mr */
-               struct ib_fmr ib_fmr;   /* must always be first in ehca_mr */
-       } ib;
-       struct ib_umem *umem;
-       spinlock_t mrlock;
-
-       enum ehca_mr_flag flags;
-       u32 num_kpages;         /* number of kernel pages */
-       u32 num_hwpages;        /* number of hw pages to form MR */
-       u64 hwpage_size;        /* hw page size used for this MR */
-       int acl;                /* ACL (stored here for usage in reregister) */
-       u64 *start;             /* virtual start address (stored here for */
-                               /* usage in reregister) */
-       u64 size;               /* size (stored here for usage in reregister) */
-       u32 fmr_page_size;      /* page size for FMR */
-       u32 fmr_max_pages;      /* max pages for FMR */
-       u32 fmr_max_maps;       /* max outstanding maps for FMR */
-       u32 fmr_map_cnt;        /* map counter for FMR */
-       /* fw specific data */
-       struct ipz_mrmw_handle ipz_mr_handle;   /* MR handle for h-calls */
-       struct h_galpas galpas;
-};
-
-struct ehca_mw {
-       struct ib_mw ib_mw;     /* gen2 mw, must always be first in ehca_mw */
-       spinlock_t mwlock;
-
-       u8 never_bound;         /* indication MW was never bound */
-       struct ipz_mrmw_handle ipz_mw_handle;   /* MW handle for h-calls */
-       struct h_galpas galpas;
-};
-
-enum ehca_mr_pgi_type {
-       EHCA_MR_PGI_PHYS   = 1,  /* type of ehca_reg_phys_mr,
-                                 * ehca_rereg_phys_mr,
-                                 * ehca_reg_internal_maxmr */
-       EHCA_MR_PGI_USER   = 2,  /* type of ehca_reg_user_mr */
-       EHCA_MR_PGI_FMR    = 3   /* type of ehca_map_phys_fmr */
-};
-
-struct ehca_mr_pginfo {
-       enum ehca_mr_pgi_type type;
-       u64 num_kpages;
-       u64 kpage_cnt;
-       u64 hwpage_size;     /* hw page size used for this MR */
-       u64 num_hwpages;     /* number of hw pages */
-       u64 hwpage_cnt;      /* counter for hw pages */
-       u64 next_hwpage;     /* next hw page in buffer/chunk/listelem */
-
-       union {
-               struct { /* type EHCA_MR_PGI_PHYS section */
-                       u64 addr;
-                       u16 size;
-               } phy;
-               struct { /* type EHCA_MR_PGI_USER section */
-                       struct ib_umem *region;
-                       struct scatterlist *next_sg;
-                       u64 next_nmap;
-               } usr;
-               struct { /* type EHCA_MR_PGI_FMR section */
-                       u64 fmr_pgsize;
-                       u64 *page_list;
-                       u64 next_listelem;
-               } fmr;
-       } u;
-};
-
-/* output parameters for MR/FMR hipz calls */
-struct ehca_mr_hipzout_parms {
-       struct ipz_mrmw_handle handle;
-       u32 lkey;
-       u32 rkey;
-       u64 len;
-       u64 vaddr;
-       u32 acl;
-};
-
-/* output parameters for MW hipz calls */
-struct ehca_mw_hipzout_parms {
-       struct ipz_mrmw_handle handle;
-       u32 rkey;
-};
-
-struct ehca_av {
-       struct ib_ah ib_ah;
-       struct ehca_ud_av av;
-};
-
-struct ehca_ucontext {
-       struct ib_ucontext ib_ucontext;
-};
-
-int ehca_init_pd_cache(void);
-void ehca_cleanup_pd_cache(void);
-int ehca_init_cq_cache(void);
-void ehca_cleanup_cq_cache(void);
-int ehca_init_qp_cache(void);
-void ehca_cleanup_qp_cache(void);
-int ehca_init_av_cache(void);
-void ehca_cleanup_av_cache(void);
-int ehca_init_mrmw_cache(void);
-void ehca_cleanup_mrmw_cache(void);
-int ehca_init_small_qp_cache(void);
-void ehca_cleanup_small_qp_cache(void);
-
-extern rwlock_t ehca_qp_idr_lock;
-extern rwlock_t ehca_cq_idr_lock;
-extern struct idr ehca_qp_idr;
-extern struct idr ehca_cq_idr;
-extern spinlock_t shca_list_lock;
-
-extern int ehca_static_rate;
-extern int ehca_port_act_time;
-extern bool ehca_use_hp_mr;
-extern bool ehca_scaling_code;
-extern int ehca_lock_hcalls;
-extern int ehca_nr_ports;
-extern int ehca_max_cq;
-extern int ehca_max_qp;
-
-struct ipzu_queue_resp {
-       u32 qe_size;      /* queue entry size */
-       u32 act_nr_of_sg;
-       u32 queue_length; /* queue length allocated in bytes */
-       u32 pagesize;
-       u32 toggle_state;
-       u32 offset; /* save offset within a page for small_qp */
-};
-
-struct ehca_create_cq_resp {
-       u32 cq_number;
-       u32 token;
-       struct ipzu_queue_resp ipz_queue;
-       u32 fw_handle_ofs;
-       u32 dummy;
-};
-
-struct ehca_create_qp_resp {
-       u32 qp_num;
-       u32 token;
-       u32 qp_type;
-       u32 ext_type;
-       u32 qkey;
-       /* qp_num assigned by ehca: sqp0/1 may have got different numbers */
-       u32 real_qp_num;
-       u32 fw_handle_ofs;
-       u32 dummy;
-       struct ipzu_queue_resp ipz_squeue;
-       struct ipzu_queue_resp ipz_rqueue;
-};
-
-struct ehca_alloc_cq_parms {
-       u32 nr_cqe;
-       u32 act_nr_of_entries;
-       u32 act_pages;
-       struct ipz_eq_handle eq_handle;
-};
-
-enum ehca_service_type {
-       ST_RC  = 0,
-       ST_UC  = 1,
-       ST_RD  = 2,
-       ST_UD  = 3,
-};
-
-enum ehca_ll_comp_flags {
-       LLQP_SEND_COMP = 0x20,
-       LLQP_RECV_COMP = 0x40,
-       LLQP_COMP_MASK = 0x60,
-};
-
-struct ehca_alloc_queue_parms {
-       /* input parameters */
-       int max_wr;
-       int max_sge;
-       int page_size;
-       int is_small;
-
-       /* output parameters */
-       u16 act_nr_wqes;
-       u8  act_nr_sges;
-       u32 queue_size; /* bytes for small queues, pages otherwise */
-};
-
-struct ehca_alloc_qp_parms {
-       struct ehca_alloc_queue_parms squeue;
-       struct ehca_alloc_queue_parms rqueue;
-
-       /* input parameters */
-       enum ehca_service_type servicetype;
-       int qp_storage;
-       int sigtype;
-       enum ehca_ext_qp_type ext_type;
-       enum ehca_ll_comp_flags ll_comp_flags;
-       int ud_av_l_key_ctl;
-
-       u32 token;
-       struct ipz_eq_handle eq_handle;
-       struct ipz_pd pd;
-       struct ipz_cq_handle send_cq_handle, recv_cq_handle;
-
-       u32 srq_qpn, srq_token, srq_limit;
-
-       /* output parameters */
-       u32 real_qp_num;
-       struct ipz_qp_handle qp_handle;
-       struct h_galpas galpas;
-};
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
deleted file mode 100644 (file)
index 689c357..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  pSeries interface definitions
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_PSERIES_H__
-#define __EHCA_CLASSES_PSERIES_H__
-
-#include "hcp_phyp.h"
-#include "ipz_pt_fn.h"
-
-
-struct ehca_pfqp {
-       struct ipz_qpt sqpt;
-       struct ipz_qpt rqpt;
-};
-
-struct ehca_pfcq {
-       struct ipz_qpt qpt;
-       u32 cqnr;
-};
-
-struct ehca_pfeq {
-       struct ipz_qpt qpt;
-       struct h_galpa galpa;
-       u32 eqnr;
-};
-
-struct ipz_adapter_handle {
-       u64 handle;
-};
-
-struct ipz_cq_handle {
-       u64 handle;
-};
-
-struct ipz_eq_handle {
-       u64 handle;
-};
-
-struct ipz_qp_handle {
-       u64 handle;
-};
-struct ipz_mrmw_handle {
-       u64 handle;
-};
-
-struct ipz_pd {
-       u32 value;
-};
-
-struct hcp_modify_qp_control_block {
-       u32 qkey;                      /* 00 */
-       u32 rdd;                       /* reliable datagram domain */
-       u32 send_psn;                  /* 02 */
-       u32 receive_psn;               /* 03 */
-       u32 prim_phys_port;            /* 04 */
-       u32 alt_phys_port;             /* 05 */
-       u32 prim_p_key_idx;            /* 06 */
-       u32 alt_p_key_idx;             /* 07 */
-       u32 rdma_atomic_ctrl;          /* 08 */
-       u32 qp_state;                  /* 09 */
-       u32 reserved_10;               /* 10 */
-       u32 rdma_nr_atomic_resp_res;   /* 11 */
-       u32 path_migration_state;      /* 12 */
-       u32 rdma_atomic_outst_dest_qp; /* 13 */
-       u32 dest_qp_nr;                /* 14 */
-       u32 min_rnr_nak_timer_field;   /* 15 */
-       u32 service_level;             /* 16 */
-       u32 send_grh_flag;             /* 17 */
-       u32 retry_count;               /* 18 */
-       u32 timeout;                   /* 19 */
-       u32 path_mtu;                  /* 20 */
-       u32 max_static_rate;           /* 21 */
-       u32 dlid;                      /* 22 */
-       u32 rnr_retry_count;           /* 23 */
-       u32 source_path_bits;          /* 24 */
-       u32 traffic_class;             /* 25 */
-       u32 hop_limit;                 /* 26 */
-       u32 source_gid_idx;            /* 27 */
-       u32 flow_label;                /* 28 */
-       u32 reserved_29;               /* 29 */
-       union {                        /* 30 */
-               u64 dw[2];
-               u8 byte[16];
-       } dest_gid;
-       u32 service_level_al;          /* 34 */
-       u32 send_grh_flag_al;          /* 35 */
-       u32 retry_count_al;            /* 36 */
-       u32 timeout_al;                /* 37 */
-       u32 max_static_rate_al;        /* 38 */
-       u32 dlid_al;                   /* 39 */
-       u32 rnr_retry_count_al;        /* 40 */
-       u32 source_path_bits_al;       /* 41 */
-       u32 traffic_class_al;          /* 42 */
-       u32 hop_limit_al;              /* 43 */
-       u32 source_gid_idx_al;         /* 44 */
-       u32 flow_label_al;             /* 45 */
-       u32 reserved_46;               /* 46 */
-       u32 reserved_47;               /* 47 */
-       union {                        /* 48 */
-               u64 dw[2];
-               u8 byte[16];
-       } dest_gid_al;
-       u32 max_nr_outst_send_wr;      /* 52 */
-       u32 max_nr_outst_recv_wr;      /* 53 */
-       u32 disable_ete_credit_check;  /* 54 */
-       u32 qp_number;                 /* 55 */
-       u64 send_queue_handle;         /* 56 */
-       u64 recv_queue_handle;         /* 58 */
-       u32 actual_nr_sges_in_sq_wqe;  /* 60 */
-       u32 actual_nr_sges_in_rq_wqe;  /* 61 */
-       u32 qp_enable;                 /* 62 */
-       u32 curr_srq_limit;            /* 63 */
-       u64 qp_aff_asyn_ev_log_reg;    /* 64 */
-       u64 shared_rq_hndl;            /* 66 */
-       u64 trigg_doorbell_qp_hndl;    /* 68 */
-       u32 reserved_70_127[58];       /* 70 */
-};
-
-#define MQPCB_MASK_QKEY                         EHCA_BMASK_IBM( 0,  0)
-#define MQPCB_MASK_SEND_PSN                     EHCA_BMASK_IBM( 2,  2)
-#define MQPCB_MASK_RECEIVE_PSN                  EHCA_BMASK_IBM( 3,  3)
-#define MQPCB_MASK_PRIM_PHYS_PORT               EHCA_BMASK_IBM( 4,  4)
-#define MQPCB_PRIM_PHYS_PORT                    EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_PHYS_PORT                EHCA_BMASK_IBM( 5,  5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX               EHCA_BMASK_IBM( 6,  6)
-#define MQPCB_PRIM_P_KEY_IDX                    EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM( 7,  7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM( 8,  8)
-#define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM( 9,  9)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11, 11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12, 12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13, 13)
-#define MQPCB_MASK_DEST_QP_NR                   EHCA_BMASK_IBM(14, 14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD      EHCA_BMASK_IBM(15, 15)
-#define MQPCB_MASK_SERVICE_LEVEL                EHCA_BMASK_IBM(16, 16)
-#define MQPCB_MASK_SEND_GRH_FLAG                EHCA_BMASK_IBM(17, 17)
-#define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18, 18)
-#define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19, 19)
-#define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20, 20)
-#define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22, 22)
-#define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23, 23)
-#define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24, 24)
-#define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25, 25)
-#define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26, 26)
-#define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27, 27)
-#define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28, 28)
-#define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30, 30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31, 31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32, 32)
-#define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33, 33)
-#define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34, 34)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36, 36)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37, 37)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38, 38)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39, 39)
-#define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40, 40)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41, 41)
-#define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42, 42)
-#define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44, 44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47, 47)
-#define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48, 48)
-#define MQPCB_MASK_CURR_SRQ_LIMIT               EHCA_BMASK_IBM(49, 49)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50, 50)
-#define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51, 51)
-
-#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/staging/rdma/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
deleted file mode 100644 (file)
index 1aa7931..0000000
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Completion queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *cq_cache;
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
-{
-       unsigned int qp_num = qp->real_qp_num;
-       unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
-       unsigned long flags;
-
-       spin_lock_irqsave(&cq->spinlock, flags);
-       hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
-       spin_unlock_irqrestore(&cq->spinlock, flags);
-
-       ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
-                cq->cq_number, qp_num);
-
-       return 0;
-}
-
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
-{
-       int ret = -EINVAL;
-       unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
-       struct hlist_node *iter;
-       struct ehca_qp *qp;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cq->spinlock, flags);
-       hlist_for_each(iter, &cq->qp_hashtab[key]) {
-               qp = hlist_entry(iter, struct ehca_qp, list_entries);
-               if (qp->real_qp_num == real_qp_num) {
-                       hlist_del(iter);
-                       ehca_dbg(cq->ib_cq.device,
-                                "removed qp from cq. cq_num=%x real_qp_num=%x",
-                                cq->cq_number, real_qp_num);
-                       ret = 0;
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&cq->spinlock, flags);
-       if (ret)
-               ehca_err(cq->ib_cq.device,
-                        "qp not found cq_num=%x real_qp_num=%x",
-                        cq->cq_number, real_qp_num);
-
-       return ret;
-}
-
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
-{
-       struct ehca_qp *ret = NULL;
-       unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
-       struct hlist_node *iter;
-       struct ehca_qp *qp;
-       hlist_for_each(iter, &cq->qp_hashtab[key]) {
-               qp = hlist_entry(iter, struct ehca_qp, list_entries);
-               if (qp->real_qp_num == real_qp_num) {
-                       ret = qp;
-                       break;
-               }
-       }
-       return ret;
-}
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
-                            const struct ib_cq_init_attr *attr,
-                            struct ib_ucontext *context,
-                            struct ib_udata *udata)
-{
-       int cqe = attr->cqe;
-       static const u32 additional_cqe = 20;
-       struct ib_cq *cq;
-       struct ehca_cq *my_cq;
-       struct ehca_shca *shca =
-               container_of(device, struct ehca_shca, ib_device);
-       struct ipz_adapter_handle adapter_handle;
-       struct ehca_alloc_cq_parms param; /* h_call's out parameters */
-       struct h_galpa gal;
-       void *vpage;
-       u32 counter;
-       u64 rpage, cqx_fec, h_ret;
-       int rc, i;
-       unsigned long flags;
-
-       if (attr->flags)
-               return ERR_PTR(-EINVAL);
-
-       if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
-               return ERR_PTR(-EINVAL);
-
-       if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
-               ehca_err(device, "Unable to create CQ, max number of %i "
-                       "CQs reached.", shca->max_num_cqs);
-               ehca_err(device, "To increase the maximum number of CQs "
-                       "use the number_of_cqs module parameter.\n");
-               return ERR_PTR(-ENOSPC);
-       }
-
-       my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
-       if (!my_cq) {
-               ehca_err(device, "Out of memory for ehca_cq struct device=%p",
-                        device);
-               atomic_dec(&shca->num_cqs);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
-
-       spin_lock_init(&my_cq->spinlock);
-       spin_lock_init(&my_cq->cb_lock);
-       spin_lock_init(&my_cq->task_lock);
-       atomic_set(&my_cq->nr_events, 0);
-       init_waitqueue_head(&my_cq->wait_completion);
-
-       cq = &my_cq->ib_cq;
-
-       adapter_handle = shca->ipz_hca_handle;
-       param.eq_handle = shca->eq.ipz_eq_handle;
-
-       idr_preload(GFP_KERNEL);
-       write_lock_irqsave(&ehca_cq_idr_lock, flags);
-       rc = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
-       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-       idr_preload_end();
-
-       if (rc < 0) {
-               cq = ERR_PTR(-ENOMEM);
-               ehca_err(device, "Can't allocate new idr entry. device=%p",
-                        device);
-               goto create_cq_exit1;
-       }
-       my_cq->token = rc;
-
-       /*
-        * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
-        * for receiving errors CQEs.
-        */
-       param.nr_cqe = cqe + additional_cqe;
-       h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
-
-       if (h_ret != H_SUCCESS) {
-               ehca_err(device, "hipz_h_alloc_resource_cq() failed "
-                        "h_ret=%lli device=%p", h_ret, device);
-               cq = ERR_PTR(ehca2ib_return_code(h_ret));
-               goto create_cq_exit2;
-       }
-
-       rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
-                               EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
-       if (!rc) {
-               ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
-                        rc, device);
-               cq = ERR_PTR(-EINVAL);
-               goto create_cq_exit3;
-       }
-
-       for (counter = 0; counter < param.act_pages; counter++) {
-               vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
-               if (!vpage) {
-                       ehca_err(device, "ipz_qpageit_get_inc() "
-                                "returns NULL device=%p", device);
-                       cq = ERR_PTR(-EAGAIN);
-                       goto create_cq_exit4;
-               }
-               rpage = __pa(vpage);
-
-               h_ret = hipz_h_register_rpage_cq(adapter_handle,
-                                                my_cq->ipz_cq_handle,
-                                                &my_cq->pf,
-                                                0,
-                                                0,
-                                                rpage,
-                                                1,
-                                                my_cq->galpas.
-                                                kernel);
-
-               if (h_ret < H_SUCCESS) {
-                       ehca_err(device, "hipz_h_register_rpage_cq() failed "
-                                "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
-                                "act_pages=%i", my_cq, my_cq->cq_number,
-                                h_ret, counter, param.act_pages);
-                       cq = ERR_PTR(-EINVAL);
-                       goto create_cq_exit4;
-               }
-
-               if (counter == (param.act_pages - 1)) {
-                       vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
-                       if ((h_ret != H_SUCCESS) || vpage) {
-                               ehca_err(device, "Registration of pages not "
-                                        "complete ehca_cq=%p cq_num=%x "
-                                        "h_ret=%lli", my_cq, my_cq->cq_number,
-                                        h_ret);
-                               cq = ERR_PTR(-EAGAIN);
-                               goto create_cq_exit4;
-                       }
-               } else {
-                       if (h_ret != H_PAGE_REGISTERED) {
-                               ehca_err(device, "Registration of page failed "
-                                        "ehca_cq=%p cq_num=%x h_ret=%lli "
-                                        "counter=%i act_pages=%i",
-                                        my_cq, my_cq->cq_number,
-                                        h_ret, counter, param.act_pages);
-                               cq = ERR_PTR(-ENOMEM);
-                               goto create_cq_exit4;
-                       }
-               }
-       }
-
-       ipz_qeit_reset(&my_cq->ipz_queue);
-
-       gal = my_cq->galpas.kernel;
-       cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
-       ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
-                my_cq, my_cq->cq_number, cqx_fec);
-
-       my_cq->ib_cq.cqe = my_cq->nr_of_entries =
-               param.act_nr_of_entries - additional_cqe;
-       my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
-
-       for (i = 0; i < QP_HASHTAB_LEN; i++)
-               INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
-
-       INIT_LIST_HEAD(&my_cq->sqp_err_list);
-       INIT_LIST_HEAD(&my_cq->rqp_err_list);
-
-       if (context) {
-               struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
-               struct ehca_create_cq_resp resp;
-               memset(&resp, 0, sizeof(resp));
-               resp.cq_number = my_cq->cq_number;
-               resp.token = my_cq->token;
-               resp.ipz_queue.qe_size = ipz_queue->qe_size;
-               resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
-               resp.ipz_queue.queue_length = ipz_queue->queue_length;
-               resp.ipz_queue.pagesize = ipz_queue->pagesize;
-               resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
-               resp.fw_handle_ofs = (u32)
-                       (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
-               if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-                       ehca_err(device, "Copy to udata failed.");
-                       cq = ERR_PTR(-EFAULT);
-                       goto create_cq_exit4;
-               }
-       }
-
-       return cq;
-
-create_cq_exit4:
-       ipz_queue_dtor(NULL, &my_cq->ipz_queue);
-
-create_cq_exit3:
-       h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
-       if (h_ret != H_SUCCESS)
-               ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
-                        "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
-
-create_cq_exit2:
-       write_lock_irqsave(&ehca_cq_idr_lock, flags);
-       idr_remove(&ehca_cq_idr, my_cq->token);
-       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-create_cq_exit1:
-       kmem_cache_free(cq_cache, my_cq);
-
-       atomic_dec(&shca->num_cqs);
-       return cq;
-}
-
-int ehca_destroy_cq(struct ib_cq *cq)
-{
-       u64 h_ret;
-       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-       int cq_num = my_cq->cq_number;
-       struct ib_device *device = cq->device;
-       struct ehca_shca *shca = container_of(device, struct ehca_shca,
-                                             ib_device);
-       struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-       unsigned long flags;
-
-       if (cq->uobject) {
-               if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
-                       ehca_err(device, "Resources still referenced in "
-                                "user space cq_num=%x", my_cq->cq_number);
-                       return -EINVAL;
-               }
-       }
-
-       /*
-        * remove the CQ from the idr first to make sure
-        * no more interrupt tasklets will touch this CQ
-        */
-       write_lock_irqsave(&ehca_cq_idr_lock, flags);
-       idr_remove(&ehca_cq_idr, my_cq->token);
-       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-       /* now wait until all pending events have completed */
-       wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
-
-       /* nobody's using our CQ any longer -- we can destroy it */
-       h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
-       if (h_ret == H_R_STATE) {
-               /* cq in err: read err data and destroy it forcibly */
-               ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
-                        "state. Try to delete it forcibly.",
-                        my_cq, cq_num, my_cq->ipz_cq_handle.handle);
-               ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
-               h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
-               if (h_ret == H_SUCCESS)
-                       ehca_dbg(device, "cq_num=%x deleted successfully.",
-                                cq_num);
-       }
-       if (h_ret != H_SUCCESS) {
-               ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
-                        "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
-               return ehca2ib_return_code(h_ret);
-       }
-       ipz_queue_dtor(NULL, &my_cq->ipz_queue);
-       kmem_cache_free(cq_cache, my_cq);
-
-       atomic_dec(&shca->num_cqs);
-       return 0;
-}
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
-       /* TODO: proper resize needs to be done */
-       ehca_err(cq->device, "not implemented yet");
-
-       return -EFAULT;
-}
-
-int ehca_init_cq_cache(void)
-{
-       cq_cache = kmem_cache_create("ehca_cache_cq",
-                                    sizeof(struct ehca_cq), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!cq_cache)
-               return -ENOMEM;
-       return 0;
-}
-
-void ehca_cleanup_cq_cache(void)
-{
-       kmem_cache_destroy(cq_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
deleted file mode 100644 (file)
index 90da674..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Event queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_qes.h"
-#include "hcp_if.h"
-#include "ipz_pt_fn.h"
-
-int ehca_create_eq(struct ehca_shca *shca,
-                  struct ehca_eq *eq,
-                  const enum ehca_eq_type type, const u32 length)
-{
-       int ret;
-       u64 h_ret;
-       u32 nr_pages;
-       u32 i;
-       void *vpage;
-       struct ib_device *ib_dev = &shca->ib_device;
-
-       spin_lock_init(&eq->spinlock);
-       spin_lock_init(&eq->irq_spinlock);
-       eq->is_initialized = 0;
-
-       if (type != EHCA_EQ && type != EHCA_NEQ) {
-               ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
-               return -EINVAL;
-       }
-       if (!length) {
-               ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
-               return -EINVAL;
-       }
-
-       h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
-                                        &eq->pf,
-                                        type,
-                                        length,
-                                        &eq->ipz_eq_handle,
-                                        &eq->length,
-                                        &nr_pages, &eq->ist);
-
-       if (h_ret != H_SUCCESS) {
-               ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
-               return -EINVAL;
-       }
-
-       ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
-                            EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
-       if (!ret) {
-               ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
-               goto create_eq_exit1;
-       }
-
-       for (i = 0; i < nr_pages; i++) {
-               u64 rpage;
-
-               vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
-               if (!vpage)
-                       goto create_eq_exit2;
-
-               rpage = __pa(vpage);
-               h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
-                                                eq->ipz_eq_handle,
-                                                &eq->pf,
-                                                0, 0, rpage, 1);
-
-               if (i == (nr_pages - 1)) {
-                       /* last page */
-                       vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
-                       if (h_ret != H_SUCCESS || vpage)
-                               goto create_eq_exit2;
-               } else {
-                       if (h_ret != H_PAGE_REGISTERED)
-                               goto create_eq_exit2;
-               }
-       }
-
-       ipz_qeit_reset(&eq->ipz_queue);
-
-       /* register interrupt handlers and initialize work queues */
-       if (type == EHCA_EQ) {
-               tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
-
-               ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
-                                         0, "ehca_eq",
-                                         (void *)shca);
-               if (ret < 0)
-                       ehca_err(ib_dev, "Can't map interrupt handler.");
-       } else if (type == EHCA_NEQ) {
-               tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
-
-               ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
-                                         0, "ehca_neq",
-                                         (void *)shca);
-               if (ret < 0)
-                       ehca_err(ib_dev, "Can't map interrupt handler.");
-       }
-
-       eq->is_initialized = 1;
-
-       return 0;
-
-create_eq_exit2:
-       ipz_queue_dtor(NULL, &eq->ipz_queue);
-
-create_eq_exit1:
-       hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
-       return -EINVAL;
-}
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
-       unsigned long flags;
-       void *eqe;
-
-       spin_lock_irqsave(&eq->spinlock, flags);
-       eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
-       spin_unlock_irqrestore(&eq->spinlock, flags);
-
-       return eqe;
-}
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
-       unsigned long flags;
-       u64 h_ret;
-
-       ibmebus_free_irq(eq->ist, (void *)shca);
-
-       spin_lock_irqsave(&shca_list_lock, flags);
-       eq->is_initialized = 0;
-       spin_unlock_irqrestore(&shca_list_lock, flags);
-
-       tasklet_kill(&eq->interrupt_task);
-
-       h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't free EQ resources.");
-               return -EINVAL;
-       }
-       ipz_queue_dtor(NULL, &eq->ipz_queue);
-
-       return 0;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
deleted file mode 100644 (file)
index e8b1bb6..0000000
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HCA query functions
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/gfp.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static unsigned int limit_uint(unsigned int value)
-{
-       return min_t(unsigned int, value, INT_MAX);
-}
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-                     struct ib_udata *uhw)
-{
-       int i, ret = 0;
-       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-                                             ib_device);
-       struct hipz_query_hca *rblock;
-
-       static const u32 cap_mapping[] = {
-               IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
-               IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
-               IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
-               IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
-               IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
-               IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
-               IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
-               IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
-               IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
-               IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
-               IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
-       };
-
-       if (uhw->inlen || uhw->outlen)
-               return -EINVAL;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query device properties");
-               ret = -EINVAL;
-               goto query_device1;
-       }
-
-       memset(props, 0, sizeof(struct ib_device_attr));
-       props->page_size_cap   = shca->hca_cap_mr_pgsize;
-       props->fw_ver          = rblock->hw_ver;
-       props->max_mr_size     = rblock->max_mr_size;
-       props->vendor_id       = rblock->vendor_id >> 8;
-       props->vendor_part_id  = rblock->vendor_part_id >> 16;
-       props->hw_ver          = rblock->hw_ver;
-       props->max_qp          = limit_uint(rblock->max_qp);
-       props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
-       props->max_sge         = limit_uint(rblock->max_sge);
-       props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
-       props->max_cq          = limit_uint(rblock->max_cq);
-       props->max_cqe         = limit_uint(rblock->max_cqe);
-       props->max_mr          = limit_uint(rblock->max_mr);
-       props->max_mw          = limit_uint(rblock->max_mw);
-       props->max_pd          = limit_uint(rblock->max_pd);
-       props->max_ah          = limit_uint(rblock->max_ah);
-       props->max_ee          = limit_uint(rblock->max_rd_ee_context);
-       props->max_rdd         = limit_uint(rblock->max_rd_domain);
-       props->max_fmr         = limit_uint(rblock->max_mr);
-       props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
-       props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
-       props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
-       props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
-       props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
-
-       if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
-               props->max_srq         = limit_uint(props->max_qp);
-               props->max_srq_wr      = limit_uint(props->max_qp_wr);
-               props->max_srq_sge     = 3;
-       }
-
-       props->max_pkeys           = 16;
-       /* Some FW versions say 0 here; insert sensible value in that case */
-       props->local_ca_ack_delay  = rblock->local_ca_ack_delay ?
-               min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
-       props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
-       props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
-       props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
-       props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
-       props->max_total_mcast_qp_attach
-               = limit_uint(rblock->max_total_mcast_qp_attach);
-
-       /* translate device capabilities */
-       props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
-               IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
-       for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
-               if (rblock->hca_cap_indicators & cap_mapping[i + 1])
-                       props->device_cap_flags |= cap_mapping[i];
-
-query_device1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
-{
-       switch (fw_mtu) {
-       case 0x1:
-               return IB_MTU_256;
-       case 0x2:
-               return IB_MTU_512;
-       case 0x3:
-               return IB_MTU_1024;
-       case 0x4:
-               return IB_MTU_2048;
-       case 0x5:
-               return IB_MTU_4096;
-       default:
-               ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
-                        fw_mtu);
-               return 0;
-       }
-}
-
-static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
-{
-       switch (vl_cap) {
-       case 0x1:
-               return 1;
-       case 0x2:
-               return 2;
-       case 0x3:
-               return 4;
-       case 0x4:
-               return 8;
-       case 0x5:
-               return 15;
-       default:
-               ehca_err(&shca->ib_device, "invalid VL capability: %x.",
-                        vl_cap);
-               return 0;
-       }
-}
-
-int ehca_query_port(struct ib_device *ibdev,
-                   u8 port, struct ib_port_attr *props)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-                                             ib_device);
-       struct hipz_query_port *rblock;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto query_port1;
-       }
-
-       memset(props, 0, sizeof(struct ib_port_attr));
-
-       props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
-       props->port_cap_flags  = rblock->capability_mask;
-       props->gid_tbl_len     = rblock->gid_tbl_len;
-       if (rblock->max_msg_sz)
-               props->max_msg_sz      = rblock->max_msg_sz;
-       else
-               props->max_msg_sz      = 0x1 << 31;
-       props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
-       props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
-       props->pkey_tbl_len    = rblock->pkey_tbl_len;
-       props->lid             = rblock->lid;
-       props->sm_lid          = rblock->sm_lid;
-       props->lmc             = rblock->lmc;
-       props->sm_sl           = rblock->sm_sl;
-       props->subnet_timeout  = rblock->subnet_timeout;
-       props->init_type_reply = rblock->init_type_reply;
-       props->max_vl_num      = map_number_of_vls(shca, rblock->vl_cap);
-
-       if (rblock->state && rblock->phys_width) {
-               props->phys_state      = rblock->phys_pstate;
-               props->state           = rblock->phys_state;
-               props->active_width    = rblock->phys_width;
-               props->active_speed    = rblock->phys_speed;
-       } else {
-               /* old firmware releases don't report physical
-                * port info, so use default values
-                */
-               props->phys_state      = 5;
-               props->state           = rblock->state;
-               props->active_width    = IB_WIDTH_12X;
-               props->active_speed    = IB_SPEED_SDR;
-       }
-
-query_port1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-int ehca_query_sma_attr(struct ehca_shca *shca,
-                       u8 port, struct ehca_sma_attr *attr)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct hipz_query_port *rblock;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto query_sma_attr1;
-       }
-
-       memset(attr, 0, sizeof(struct ehca_sma_attr));
-
-       attr->lid    = rblock->lid;
-       attr->lmc    = rblock->lmc;
-       attr->sm_sl  = rblock->sm_sl;
-       attr->sm_lid = rblock->sm_lid;
-
-       attr->pkey_tbl_len = rblock->pkey_tbl_len;
-       memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
-
-query_sma_attr1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_shca *shca;
-       struct hipz_query_port *rblock;
-
-       shca = container_of(ibdev, struct ehca_shca, ib_device);
-       if (index > 16) {
-               ehca_err(&shca->ib_device, "Invalid index: %x.", index);
-               return -EINVAL;
-       }
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto query_pkey1;
-       }
-
-       memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
-
-query_pkey1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port,
-                  int index, union ib_gid *gid)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-                                             ib_device);
-       struct hipz_query_port *rblock;
-
-       if (index < 0 || index > 255) {
-               ehca_err(&shca->ib_device, "Invalid index: %x.", index);
-               return -EINVAL;
-       }
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto query_gid1;
-       }
-
-       memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
-       memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
-
-query_gid1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-static const u32 allowed_port_caps = (
-       IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
-       IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
-       IB_PORT_VENDOR_CLASS_SUP);
-
-int ehca_modify_port(struct ib_device *ibdev,
-                    u8 port, int port_modify_mask,
-                    struct ib_port_modify *props)
-{
-       int ret = 0;
-       struct ehca_shca *shca;
-       struct hipz_query_port *rblock;
-       u32 cap;
-       u64 hret;
-
-       shca = container_of(ibdev, struct ehca_shca, ib_device);
-       if ((props->set_port_cap_mask | props->clr_port_cap_mask)
-           & ~allowed_port_caps) {
-               ehca_err(&shca->ib_device, "Non-changeable bits set in masks  "
-                        "set=%x  clr=%x  allowed=%x", props->set_port_cap_mask,
-                        props->clr_port_cap_mask, allowed_port_caps);
-               return -EINVAL;
-       }
-
-       if (mutex_lock_interruptible(&shca->modify_mutex))
-               return -ERESTARTSYS;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
-               ret = -ENOMEM;
-               goto modify_port1;
-       }
-
-       hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (hret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto modify_port2;
-       }
-
-       cap = (rblock->capability_mask | props->set_port_cap_mask)
-               & ~props->clr_port_cap_mask;
-
-       hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
-                                 cap, props->init_type, port_modify_mask);
-       if (hret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Modify port failed  h_ret=%lli",
-                        hret);
-               ret = -EINVAL;
-       }
-
-modify_port2:
-       ehca_free_fw_ctrlblock(rblock);
-
-modify_port1:
-       mutex_unlock(&shca->modify_mutex);
-
-       return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
deleted file mode 100644 (file)
index 8615d7c..0000000
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Functions for EQs, NEQs and interrupts
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <linux/smpboot.h>
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
-#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
-#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
-#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
-#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)
-
-#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
-#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
-#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
-#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
-#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)
-
-#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
-#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
-
-static void queue_comp_task(struct ehca_cq *__cq);
-
-static struct ehca_comp_pool *pool;
-
-static inline void comp_event_callback(struct ehca_cq *cq)
-{
-       if (!cq->ib_cq.comp_handler)
-               return;
-
-       spin_lock(&cq->cb_lock);
-       cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
-       spin_unlock(&cq->cb_lock);
-
-       return;
-}
-
-static void print_error_data(struct ehca_shca *shca, void *data,
-                            u64 *rblock, int length)
-{
-       u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
-       u64 resource = rblock[1];
-
-       switch (type) {
-       case 0x1: /* Queue Pair */
-       {
-               struct ehca_qp *qp = (struct ehca_qp *)data;
-
-               /* only print error data if AER is set */
-               if (rblock[6] == 0)
-                       return;
-
-               ehca_err(&shca->ib_device,
-                        "QP 0x%x (resource=%llx) has errors.",
-                        qp->ib_qp.qp_num, resource);
-               break;
-       }
-       case 0x4: /* Completion Queue */
-       {
-               struct ehca_cq *cq = (struct ehca_cq *)data;
-
-               ehca_err(&shca->ib_device,
-                        "CQ 0x%x (resource=%llx) has errors.",
-                        cq->cq_number, resource);
-               break;
-       }
-       default:
-               ehca_err(&shca->ib_device,
-                        "Unknown error type: %llx on %s.",
-                        type, shca->ib_device.name);
-               break;
-       }
-
-       ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
-       ehca_err(&shca->ib_device, "EHCA ----- error data begin "
-                "---------------------------------------------------");
-       ehca_dmp(rblock, length, "resource=%llx", resource);
-       ehca_err(&shca->ib_device, "EHCA ----- error data end "
-                "----------------------------------------------------");
-
-       return;
-}
-
-int ehca_error_data(struct ehca_shca *shca, void *data,
-                   u64 resource)
-{
-
-       unsigned long ret;
-       u64 *rblock;
-       unsigned long block_count;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
-               ret = -ENOMEM;
-               goto error_data1;
-       }
-
-       /* rblock must be 4K aligned and should be 4K large */
-       ret = hipz_h_error_data(shca->ipz_hca_handle,
-                               resource,
-                               rblock,
-                               &block_count);
-
-       if (ret == H_R_STATE)
-               ehca_err(&shca->ib_device,
-                        "No error data is available: %llx.", resource);
-       else if (ret == H_SUCCESS) {
-               int length;
-
-               length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
-
-               if (length > EHCA_PAGESIZE)
-                       length = EHCA_PAGESIZE;
-
-               print_error_data(shca, data, rblock, length);
-       } else
-               ehca_err(&shca->ib_device,
-                        "Error data could not be fetched: %llx", resource);
-
-       ehca_free_fw_ctrlblock(rblock);
-
-error_data1:
-       return ret;
-
-}
-
-static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
-                             enum ib_event_type event_type)
-{
-       struct ib_event event;
-
-       /* PATH_MIG without the QP ever having been armed is a false alarm */
-       if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
-               return;
-
-       event.device = &shca->ib_device;
-       event.event = event_type;
-
-       if (qp->ext_type == EQPT_SRQ) {
-               if (!qp->ib_srq.event_handler)
-                       return;
-
-               event.element.srq = &qp->ib_srq;
-               qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
-       } else {
-               if (!qp->ib_qp.event_handler)
-                       return;
-
-               event.element.qp = &qp->ib_qp;
-               qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
-       }
-}
-
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
-                             enum ib_event_type event_type, int fatal)
-{
-       struct ehca_qp *qp;
-       u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
-       read_lock(&ehca_qp_idr_lock);
-       qp = idr_find(&ehca_qp_idr, token);
-       if (qp)
-               atomic_inc(&qp->nr_events);
-       read_unlock(&ehca_qp_idr_lock);
-
-       if (!qp)
-               return;
-
-       if (fatal)
-               ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
-
-       dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
-                         IB_EVENT_SRQ_ERR : event_type);
-
-       /*
-        * eHCA only processes one WQE at a time for SRQ base QPs,
-        * so the last WQE has been processed as soon as the QP enters
-        * error state.
-        */
-       if (fatal && qp->ext_type == EQPT_SRQBASE)
-               dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
-
-       if (atomic_dec_and_test(&qp->nr_events))
-               wake_up(&qp->wait_completion);
-       return;
-}
-
-static void cq_event_callback(struct ehca_shca *shca,
-                             u64 eqe)
-{
-       struct ehca_cq *cq;
-       u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
-
-       read_lock(&ehca_cq_idr_lock);
-       cq = idr_find(&ehca_cq_idr, token);
-       if (cq)
-               atomic_inc(&cq->nr_events);
-       read_unlock(&ehca_cq_idr_lock);
-
-       if (!cq)
-               return;
-
-       ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
-
-       if (atomic_dec_and_test(&cq->nr_events))
-               wake_up(&cq->wait_completion);
-
-       return;
-}
-
-static void parse_identifier(struct ehca_shca *shca, u64 eqe)
-{
-       u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
-
-       switch (identifier) {
-       case 0x02: /* path migrated */
-               qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
-               break;
-       case 0x03: /* communication established */
-               qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
-               break;
-       case 0x04: /* send queue drained */
-               qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
-               break;
-       case 0x05: /* QP error */
-       case 0x06: /* QP error */
-               qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
-               break;
-       case 0x07: /* CQ error */
-       case 0x08: /* CQ error */
-               cq_event_callback(shca, eqe);
-               break;
-       case 0x09: /* MRMWPTE error */
-               ehca_err(&shca->ib_device, "MRMWPTE error.");
-               break;
-       case 0x0A: /* port event */
-               ehca_err(&shca->ib_device, "Port event.");
-               break;
-       case 0x0B: /* MR access error */
-               ehca_err(&shca->ib_device, "MR access error.");
-               break;
-       case 0x0C: /* EQ error */
-               ehca_err(&shca->ib_device, "EQ error.");
-               break;
-       case 0x0D: /* P/Q_Key mismatch */
-               ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
-               break;
-       case 0x10: /* sampling complete */
-               ehca_err(&shca->ib_device, "Sampling complete.");
-               break;
-       case 0x11: /* unaffiliated access error */
-               ehca_err(&shca->ib_device, "Unaffiliated access error.");
-               break;
-       case 0x12: /* path migrating */
-               ehca_err(&shca->ib_device, "Path migrating.");
-               break;
-       case 0x13: /* interface trace stopped */
-               ehca_err(&shca->ib_device, "Interface trace stopped.");
-               break;
-       case 0x14: /* first error capture info available */
-               ehca_info(&shca->ib_device, "First error capture available");
-               break;
-       case 0x15: /* SRQ limit reached */
-               qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
-               break;
-       default:
-               ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
-                        identifier, shca->ib_device.name);
-               break;
-       }
-
-       return;
-}
-
-static void dispatch_port_event(struct ehca_shca *shca, int port_num,
-                               enum ib_event_type type, const char *msg)
-{
-       struct ib_event event;
-
-       ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
-       event.device = &shca->ib_device;
-       event.event = type;
-       event.element.port_num = port_num;
-       ib_dispatch_event(&event);
-}
-
-static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
-{
-       struct ehca_sma_attr  new_attr;
-       struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
-
-       ehca_query_sma_attr(shca, port_num, &new_attr);
-
-       if (new_attr.sm_sl  != old_attr->sm_sl ||
-           new_attr.sm_lid != old_attr->sm_lid)
-               dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
-                                   "SM changed");
-
-       if (new_attr.lid != old_attr->lid ||
-           new_attr.lmc != old_attr->lmc)
-               dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
-                                   "LID changed");
-
-       if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
-           memcmp(new_attr.pkeys, old_attr->pkeys,
-                  sizeof(u16) * new_attr.pkey_tbl_len))
-               dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
-                                   "P_Key changed");
-
-       *old_attr = new_attr;
-}
-
-/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
-static int replay_modify_qp(struct ehca_sport *sport)
-{
-       int aqp1_destroyed;
-       unsigned long flags;
-
-       spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-
-       aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
-
-       if (sport->ibqp_sqp[IB_QPT_SMI])
-               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-       if (!aqp1_destroyed)
-               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-
-       spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
-       return aqp1_destroyed;
-}
-
-static void parse_ec(struct ehca_shca *shca, u64 eqe)
-{
-       u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
-       u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
-       u8 spec_event;
-       struct ehca_sport *sport = &shca->sport[port - 1];
-
-       switch (ec) {
-       case 0x30: /* port availability change */
-               if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-                       /* only replay modify_qp calls in autodetect mode;
-                        * if AQP1 was destroyed, the port is already down
-                        * again and we can drop the event.
-                        */
-                       if (ehca_nr_ports < 0)
-                               if (replay_modify_qp(sport))
-                                       break;
-
-                       sport->port_state = IB_PORT_ACTIVE;
-                       dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
-                                           "is active");
-                       ehca_query_sma_attr(shca, port, &sport->saved_attr);
-               } else {
-                       sport->port_state = IB_PORT_DOWN;
-                       dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
-                                           "is inactive");
-               }
-               break;
-       case 0x31:
-               /* port configuration change
-                * disruptive change is caused by
-                * LID, PKEY or SM change
-                */
-               if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
-                       ehca_warn(&shca->ib_device, "disruptive port "
-                                 "%d configuration change", port);
-
-                       sport->port_state = IB_PORT_DOWN;
-                       dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
-                                           "is inactive");
-
-                       sport->port_state = IB_PORT_ACTIVE;
-                       dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
-                                           "is active");
-                       ehca_query_sma_attr(shca, port,
-                                           &sport->saved_attr);
-               } else
-                       notify_port_conf_change(shca, port);
-               break;
-       case 0x32: /* adapter malfunction */
-               ehca_err(&shca->ib_device, "Adapter malfunction.");
-               break;
-       case 0x33:  /* trace stopped */
-               ehca_err(&shca->ib_device, "Traced stopped.");
-               break;
-       case 0x34: /* util async event */
-               spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
-               if (spec_event == 0x80) /* client reregister required */
-                       dispatch_port_event(shca, port,
-                                           IB_EVENT_CLIENT_REREGISTER,
-                                           "client reregister req.");
-               else
-                       ehca_warn(&shca->ib_device, "Unknown util async "
-                                 "event %x on port %x", spec_event, port);
-               break;
-       default:
-               ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
-                        ec, shca->ib_device.name);
-               break;
-       }
-
-       return;
-}
-
-static inline void reset_eq_pending(struct ehca_cq *cq)
-{
-       u64 CQx_EP;
-       struct h_galpa gal = cq->galpas.kernel;
-
-       hipz_galpa_store_cq(gal, cqx_ep, 0x0);
-       CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
-
-       return;
-}
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
-{
-       struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
-       tasklet_hi_schedule(&shca->neq.interrupt_task);
-
-       return IRQ_HANDLED;
-}
-
-void ehca_tasklet_neq(unsigned long data)
-{
-       struct ehca_shca *shca = (struct ehca_shca*)data;
-       struct ehca_eqe *eqe;
-       u64 ret;
-
-       eqe = ehca_poll_eq(shca, &shca->neq);
-
-       while (eqe) {
-               if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
-                       parse_ec(shca, eqe->entry);
-
-               eqe = ehca_poll_eq(shca, &shca->neq);
-       }
-
-       ret = hipz_h_reset_event(shca->ipz_hca_handle,
-                                shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
-
-       if (ret != H_SUCCESS)
-               ehca_err(&shca->ib_device, "Can't clear notification events.");
-
-       return;
-}
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
-{
-       struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
-       tasklet_hi_schedule(&shca->eq.interrupt_task);
-
-       return IRQ_HANDLED;
-}
-
-
-static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
-{
-       u64 eqe_value;
-       u32 token;
-       struct ehca_cq *cq;
-
-       eqe_value = eqe->entry;
-       ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
-       if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-               ehca_dbg(&shca->ib_device, "Got completion event");
-               token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-               read_lock(&ehca_cq_idr_lock);
-               cq = idr_find(&ehca_cq_idr, token);
-               if (cq)
-                       atomic_inc(&cq->nr_events);
-               read_unlock(&ehca_cq_idr_lock);
-               if (cq == NULL) {
-                       ehca_err(&shca->ib_device,
-                                "Invalid eqe for non-existing cq token=%x",
-                                token);
-                       return;
-               }
-               reset_eq_pending(cq);
-               if (ehca_scaling_code)
-                       queue_comp_task(cq);
-               else {
-                       comp_event_callback(cq);
-                       if (atomic_dec_and_test(&cq->nr_events))
-                               wake_up(&cq->wait_completion);
-               }
-       } else {
-               ehca_dbg(&shca->ib_device, "Got non completion event");
-               parse_identifier(shca, eqe_value);
-       }
-}
-
-void ehca_process_eq(struct ehca_shca *shca, int is_irq)
-{
-       struct ehca_eq *eq = &shca->eq;
-       struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
-       u64 eqe_value, ret;
-       int eqe_cnt, i;
-       int eq_empty = 0;
-
-       spin_lock(&eq->irq_spinlock);
-       if (is_irq) {
-               const int max_query_cnt = 100;
-               int query_cnt = 0;
-               int int_state = 1;
-               do {
-                       int_state = hipz_h_query_int_state(
-                               shca->ipz_hca_handle, eq->ist);
-                       query_cnt++;
-                       iosync();
-               } while (int_state && query_cnt < max_query_cnt);
-               if (unlikely((query_cnt == max_query_cnt)))
-                       ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
-                                int_state, query_cnt);
-       }
-
-       /* read out all eqes */
-       eqe_cnt = 0;
-       do {
-               u32 token;
-               eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
-               if (!eqe_cache[eqe_cnt].eqe)
-                       break;
-               eqe_value = eqe_cache[eqe_cnt].eqe->entry;
-               if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-                       token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-                       read_lock(&ehca_cq_idr_lock);
-                       eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
-                       if (eqe_cache[eqe_cnt].cq)
-                               atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
-                       read_unlock(&ehca_cq_idr_lock);
-                       if (!eqe_cache[eqe_cnt].cq) {
-                               ehca_err(&shca->ib_device,
-                                        "Invalid eqe for non-existing cq "
-                                        "token=%x", token);
-                               continue;
-                       }
-               } else
-                       eqe_cache[eqe_cnt].cq = NULL;
-               eqe_cnt++;
-       } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
-       if (!eqe_cnt) {
-               if (is_irq)
-                       ehca_dbg(&shca->ib_device,
-                                "No eqe found for irq event");
-               goto unlock_irq_spinlock;
-       } else if (!is_irq) {
-               ret = hipz_h_eoi(eq->ist);
-               if (ret != H_SUCCESS)
-                       ehca_err(&shca->ib_device,
-                                "bad return code EOI -rc = %lld\n", ret);
-               ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
-       }
-       if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
-               ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
-       /* enable irq for new packets */
-       for (i = 0; i < eqe_cnt; i++) {
-               if (eq->eqe_cache[i].cq)
-                       reset_eq_pending(eq->eqe_cache[i].cq);
-       }
-       /* check eq */
-       spin_lock(&eq->spinlock);
-       eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
-       spin_unlock(&eq->spinlock);
-       /* call completion handler for cached eqes */
-       for (i = 0; i < eqe_cnt; i++)
-               if (eq->eqe_cache[i].cq) {
-                       if (ehca_scaling_code)
-                               queue_comp_task(eq->eqe_cache[i].cq);
-                       else {
-                               struct ehca_cq *cq = eq->eqe_cache[i].cq;
-                               comp_event_callback(cq);
-                               if (atomic_dec_and_test(&cq->nr_events))
-                                       wake_up(&cq->wait_completion);
-                       }
-               } else {
-                       ehca_dbg(&shca->ib_device, "Got non completion event");
-                       parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
-               }
-       /* poll eq if not empty */
-       if (eq_empty)
-               goto unlock_irq_spinlock;
-       do {
-               struct ehca_eqe *eqe;
-               eqe = ehca_poll_eq(shca, &shca->eq);
-               if (!eqe)
-                       break;
-               process_eqe(shca, eqe);
-       } while (1);
-
-unlock_irq_spinlock:
-       spin_unlock(&eq->irq_spinlock);
-}
-
-void ehca_tasklet_eq(unsigned long data)
-{
-       ehca_process_eq((struct ehca_shca*)data, 1);
-}
-
-static int find_next_online_cpu(struct ehca_comp_pool *pool)
-{
-       int cpu;
-       unsigned long flags;
-
-       WARN_ON_ONCE(!in_interrupt());
-       if (ehca_debug_level >= 3)
-               ehca_dmp(cpu_online_mask, cpumask_size(), "");
-
-       spin_lock_irqsave(&pool->last_cpu_lock, flags);
-       do {
-               cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
-               if (cpu >= nr_cpu_ids)
-                       cpu = cpumask_first(cpu_online_mask);
-               pool->last_cpu = cpu;
-       } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
-       spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
-
-       return cpu;
-}
-
-static void __queue_comp_task(struct ehca_cq *__cq,
-                             struct ehca_cpu_comp_task *cct,
-                             struct task_struct *thread)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&cct->task_lock, flags);
-       spin_lock(&__cq->task_lock);
-
-       if (__cq->nr_callbacks == 0) {
-               __cq->nr_callbacks++;
-               list_add_tail(&__cq->entry, &cct->cq_list);
-               cct->cq_jobs++;
-               wake_up_process(thread);
-       } else
-               __cq->nr_callbacks++;
-
-       spin_unlock(&__cq->task_lock);
-       spin_unlock_irqrestore(&cct->task_lock, flags);
-}
-
-static void queue_comp_task(struct ehca_cq *__cq)
-{
-       int cpu_id;
-       struct ehca_cpu_comp_task *cct;
-       struct task_struct *thread;
-       int cq_jobs;
-       unsigned long flags;
-
-       cpu_id = find_next_online_cpu(pool);
-       BUG_ON(!cpu_online(cpu_id));
-
-       cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
-       thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
-       BUG_ON(!cct || !thread);
-
-       spin_lock_irqsave(&cct->task_lock, flags);
-       cq_jobs = cct->cq_jobs;
-       spin_unlock_irqrestore(&cct->task_lock, flags);
-       if (cq_jobs > 0) {
-               cpu_id = find_next_online_cpu(pool);
-               cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
-               thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
-               BUG_ON(!cct || !thread);
-       }
-       __queue_comp_task(__cq, cct, thread);
-}
-
-static void run_comp_task(struct ehca_cpu_comp_task *cct)
-{
-       struct ehca_cq *cq;
-
-       while (!list_empty(&cct->cq_list)) {
-               cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
-               spin_unlock_irq(&cct->task_lock);
-
-               comp_event_callback(cq);
-               if (atomic_dec_and_test(&cq->nr_events))
-                       wake_up(&cq->wait_completion);
-
-               spin_lock_irq(&cct->task_lock);
-               spin_lock(&cq->task_lock);
-               cq->nr_callbacks--;
-               if (!cq->nr_callbacks) {
-                       list_del_init(cct->cq_list.next);
-                       cct->cq_jobs--;
-               }
-               spin_unlock(&cq->task_lock);
-       }
-}
-
-static void comp_task_park(unsigned int cpu)
-{
-       struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-       struct ehca_cpu_comp_task *target;
-       struct task_struct *thread;
-       struct ehca_cq *cq, *tmp;
-       LIST_HEAD(list);
-
-       spin_lock_irq(&cct->task_lock);
-       cct->cq_jobs = 0;
-       cct->active = 0;
-       list_splice_init(&cct->cq_list, &list);
-       spin_unlock_irq(&cct->task_lock);
-
-       cpu = find_next_online_cpu(pool);
-       target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-       thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
-       spin_lock_irq(&target->task_lock);
-       list_for_each_entry_safe(cq, tmp, &list, entry) {
-               list_del(&cq->entry);
-               __queue_comp_task(cq, target, thread);
-       }
-       spin_unlock_irq(&target->task_lock);
-}
-
-static void comp_task_stop(unsigned int cpu, bool online)
-{
-       struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
-       spin_lock_irq(&cct->task_lock);
-       cct->cq_jobs = 0;
-       cct->active = 0;
-       WARN_ON(!list_empty(&cct->cq_list));
-       spin_unlock_irq(&cct->task_lock);
-}
-
-static int comp_task_should_run(unsigned int cpu)
-{
-       struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
-       return cct->cq_jobs;
-}
-
-static void comp_task(unsigned int cpu)
-{
-       struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
-       int cql_empty;
-
-       spin_lock_irq(&cct->task_lock);
-       cql_empty = list_empty(&cct->cq_list);
-       if (!cql_empty) {
-               __set_current_state(TASK_RUNNING);
-               run_comp_task(cct);
-       }
-       spin_unlock_irq(&cct->task_lock);
-}
-
-static struct smp_hotplug_thread comp_pool_threads = {
-       .thread_should_run      = comp_task_should_run,
-       .thread_fn              = comp_task,
-       .thread_comm            = "ehca_comp/%u",
-       .cleanup                = comp_task_stop,
-       .park                   = comp_task_park,
-};
-
-int ehca_create_comp_pool(void)
-{
-       int cpu, ret = -ENOMEM;
-
-       if (!ehca_scaling_code)
-               return 0;
-
-       pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
-       if (pool == NULL)
-               return -ENOMEM;
-
-       spin_lock_init(&pool->last_cpu_lock);
-       pool->last_cpu = cpumask_any(cpu_online_mask);
-
-       pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
-       if (!pool->cpu_comp_tasks)
-               goto out_pool;
-
-       pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
-       if (!pool->cpu_comp_threads)
-               goto out_tasks;
-
-       for_each_present_cpu(cpu) {
-               struct ehca_cpu_comp_task *cct;
-
-               cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-               spin_lock_init(&cct->task_lock);
-               INIT_LIST_HEAD(&cct->cq_list);
-       }
-
-       comp_pool_threads.store = pool->cpu_comp_threads;
-       ret = smpboot_register_percpu_thread(&comp_pool_threads);
-       if (ret)
-               goto out_threads;
-
-       pr_info("eHCA scaling code enabled\n");
-       return ret;
-
-out_threads:
-       free_percpu(pool->cpu_comp_threads);
-out_tasks:
-       free_percpu(pool->cpu_comp_tasks);
-out_pool:
-       kfree(pool);
-       return ret;
-}
-
-void ehca_destroy_comp_pool(void)
-{
-       if (!ehca_scaling_code)
-               return;
-
-       smpboot_unregister_percpu_thread(&comp_pool_threads);
-
-       free_percpu(pool->cpu_comp_threads);
-       free_percpu(pool->cpu_comp_tasks);
-       kfree(pool);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
deleted file mode 100644 (file)
index 5370199..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Function definitions and structs for EQs, NEQs and interrupts
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IRQ_H
-#define __EHCA_IRQ_H
-
-
-struct ehca_shca;
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-
-int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
-void ehca_tasklet_neq(unsigned long data);
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
-void ehca_tasklet_eq(unsigned long data);
-void ehca_process_eq(struct ehca_shca *shca, int is_irq);
-
-struct ehca_cpu_comp_task {
-       struct list_head cq_list;
-       spinlock_t task_lock;
-       int cq_jobs;
-       int active;
-};
-
-struct ehca_comp_pool {
-       struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
-       struct task_struct * __percpu *cpu_comp_threads;
-       int last_cpu;
-       spinlock_t last_cpu_lock;
-};
-
-int ehca_create_comp_pool(void);
-void ehca_destroy_comp_pool(void);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
deleted file mode 100644 (file)
index cca5933..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Function definitions for internal functions
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Dietmar Decker <ddecker@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IVERBS_H__
-#define __EHCA_IVERBS_H__
-
-#include "ehca_classes.h"
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-                     struct ib_udata *uhw);
-
-int ehca_query_port(struct ib_device *ibdev, u8 port,
-                   struct ib_port_attr *props);
-
-enum rdma_protocol_type
-ehca_query_protocol(struct ib_device *device, u8 port_num);
-
-int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
-                       struct ehca_sma_attr *attr);
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
-                  union ib_gid *gid);
-
-int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
-                    struct ib_port_modify *props);
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
-                           struct ib_ucontext *context,
-                           struct ib_udata *udata);
-
-int ehca_dealloc_pd(struct ib_pd *pd);
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_destroy_ah(struct ib_ah *ah);
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                              u64 virt, int mr_access_flags,
-                              struct ib_udata *udata);
-
-int ehca_dereg_mr(struct ib_mr *mr);
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-
-int ehca_dealloc_mw(struct ib_mw *mw);
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
-                             int mr_access_flags,
-                             struct ib_fmr_attr *fmr_attr);
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
-                     u64 *page_list, int list_len, u64 iova);
-
-int ehca_unmap_fmr(struct list_head *fmr_list);
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr);
-
-enum ehca_eq_type {
-       EHCA_EQ = 0, /* Event Queue              */
-       EHCA_NEQ     /* Notification Event Queue */
-};
-
-int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
-                  enum ehca_eq_type type, const u32 length);
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
-                            const struct ib_cq_init_attr *attr,
-                            struct ib_ucontext *context,
-                            struct ib_udata *udata);
-
-int ehca_destroy_cq(struct ib_cq *cq);
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
-
-int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
-
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
-                            struct ib_qp_init_attr *init_attr,
-                            struct ib_udata *udata);
-
-int ehca_destroy_qp(struct ib_qp *qp);
-
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-                  struct ib_udata *udata);
-
-int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
-                 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
-                  struct ib_send_wr **bad_send_wr);
-
-int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
-                  struct ib_recv_wr **bad_recv_wr);
-
-int ehca_post_srq_recv(struct ib_srq *srq,
-                      struct ib_recv_wr *recv_wr,
-                      struct ib_recv_wr **bad_recv_wr);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
-                              struct ib_srq_init_attr *init_attr,
-                              struct ib_udata *udata);
-
-int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
-                   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-
-int ehca_destroy_srq(struct ib_srq *srq);
-
-u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
-                   struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
-                                       struct ib_udata *udata);
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context);
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-                    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                    const struct ib_mad_hdr *in, size_t in_mad_size,
-                    struct ib_mad_hdr *out, size_t *out_mad_size,
-                    u16 *out_mad_pkey_index);
-
-void ehca_poll_eqs(unsigned long data);
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
-                 enum ib_rate path_rate, u32 *ipd);
-
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
-
-#ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(gfp_t flags);
-void ehca_free_fw_ctrlblock(void *ptr);
-#else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
-#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
-#endif
-
-void ehca_recover_sqp(struct ib_qp *sqp);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
deleted file mode 100644 (file)
index 832f22f..0000000
+++ /dev/null
@@ -1,1118 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  module start stop, hca detection
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <linux/slab.h>
-#endif
-
-#include <linux/notifier.h>
-#include <linux/memory.h>
-#include <rdma/ib_mad.h>
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-#define HCAD_VERSION "0029"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
-MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION(HCAD_VERSION);
-
-static bool ehca_open_aqp1    = 0;
-static int ehca_hw_level      = 0;
-static bool ehca_poll_all_eqs = 1;
-
-int ehca_debug_level   = 0;
-int ehca_nr_ports      = -1;
-bool ehca_use_hp_mr    = 0;
-int ehca_port_act_time = 30;
-int ehca_static_rate   = -1;
-bool ehca_scaling_code = 0;
-int ehca_lock_hcalls   = -1;
-int ehca_max_cq        = -1;
-int ehca_max_qp        = -1;
-
-module_param_named(open_aqp1,     ehca_open_aqp1,     bool, S_IRUGO);
-module_param_named(debug_level,   ehca_debug_level,   int,  S_IRUGO);
-module_param_named(hw_level,      ehca_hw_level,      int,  S_IRUGO);
-module_param_named(nr_ports,      ehca_nr_ports,      int,  S_IRUGO);
-module_param_named(use_hp_mr,     ehca_use_hp_mr,     bool, S_IRUGO);
-module_param_named(port_act_time, ehca_port_act_time, int,  S_IRUGO);
-module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  bool, S_IRUGO);
-module_param_named(static_rate,   ehca_static_rate,   int,  S_IRUGO);
-module_param_named(scaling_code,  ehca_scaling_code,  bool, S_IRUGO);
-module_param_named(lock_hcalls,   ehca_lock_hcalls,   bint, S_IRUGO);
-module_param_named(number_of_cqs, ehca_max_cq,        int,  S_IRUGO);
-module_param_named(number_of_qps, ehca_max_qp,        int,  S_IRUGO);
-
-MODULE_PARM_DESC(open_aqp1,
-                "Open AQP1 on startup (default: no)");
-MODULE_PARM_DESC(debug_level,
-                "Amount of debug output (0: none (default), 1: traces, "
-                "2: some dumps, 3: lots)");
-MODULE_PARM_DESC(hw_level,
-                "Hardware level (0: autosensing (default), "
-                "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
-MODULE_PARM_DESC(nr_ports,
-                "number of connected ports (-1: autodetect (default), "
-                "1: port one only, 2: two ports)");
-MODULE_PARM_DESC(use_hp_mr,
-                "Use high performance MRs (default: no)");
-MODULE_PARM_DESC(port_act_time,
-                "Time to wait for port activation (default: 30 sec)");
-MODULE_PARM_DESC(poll_all_eqs,
-                "Poll all event queues periodically (default: yes)");
-MODULE_PARM_DESC(static_rate,
-                "Set permanent static rate (default: no static rate)");
-MODULE_PARM_DESC(scaling_code,
-                "Enable scaling code (default: no)");
-MODULE_PARM_DESC(lock_hcalls,
-                "Serialize all hCalls made by the driver "
-                "(default: autodetect)");
-MODULE_PARM_DESC(number_of_cqs,
-               "Max number of CQs which can be allocated "
-               "(default: autodetect)");
-MODULE_PARM_DESC(number_of_qps,
-               "Max number of QPs which can be allocated "
-               "(default: autodetect)");
-
-DEFINE_RWLOCK(ehca_qp_idr_lock);
-DEFINE_RWLOCK(ehca_cq_idr_lock);
-DEFINE_IDR(ehca_qp_idr);
-DEFINE_IDR(ehca_cq_idr);
-
-static LIST_HEAD(shca_list); /* list of all registered ehcas */
-DEFINE_SPINLOCK(shca_list_lock);
-
-static struct timer_list poll_eqs_timer;
-
-#ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache;
-
-void *ehca_alloc_fw_ctrlblock(gfp_t flags)
-{
-       void *ret = kmem_cache_zalloc(ctblk_cache, flags);
-       if (!ret)
-               ehca_gen_err("Out of memory for ctblk");
-       return ret;
-}
-
-void ehca_free_fw_ctrlblock(void *ptr)
-{
-       if (ptr)
-               kmem_cache_free(ctblk_cache, ptr);
-
-}
-#endif
-
-int ehca2ib_return_code(u64 ehca_rc)
-{
-       switch (ehca_rc) {
-       case H_SUCCESS:
-               return 0;
-       case H_RESOURCE:             /* Resource in use */
-       case H_BUSY:
-               return -EBUSY;
-       case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
-       case H_CONSTRAINED:          /* resource constraint */
-       case H_NO_MEM:
-               return -ENOMEM;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int ehca_create_slab_caches(void)
-{
-       int ret;
-
-       ret = ehca_init_pd_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create PD SLAB cache.");
-               return ret;
-       }
-
-       ret = ehca_init_cq_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create CQ SLAB cache.");
-               goto create_slab_caches2;
-       }
-
-       ret = ehca_init_qp_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create QP SLAB cache.");
-               goto create_slab_caches3;
-       }
-
-       ret = ehca_init_av_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create AV SLAB cache.");
-               goto create_slab_caches4;
-       }
-
-       ret = ehca_init_mrmw_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create MR&MW SLAB cache.");
-               goto create_slab_caches5;
-       }
-
-       ret = ehca_init_small_qp_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create small queue SLAB cache.");
-               goto create_slab_caches6;
-       }
-
-#ifdef CONFIG_PPC_64K_PAGES
-       ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
-                                       EHCA_PAGESIZE, H_CB_ALIGNMENT,
-                                       SLAB_HWCACHE_ALIGN,
-                                       NULL);
-       if (!ctblk_cache) {
-               ehca_gen_err("Cannot create ctblk SLAB cache.");
-               ehca_cleanup_small_qp_cache();
-               ret = -ENOMEM;
-               goto create_slab_caches6;
-       }
-#endif
-       return 0;
-
-create_slab_caches6:
-       ehca_cleanup_mrmw_cache();
-
-create_slab_caches5:
-       ehca_cleanup_av_cache();
-
-create_slab_caches4:
-       ehca_cleanup_qp_cache();
-
-create_slab_caches3:
-       ehca_cleanup_cq_cache();
-
-create_slab_caches2:
-       ehca_cleanup_pd_cache();
-
-       return ret;
-}
-
-static void ehca_destroy_slab_caches(void)
-{
-       ehca_cleanup_small_qp_cache();
-       ehca_cleanup_mrmw_cache();
-       ehca_cleanup_av_cache();
-       ehca_cleanup_qp_cache();
-       ehca_cleanup_cq_cache();
-       ehca_cleanup_pd_cache();
-#ifdef CONFIG_PPC_64K_PAGES
-       kmem_cache_destroy(ctblk_cache);
-#endif
-}
-
-#define EHCA_HCAAVER  EHCA_BMASK_IBM(32, 39)
-#define EHCA_REVID    EHCA_BMASK_IBM(40, 63)
-
-static struct cap_descr {
-       u64 mask;
-       char *descr;
-} hca_cap_descr[] = {
-       { HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
-       { HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
-       { HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
-       { HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
-       { HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
-       { HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
-       { HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
-       { HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
-       { HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
-       { HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
-       { HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
-       { HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
-       { HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
-       { HCA_CAP_SRQ, "HCA_CAP_SRQ" },
-       { HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
-       { HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
-       { HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
-       { HCA_CAP_H_ALLOC_RES_SYNC, "HCA_CAP_H_ALLOC_RES_SYNC" },
-};
-
-static int ehca_sense_attributes(struct ehca_shca *shca)
-{
-       int i, ret = 0;
-       u64 h_ret;
-       struct hipz_query_hca *rblock;
-       struct hipz_query_port *port;
-       const char *loc_code;
-
-       static const u32 pgsize_map[] = {
-               HCA_CAP_MR_PGSIZE_4K,  0x1000,
-               HCA_CAP_MR_PGSIZE_64K, 0x10000,
-               HCA_CAP_MR_PGSIZE_1M,  0x100000,
-               HCA_CAP_MR_PGSIZE_16M, 0x1000000,
-       };
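-       /*
-        * pgsize_map is laid out as (capability flag, page size in bytes)
-        * pairs, which is why the translation loop further down steps
-        * through it two entries at a time.
-        */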
-
-       ehca_gen_dbg("Probing adapter %s...",
-                    shca->ofdev->dev.of_node->full_name);
-       loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
-                                  NULL);
-       if (loc_code)
-               ehca_gen_dbg(" ... location code=%s", loc_code);
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_gen_err("Cannot allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_gen_err("Cannot query device properties. h_ret=%lli",
-                            h_ret);
-               ret = -EPERM;
-               goto sense_attributes1;
-       }
-
-       if (ehca_nr_ports == 1)
-               shca->num_ports = 1;
-       else
-               shca->num_ports = (u8)rblock->num_ports;
-
-       ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
-
-       if (ehca_hw_level == 0) {
-               u32 hcaaver;
-               u32 revid;
-
-               hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
-               revid   = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
-
-               ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
-
-               if (hcaaver == 1) {
-                       if (revid <= 3)
-                               shca->hw_level = 0x10 | (revid + 1);
-                       else
-                               shca->hw_level = 0x14;
-               } else if (hcaaver == 2) {
-                       if (revid == 0)
-                               shca->hw_level = 0x21;
-                       else if (revid == 0x10)
-                               shca->hw_level = 0x22;
-                       else if (revid == 0x20 || revid == 0x21)
-                               shca->hw_level = 0x23;
-               }
-
-               if (!shca->hw_level) {
-                       ehca_gen_warn("unknown hardware version"
-                                     " - assuming default level");
-                       shca->hw_level = 0x22;
-               }
-       } else
-               shca->hw_level = ehca_hw_level;
-       ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
-
-       shca->hca_cap = rblock->hca_cap_indicators;
-       ehca_gen_dbg(" ... HCA capabilities:");
-       for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
-               if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
-                       ehca_gen_dbg("   %s", hca_cap_descr[i].descr);
-
-       /* Autodetect hCall locking -- the "H_ALLOC_RESOURCE synced" flag is
-        * a firmware property, so it's valid across all adapters
-        */
-       if (ehca_lock_hcalls == -1)
-               ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
-                                       shca->hca_cap);
-
-       /* translate supported MR page sizes; always support 4K */
-       shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
-       for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
-               if (rblock->memory_page_size_supported & pgsize_map[i])
-                       shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
-
-       /* Set maximum number of CQs and QPs to calculate EQ size */
-       if (shca->max_num_qps == -1)
-               shca->max_num_qps = min_t(int, rblock->max_qp,
-                                         EHCA_MAX_NUM_QUEUES);
-       else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
-               ehca_gen_warn("The requested number of QPs is out of range "
-                             "(1 - %i) specified by HW. Value is set to %i",
-                             rblock->max_qp, rblock->max_qp);
-               shca->max_num_qps = rblock->max_qp;
-       }
-
-       if (shca->max_num_cqs == -1)
-               shca->max_num_cqs = min_t(int, rblock->max_cq,
-                                         EHCA_MAX_NUM_QUEUES);
-       else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
-               ehca_gen_warn("The requested number of CQs is out of range "
-                             "(1 - %i) specified by HW. Value is set to %i",
-                             rblock->max_cq, rblock->max_cq);
-       }
-
-       /* query max MTU from first port -- it's the same for all ports */
-       port = (struct hipz_query_port *)rblock;
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
-       if (h_ret != H_SUCCESS) {
-               ehca_gen_err("Cannot query port properties. h_ret=%lli",
-                            h_ret);
-               ret = -EPERM;
-               goto sense_attributes1;
-       }
-
-       shca->max_mtu = port->max_mtu;
-
-sense_attributes1:
-       ehca_free_fw_ctrlblock(rblock);
-       return ret;
-}
-
-static int init_node_guid(struct ehca_shca *shca)
-{
-       int ret = 0;
-       struct hipz_query_hca *rblock;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query device properties");
-               ret = -EINVAL;
-               goto init_node_guid1;
-       }
-
-       memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
-
-init_node_guid1:
-       ehca_free_fw_ctrlblock(rblock);
-       return ret;
-}
-
-static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
-                              struct ib_port_immutable *immutable)
-{
-       struct ib_port_attr attr;
-       int err;
-
-       err = ehca_query_port(ibdev, port_num, &attr);
-       if (err)
-               return err;
-
-       immutable->pkey_tbl_len = attr.pkey_tbl_len;
-       immutable->gid_tbl_len = attr.gid_tbl_len;
-       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
-       immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
-       return 0;
-}
-
-static int ehca_init_device(struct ehca_shca *shca)
-{
-       int ret;
-
-       ret = init_node_guid(shca);
-       if (ret)
-               return ret;
-
-       strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
-       shca->ib_device.owner               = THIS_MODULE;
-
-       shca->ib_device.uverbs_abi_ver      = 8;
-       shca->ib_device.uverbs_cmd_mask     =
-               (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
-               (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
-               (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
-               (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
-               (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
-               (1ull << IB_USER_VERBS_CMD_REG_MR)              |
-               (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
-               (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
-               (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
-               (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
-               (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
-               (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
-               (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
-               (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
-
-       shca->ib_device.node_type           = RDMA_NODE_IB_CA;
-       shca->ib_device.phys_port_cnt       = shca->num_ports;
-       shca->ib_device.num_comp_vectors    = 1;
-       shca->ib_device.dma_device          = &shca->ofdev->dev;
-       shca->ib_device.query_device        = ehca_query_device;
-       shca->ib_device.query_port          = ehca_query_port;
-       shca->ib_device.query_gid           = ehca_query_gid;
-       shca->ib_device.query_pkey          = ehca_query_pkey;
-       /* shca->ib_device.modify_device    = ehca_modify_device;   */
-       shca->ib_device.modify_port         = ehca_modify_port;
-       shca->ib_device.alloc_ucontext      = ehca_alloc_ucontext;
-       shca->ib_device.dealloc_ucontext    = ehca_dealloc_ucontext;
-       shca->ib_device.alloc_pd            = ehca_alloc_pd;
-       shca->ib_device.dealloc_pd          = ehca_dealloc_pd;
-       shca->ib_device.create_ah           = ehca_create_ah;
-       /* shca->ib_device.modify_ah        = ehca_modify_ah;       */
-       shca->ib_device.query_ah            = ehca_query_ah;
-       shca->ib_device.destroy_ah          = ehca_destroy_ah;
-       shca->ib_device.create_qp           = ehca_create_qp;
-       shca->ib_device.modify_qp           = ehca_modify_qp;
-       shca->ib_device.query_qp            = ehca_query_qp;
-       shca->ib_device.destroy_qp          = ehca_destroy_qp;
-       shca->ib_device.post_send           = ehca_post_send;
-       shca->ib_device.post_recv           = ehca_post_recv;
-       shca->ib_device.create_cq           = ehca_create_cq;
-       shca->ib_device.destroy_cq          = ehca_destroy_cq;
-       shca->ib_device.resize_cq           = ehca_resize_cq;
-       shca->ib_device.poll_cq             = ehca_poll_cq;
-       /* shca->ib_device.peek_cq          = ehca_peek_cq;         */
-       shca->ib_device.req_notify_cq       = ehca_req_notify_cq;
-       /* shca->ib_device.req_ncomp_notif  = ehca_req_ncomp_notif; */
-       shca->ib_device.get_dma_mr          = ehca_get_dma_mr;
-       shca->ib_device.reg_user_mr         = ehca_reg_user_mr;
-       shca->ib_device.dereg_mr            = ehca_dereg_mr;
-       shca->ib_device.alloc_mw            = ehca_alloc_mw;
-       shca->ib_device.dealloc_mw          = ehca_dealloc_mw;
-       shca->ib_device.alloc_fmr           = ehca_alloc_fmr;
-       shca->ib_device.map_phys_fmr        = ehca_map_phys_fmr;
-       shca->ib_device.unmap_fmr           = ehca_unmap_fmr;
-       shca->ib_device.dealloc_fmr         = ehca_dealloc_fmr;
-       shca->ib_device.attach_mcast        = ehca_attach_mcast;
-       shca->ib_device.detach_mcast        = ehca_detach_mcast;
-       shca->ib_device.process_mad         = ehca_process_mad;
-       shca->ib_device.mmap                = ehca_mmap;
-       shca->ib_device.dma_ops             = &ehca_dma_mapping_ops;
-       shca->ib_device.get_port_immutable  = ehca_port_immutable;
-
-       if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
-               shca->ib_device.uverbs_cmd_mask |=
-                       (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
-                       (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
-                       (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
-                       (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
-
-               shca->ib_device.create_srq          = ehca_create_srq;
-               shca->ib_device.modify_srq          = ehca_modify_srq;
-               shca->ib_device.query_srq           = ehca_query_srq;
-               shca->ib_device.destroy_srq         = ehca_destroy_srq;
-               shca->ib_device.post_srq_recv       = ehca_post_srq_recv;
-       }
-
-       return ret;
-}
-
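-/*
- * AQP1 handling: ehca_create_aqp1() sets up, per port, a small CQ and the
- * GSI (QP1) queue pair used by the management stack; ehca_destroy_aqp1()
- * tears both down again.
- */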
-static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
-{
-       struct ehca_sport *sport = &shca->sport[port - 1];
-       struct ib_cq *ibcq;
-       struct ib_qp *ibqp;
-       struct ib_qp_init_attr qp_init_attr;
-       struct ib_cq_init_attr cq_attr = {};
-       int ret;
-
-       if (sport->ibcq_aqp1) {
-               ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
-               return -EPERM;
-       }
-
-       cq_attr.cqe = 10;
-       ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
-                           &cq_attr);
-       if (IS_ERR(ibcq)) {
-               ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
-               return PTR_ERR(ibcq);
-       }
-       sport->ibcq_aqp1 = ibcq;
-
-       if (sport->ibqp_sqp[IB_QPT_GSI]) {
-               ehca_err(&shca->ib_device, "AQP1 QP is already created.");
-               ret = -EPERM;
-               goto create_aqp1;
-       }
-
-       memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
-       qp_init_attr.send_cq          = ibcq;
-       qp_init_attr.recv_cq          = ibcq;
-       qp_init_attr.sq_sig_type      = IB_SIGNAL_ALL_WR;
-       qp_init_attr.cap.max_send_wr  = 100;
-       qp_init_attr.cap.max_recv_wr  = 100;
-       qp_init_attr.cap.max_send_sge = 2;
-       qp_init_attr.cap.max_recv_sge = 1;
-       qp_init_attr.qp_type          = IB_QPT_GSI;
-       qp_init_attr.port_num         = port;
-       qp_init_attr.qp_context       = NULL;
-       qp_init_attr.event_handler    = NULL;
-       qp_init_attr.srq              = NULL;
-
-       ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
-       if (IS_ERR(ibqp)) {
-               ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
-               ret = PTR_ERR(ibqp);
-               goto create_aqp1;
-       }
-       sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
-
-       return 0;
-
-create_aqp1:
-       ib_destroy_cq(sport->ibcq_aqp1);
-       return ret;
-}
-
-static int ehca_destroy_aqp1(struct ehca_sport *sport)
-{
-       int ret;
-
-       ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
-       if (ret) {
-               ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
-               return ret;
-       }
-
-       ret = ib_destroy_cq(sport->ibcq_aqp1);
-       if (ret)
-               ehca_gen_err("Cannot destroy AQP1 CQ. ret=%i", ret);
-
-       return ret;
-}
-
-static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
-}
-
-static ssize_t ehca_store_debug_level(struct device_driver *ddp,
-                                     const char *buf, size_t count)
-{
-       int value = (*buf) - '0';
-       if (value >= 0 && value <= 9)
-               ehca_debug_level = value;
-       return 1;
-}
-
-static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
-                  ehca_show_debug_level, ehca_store_debug_level);
-
-static struct attribute *ehca_drv_attrs[] = {
-       &driver_attr_debug_level.attr,
-       NULL
-};
-
-static struct attribute_group ehca_drv_attr_grp = {
-       .attrs = ehca_drv_attrs
-};
-
-static const struct attribute_group *ehca_drv_attr_groups[] = {
-       &ehca_drv_attr_grp,
-       NULL,
-};
-
-#define EHCA_RESOURCE_ATTR(name)                                           \
-static ssize_t  ehca_show_##name(struct device *dev,                       \
-                                struct device_attribute *attr,            \
-                                char *buf)                                \
-{                                                                         \
-       struct ehca_shca *shca;                                            \
-       struct hipz_query_hca *rblock;                                     \
-       int data;                                                          \
-                                                                          \
-       shca = dev_get_drvdata(dev);                                       \
-                                                                          \
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);                      \
-       if (!rblock) {                                                     \
-               dev_err(dev, "Can't allocate rblock memory.\n");           \
-               return 0;                                                  \
-       }                                                                  \
-                                                                          \
-       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
-               dev_err(dev, "Can't query device properties\n");           \
-               ehca_free_fw_ctrlblock(rblock);                            \
-               return 0;                                                  \
-       }                                                                  \
-                                                                          \
-       data = rblock->name;                                               \
-       ehca_free_fw_ctrlblock(rblock);                                    \
-                                                                          \
-       if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1))     \
-               return snprintf(buf, 256, "1\n");                          \
-       else                                                               \
-               return snprintf(buf, 256, "%d\n", data);                   \
-                                                                          \
-}                                                                         \
-static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
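-/*
- * Each EHCA_RESOURCE_ATTR(x) above expands to a read-only sysfs show routine
- * that allocates a firmware control block, queries the HCA via
- * hipz_h_query_hca() and prints the field of the same name from the returned
- * hipz_query_hca block (num_ports is special-cased when ehca_nr_ports == 1).
- */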
-
-EHCA_RESOURCE_ATTR(num_ports);
-EHCA_RESOURCE_ATTR(hw_ver);
-EHCA_RESOURCE_ATTR(max_eq);
-EHCA_RESOURCE_ATTR(cur_eq);
-EHCA_RESOURCE_ATTR(max_cq);
-EHCA_RESOURCE_ATTR(cur_cq);
-EHCA_RESOURCE_ATTR(max_qp);
-EHCA_RESOURCE_ATTR(cur_qp);
-EHCA_RESOURCE_ATTR(max_mr);
-EHCA_RESOURCE_ATTR(cur_mr);
-EHCA_RESOURCE_ATTR(max_mw);
-EHCA_RESOURCE_ATTR(cur_mw);
-EHCA_RESOURCE_ATTR(max_pd);
-EHCA_RESOURCE_ATTR(max_ah);
-
-static ssize_t ehca_show_adapter_handle(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct ehca_shca *shca = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
-
-}
-static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
-
-static struct attribute *ehca_dev_attrs[] = {
-       &dev_attr_adapter_handle.attr,
-       &dev_attr_num_ports.attr,
-       &dev_attr_hw_ver.attr,
-       &dev_attr_max_eq.attr,
-       &dev_attr_cur_eq.attr,
-       &dev_attr_max_cq.attr,
-       &dev_attr_cur_cq.attr,
-       &dev_attr_max_qp.attr,
-       &dev_attr_cur_qp.attr,
-       &dev_attr_max_mr.attr,
-       &dev_attr_cur_mr.attr,
-       &dev_attr_max_mw.attr,
-       &dev_attr_cur_mw.attr,
-       &dev_attr_max_pd.attr,
-       &dev_attr_max_ah.attr,
-       NULL
-};
-
-static struct attribute_group ehca_dev_attr_grp = {
-       .attrs = ehca_dev_attrs
-};
-
-static int ehca_probe(struct platform_device *dev)
-{
-       struct ehca_shca *shca;
-       const u64 *handle;
-       struct ib_pd *ibpd;
-       int ret, i, eq_size;
-       unsigned long flags;
-
-       handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
-       if (!handle) {
-               ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
-                            dev->dev.of_node->full_name);
-               return -ENODEV;
-       }
-
-       if (!(*handle)) {
-               ehca_gen_err("Wrong eHCA handle for adapter: %s.",
-                            dev->dev.of_node->full_name);
-               return -ENODEV;
-       }
-
-       shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
-       if (!shca) {
-               ehca_gen_err("Cannot allocate shca memory.");
-               return -ENOMEM;
-       }
-
-       mutex_init(&shca->modify_mutex);
-       atomic_set(&shca->num_cqs, 0);
-       atomic_set(&shca->num_qps, 0);
-       shca->max_num_qps = ehca_max_qp;
-       shca->max_num_cqs = ehca_max_cq;
-
-       for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
-               spin_lock_init(&shca->sport[i].mod_sqp_lock);
-
-       shca->ofdev = dev;
-       shca->ipz_hca_handle.handle = *handle;
-       dev_set_drvdata(&dev->dev, shca);
-
-       ret = ehca_sense_attributes(shca);
-       if (ret < 0) {
-               ehca_gen_err("Cannot sense eHCA attributes.");
-               goto probe1;
-       }
-
-       ret = ehca_init_device(shca);
-       if (ret) {
-               ehca_gen_err("Cannot init ehca device struct");
-               goto probe1;
-       }
-
-       eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
-       /* create event queues */
-       ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
-       if (ret) {
-               ehca_err(&shca->ib_device, "Cannot create EQ.");
-               goto probe1;
-       }
-
-       ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
-       if (ret) {
-               ehca_err(&shca->ib_device, "Cannot create NEQ.");
-               goto probe3;
-       }
-
-       /* create internal protection domain */
-       ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
-       if (IS_ERR(ibpd)) {
-               ehca_err(&shca->ib_device, "Cannot create internal PD.");
-               ret = PTR_ERR(ibpd);
-               goto probe4;
-       }
-
-       shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
-       shca->pd->ib_pd.device = &shca->ib_device;
-
-       /* create internal max MR */
-       ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
-
-       if (ret) {
-               ehca_err(&shca->ib_device, "Cannot create internal MR ret=%i",
-                        ret);
-               goto probe5;
-       }
-
-       ret = ib_register_device(&shca->ib_device, NULL);
-       if (ret) {
-               ehca_err(&shca->ib_device,
-                        "ib_register_device() failed ret=%i", ret);
-               goto probe6;
-       }
-
-       /* create AQP1 for port 1 */
-       if (ehca_open_aqp1 == 1) {
-               shca->sport[0].port_state = IB_PORT_DOWN;
-               ret = ehca_create_aqp1(shca, 1);
-               if (ret) {
-                       ehca_err(&shca->ib_device,
-                                "Cannot create AQP1 for port 1.");
-                       goto probe7;
-               }
-       }
-
-       /* create AQP1 for port 2 */
-       if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
-               shca->sport[1].port_state = IB_PORT_DOWN;
-               ret = ehca_create_aqp1(shca, 2);
-               if (ret) {
-                       ehca_err(&shca->ib_device,
-                                "Cannot create AQP1 for port 2.");
-                       goto probe8;
-               }
-       }
-
-       ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
-       if (ret) /* only complain; we can live without attributes */
-               ehca_err(&shca->ib_device,
-                        "Cannot create device attributes ret=%d", ret);
-
-       spin_lock_irqsave(&shca_list_lock, flags);
-       list_add(&shca->shca_list, &shca_list);
-       spin_unlock_irqrestore(&shca_list_lock, flags);
-
-       return 0;
-
-probe8:
-       ret = ehca_destroy_aqp1(&shca->sport[0]);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy AQP1 for port 1. ret=%i", ret);
-
-probe7:
-       ib_unregister_device(&shca->ib_device);
-
-probe6:
-       ret = ehca_dereg_internal_maxmr(shca);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy internal MR. ret=%x", ret);
-
-probe5:
-       ret = ehca_dealloc_pd(&shca->pd->ib_pd);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy internal PD. ret=%x", ret);
-
-probe4:
-       ret = ehca_destroy_eq(shca, &shca->neq);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy NEQ. ret=%x", ret);
-
-probe3:
-       ret = ehca_destroy_eq(shca, &shca->eq);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy EQ. ret=%x", ret);
-
-probe1:
-       ib_dealloc_device(&shca->ib_device);
-
-       return -EINVAL;
-}
-
-static int ehca_remove(struct platform_device *dev)
-{
-       struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
-       unsigned long flags;
-       int ret;
-
-       sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
-
-       if (ehca_open_aqp1 == 1) {
-               int i;
-               for (i = 0; i < shca->num_ports; i++) {
-                       ret = ehca_destroy_aqp1(&shca->sport[i]);
-                       if (ret)
-                               ehca_err(&shca->ib_device,
-                                        "Cannot destroy AQP1 for port %x "
-                                        "ret=%i", i, ret);
-               }
-       }
-
-       ib_unregister_device(&shca->ib_device);
-
-       ret = ehca_dereg_internal_maxmr(shca);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy internal MR. ret=%i", ret);
-
-       ret = ehca_dealloc_pd(&shca->pd->ib_pd);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy internal PD. ret=%i", ret);
-
-       ret = ehca_destroy_eq(shca, &shca->eq);
-       if (ret)
-               ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%i", ret);
-
-       ret = ehca_destroy_eq(shca, &shca->neq);
-       if (ret)
-               ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%i", ret);
-
-       ib_dealloc_device(&shca->ib_device);
-
-       spin_lock_irqsave(&shca_list_lock, flags);
-       list_del(&shca->shca_list);
-       spin_unlock_irqrestore(&shca_list_lock, flags);
-
-       return ret;
-}
-
-static struct of_device_id ehca_device_table[] =
-{
-       {
-               .name       = "lhca",
-               .compatible = "IBM,lhca",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, ehca_device_table);
-
-static struct platform_driver ehca_driver = {
-       .probe       = ehca_probe,
-       .remove      = ehca_remove,
-       .driver = {
-               .name = "ehca",
-               .owner = THIS_MODULE,
-               .groups = ehca_drv_attr_groups,
-               .of_match_table = ehca_device_table,
-       },
-};
-
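-/*
- * Deadman timer callback, armed once per second from ehca_module_init() when
- * ehca_poll_all_eqs is set: if an EQ's consumer offset has not advanced
- * across a few samples, process the EQ by hand so that completions are not
- * lost to a missed interrupt.
- */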
-void ehca_poll_eqs(unsigned long data)
-{
-       struct ehca_shca *shca;
-
-       spin_lock(&shca_list_lock);
-       list_for_each_entry(shca, &shca_list, shca_list) {
-               if (shca->eq.is_initialized) {
-                       /* call deadman proc only if eq ptr does not change */
-                       struct ehca_eq *eq = &shca->eq;
-                       int max = 3;
-                       volatile u64 q_ofs, q_ofs2;
-                       unsigned long flags;
-                       spin_lock_irqsave(&eq->spinlock, flags);
-                       q_ofs = eq->ipz_queue.current_q_offset;
-                       spin_unlock_irqrestore(&eq->spinlock, flags);
-                       do {
-                               spin_lock_irqsave(&eq->spinlock, flags);
-                               q_ofs2 = eq->ipz_queue.current_q_offset;
-                               spin_unlock_irqrestore(&eq->spinlock, flags);
-                               max--;
-                       } while (q_ofs == q_ofs2 && max > 0);
-                       if (q_ofs == q_ofs2)
-                               ehca_process_eq(shca, 0);
-               }
-       }
-       mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
-       spin_unlock(&shca_list_lock);
-}
-
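-/*
- * Memory hotplug notifier: DMEM add/remove is only permitted while no eHCA
- * adapter is attached to the LPAR, so GOING_ONLINE/GOING_OFFLINE is vetoed
- * with NOTIFY_BAD whenever shca_list is non-empty.
- */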
-static int ehca_mem_notifier(struct notifier_block *nb,
-                            unsigned long action, void *data)
-{
-       static unsigned long ehca_dmem_warn_time;
-       unsigned long flags;
-
-       switch (action) {
-       case MEM_CANCEL_OFFLINE:
-       case MEM_CANCEL_ONLINE:
-       case MEM_ONLINE:
-       case MEM_OFFLINE:
-               return NOTIFY_OK;
-       case MEM_GOING_ONLINE:
-       case MEM_GOING_OFFLINE:
-               /* only ok if no hca is attached to the lpar */
-               spin_lock_irqsave(&shca_list_lock, flags);
-               if (list_empty(&shca_list)) {
-                       spin_unlock_irqrestore(&shca_list_lock, flags);
-                       return NOTIFY_OK;
-               } else {
-                       spin_unlock_irqrestore(&shca_list_lock, flags);
-                       if (printk_timed_ratelimit(&ehca_dmem_warn_time,
-                                                  30 * 1000))
-                               ehca_gen_err("DMEM operations are not allowed "
-                                            "in conjunction with eHCA");
-                       return NOTIFY_BAD;
-               }
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block ehca_mem_nb = {
-       .notifier_call = ehca_mem_notifier,
-};
-
-static int __init ehca_module_init(void)
-{
-       int ret;
-
-       printk(KERN_INFO "eHCA Infiniband Device Driver "
-              "(Version " HCAD_VERSION ")\n");
-
-       ret = ehca_create_comp_pool();
-       if (ret) {
-               ehca_gen_err("Cannot create comp pool.");
-               return ret;
-       }
-
-       ret = ehca_create_slab_caches();
-       if (ret) {
-               ehca_gen_err("Cannot create SLAB caches");
-               ret = -ENOMEM;
-               goto module_init1;
-       }
-
-       ret = ehca_create_busmap();
-       if (ret) {
-               ehca_gen_err("Cannot create busmap.");
-               goto module_init2;
-       }
-
-       ret = ibmebus_register_driver(&ehca_driver);
-       if (ret) {
-               ehca_gen_err("Cannot register eHCA device driver");
-               ret = -EINVAL;
-               goto module_init3;
-       }
-
-       ret = register_memory_notifier(&ehca_mem_nb);
-       if (ret) {
-               ehca_gen_err("Failed registering memory add/remove notifier");
-               goto module_init4;
-       }
-
-       if (ehca_poll_all_eqs != 1) {
-               ehca_gen_err("WARNING!!!");
-               ehca_gen_err("It is possible to lose interrupts.");
-       } else {
-               init_timer(&poll_eqs_timer);
-               poll_eqs_timer.function = ehca_poll_eqs;
-               poll_eqs_timer.expires = jiffies + HZ;
-               add_timer(&poll_eqs_timer);
-       }
-
-       return 0;
-
-module_init4:
-       ibmebus_unregister_driver(&ehca_driver);
-
-module_init3:
-       ehca_destroy_busmap();
-
-module_init2:
-       ehca_destroy_slab_caches();
-
-module_init1:
-       ehca_destroy_comp_pool();
-       return ret;
-};
-
-static void __exit ehca_module_exit(void)
-{
-       if (ehca_poll_all_eqs == 1)
-               del_timer_sync(&poll_eqs_timer);
-
-       ibmebus_unregister_driver(&ehca_driver);
-
-       unregister_memory_notifier(&ehca_mem_nb);
-
-       ehca_destroy_busmap();
-
-       ehca_destroy_slab_caches();
-
-       ehca_destroy_comp_pool();
-
-       idr_destroy(&ehca_cq_idr);
-       idr_destroy(&ehca_qp_idr);
-};
-
-module_init(ehca_module_init);
-module_exit(ehca_module_exit);
diff --git a/drivers/staging/rdma/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
deleted file mode 100644 (file)
index cec1815..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  mcast  functions
- *
- *  Authors: Khadija Souissi <souissik@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define MAX_MC_LID 0xFFFE
-#define MIN_MC_LID 0xC000      /* Multicast limits */
-#define EHCA_VALID_MULTICAST_GID(gid)  ((gid)[0] == 0xFF)
-#define EHCA_VALID_MULTICAST_LID(lid) \
-       (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
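-/*
- * Per the InfiniBand spec, multicast GIDs start with 0xFF and multicast LIDs
- * lie in the range 0xC000-0xFFFE; the two macros above check exactly that.
- */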
-
-int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-       struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
-                                             ib_device);
-       union ib_gid my_gid;
-       u64 subnet_prefix, interface_id, h_ret;
-
-       if (ibqp->qp_type != IB_QPT_UD) {
-               ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
-               return -EINVAL;
-       }
-
-       if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-               ehca_err(ibqp->device, "invalid multicast gid");
-               return -EINVAL;
-       } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-               ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
-               return -EINVAL;
-       }
-
-       memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
-       subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
-       interface_id = be64_to_cpu(my_gid.global.interface_id);
-       h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
-                                  my_qp->ipz_qp_handle,
-                                  my_qp->galpas.kernel,
-                                  lid, subnet_prefix, interface_id);
-       if (h_ret != H_SUCCESS)
-               ehca_err(ibqp->device,
-                        "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
-                        "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
-       return ehca2ib_return_code(h_ret);
-}
-
-int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-       struct ehca_shca *shca = container_of(ibqp->pd->device,
-                                             struct ehca_shca, ib_device);
-       union ib_gid my_gid;
-       u64 subnet_prefix, interface_id, h_ret;
-
-       if (ibqp->qp_type != IB_QPT_UD) {
-               ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
-               return -EINVAL;
-       }
-
-       if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-               ehca_err(ibqp->device, "invalid multicast gid");
-               return -EINVAL;
-       } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-               ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
-               return -EINVAL;
-       }
-
-       memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
-       subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
-       interface_id = be64_to_cpu(my_gid.global.interface_id);
-       h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
-                                  my_qp->ipz_qp_handle,
-                                  my_qp->galpas.kernel,
-                                  lid, subnet_prefix, interface_id);
-       if (h_ret != H_SUCCESS)
-               ehca_err(ibqp->device,
-                        "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
-                        "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
-       return ehca2ib_return_code(h_ret);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
deleted file mode 100644 (file)
index 3367205..0000000
+++ /dev/null
@@ -1,2202 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  MR/MW functions
- *
- *  Authors: Dietmar Decker <ddecker@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <rdma/ib_umem.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "hcp_if.h"
-#include "hipz_hw.h"
-
-#define NUM_CHUNKS(length, chunk_size) \
-       (((length) + (chunk_size - 1)) / (chunk_size))
-
-/* max number of rpages (per hcall register_rpages) */
-#define MAX_RPAGES 512
-
-/* DMEM toleration management */
-#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
-#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
-#define EHCA_HUGEPAGESHIFT     34
-#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
-#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
-#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
-#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
-#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
-#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
-#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
-#define EHCA_DIR_MAP_SIZE (0x10000)
-#define EHCA_ENT_MAP_SIZE (0x10000)
-#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
-
-static unsigned long ehca_mr_len;
-
-/*
- * Memory map data structures
- */
-struct ehca_dir_bmap {
-       u64 ent[EHCA_MAP_ENTRIES];
-};
-struct ehca_top_bmap {
-       struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
-};
-struct ehca_bmap {
-       struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
-};
-
-static struct ehca_bmap *ehca_bmap;
-
-static struct kmem_cache *mr_cache;
-static struct kmem_cache *mw_cache;
-
-enum ehca_mr_pgsize {
-       EHCA_MR_PGSIZE4K  = 0x1000L,
-       EHCA_MR_PGSIZE64K = 0x10000L,
-       EHCA_MR_PGSIZE1M  = 0x100000L,
-       EHCA_MR_PGSIZE16M = 0x1000000L
-};
-
-#define EHCA_MR_PGSHIFT4K  12
-#define EHCA_MR_PGSHIFT64K 16
-#define EHCA_MR_PGSHIFT1M  20
-#define EHCA_MR_PGSHIFT16M 24
-
-static u64 ehca_map_vaddr(void *caddr);
-
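-/*
- * Translate an MR page size into its firmware encoding:
- * 4K -> 0, 64K -> 1, 1M -> 2, 16M -> 3, i.e. (log2(size) - 12) / 4.
- */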
-static u32 ehca_encode_hwpage_size(u32 pgsize)
-{
-       int log = ilog2(pgsize);
-       WARN_ON(log < 12 || log > 24 || log & 3);
-       return (log - 12) / 4;
-}
-
-static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
-{
-       return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
-}
-
-static struct ehca_mr *ehca_mr_new(void)
-{
-       struct ehca_mr *me;
-
-       me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
-       if (me)
-               spin_lock_init(&me->mrlock);
-       else
-               ehca_gen_err("alloc failed");
-
-       return me;
-}
-
-static void ehca_mr_delete(struct ehca_mr *me)
-{
-       kmem_cache_free(mr_cache, me);
-}
-
-static struct ehca_mw *ehca_mw_new(void)
-{
-       struct ehca_mw *me;
-
-       me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
-       if (me)
-               spin_lock_init(&me->mwlock);
-       else
-               ehca_gen_err("alloc failed");
-
-       return me;
-}
-
-static void ehca_mw_delete(struct ehca_mw *me)
-{
-       kmem_cache_free(mw_cache, me);
-}
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
-       struct ib_mr *ib_mr;
-       int ret;
-       struct ehca_mr *e_maxmr;
-       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_shca *shca =
-               container_of(pd->device, struct ehca_shca, ib_device);
-
-       if (shca->maxmr) {
-               e_maxmr = ehca_mr_new();
-               if (!e_maxmr) {
-                       ehca_err(&shca->ib_device, "out of memory");
-                       ib_mr = ERR_PTR(-ENOMEM);
-                       goto get_dma_mr_exit0;
-               }
-
-               ret = ehca_reg_maxmr(shca, e_maxmr,
-                                    (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
-                                    mr_access_flags, e_pd,
-                                    &e_maxmr->ib.ib_mr.lkey,
-                                    &e_maxmr->ib.ib_mr.rkey);
-               if (ret) {
-                       ehca_mr_delete(e_maxmr);
-                       ib_mr = ERR_PTR(ret);
-                       goto get_dma_mr_exit0;
-               }
-               ib_mr = &e_maxmr->ib.ib_mr;
-       } else {
-               ehca_err(&shca->ib_device, "no internal max-MR exists!");
-               ib_mr = ERR_PTR(-EINVAL);
-               goto get_dma_mr_exit0;
-       }
-
-get_dma_mr_exit0:
-       if (IS_ERR(ib_mr))
-               ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
-                        PTR_ERR(ib_mr), pd, mr_access_flags);
-       return ib_mr;
-} /* end ehca_get_dma_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                              u64 virt, int mr_access_flags,
-                              struct ib_udata *udata)
-{
-       struct ib_mr *ib_mr;
-       struct ehca_mr *e_mr;
-       struct ehca_shca *shca =
-               container_of(pd->device, struct ehca_shca, ib_device);
-       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_mr_pginfo pginfo;
-       int ret, page_shift;
-       u32 num_kpages;
-       u32 num_hwpages;
-       u64 hwpage_size;
-
-       if (!pd) {
-               ehca_gen_err("bad pd=%p", pd);
-               return ERR_PTR(-EFAULT);
-       }
-
-       if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
-            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
-           ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
-            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-               /*
-                * Remote Write Access requires Local Write Access
-                * Remote Atomic Access requires Local Write Access
-                */
-               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-                        mr_access_flags);
-               ib_mr = ERR_PTR(-EINVAL);
-               goto reg_user_mr_exit0;
-       }
-
-       if (length == 0 || virt + length < virt) {
-               ehca_err(pd->device, "bad input values: length=%llx "
-                        "virt_base=%llx", length, virt);
-               ib_mr = ERR_PTR(-EINVAL);
-               goto reg_user_mr_exit0;
-       }
-
-       e_mr = ehca_mr_new();
-       if (!e_mr) {
-               ehca_err(pd->device, "out of memory");
-               ib_mr = ERR_PTR(-ENOMEM);
-               goto reg_user_mr_exit0;
-       }
-
-       e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
-                                mr_access_flags, 0);
-       if (IS_ERR(e_mr->umem)) {
-               ib_mr = (void *)e_mr->umem;
-               goto reg_user_mr_exit1;
-       }
-
-       if (e_mr->umem->page_size != PAGE_SIZE) {
-               ehca_err(pd->device, "page size not supported, "
-                        "e_mr->umem->page_size=%x", e_mr->umem->page_size);
-               ib_mr = ERR_PTR(-EINVAL);
-               goto reg_user_mr_exit2;
-       }
-
-       /* determine number of MR pages */
-       num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
-       /* select proper hw_pgsize */
-       page_shift = PAGE_SHIFT;
-       if (e_mr->umem->hugetlb) {
-               /* determine page_shift, clamp between 4K and 16M */
-               page_shift = (fls64(length - 1) + 3) & ~3;
-               page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
-                                EHCA_MR_PGSHIFT16M);
-       }
-       hwpage_size = 1UL << page_shift;
-
-       /* now that we have the desired page size, shift until it's
-        * supported, too. 4K is always supported, so this terminates.
-        */
-       while (!(hwpage_size & shca->hca_cap_mr_pgsize))
-               hwpage_size >>= 4;
-
-reg_user_mr_fallback:
-       num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
-       /* register MR on HCA */
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.type = EHCA_MR_PGI_USER;
-       pginfo.hwpage_size = hwpage_size;
-       pginfo.num_kpages = num_kpages;
-       pginfo.num_hwpages = num_hwpages;
-       pginfo.u.usr.region = e_mr->umem;
-       pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
-       pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
-       ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
-                         e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-                         &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
-       if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
-               ehca_warn(pd->device, "failed to register mr "
-                         "with hwpage_size=%llx", hwpage_size);
-               ehca_info(pd->device, "try to register mr with "
-                         "kpage_size=%lx", PAGE_SIZE);
-               /*
-                * this means kpages are not contiguous for a hw page
-                * try kernel page size as fallback solution
-                */
-               hwpage_size = PAGE_SIZE;
-               goto reg_user_mr_fallback;
-       }
-       if (ret) {
-               ib_mr = ERR_PTR(ret);
-               goto reg_user_mr_exit2;
-       }
-
-       /* successful registration of all pages */
-       return &e_mr->ib.ib_mr;
-
-reg_user_mr_exit2:
-       ib_umem_release(e_mr->umem);
-reg_user_mr_exit1:
-       ehca_mr_delete(e_mr);
-reg_user_mr_exit0:
-       if (IS_ERR(ib_mr))
-               ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
-                        PTR_ERR(ib_mr), pd, mr_access_flags, udata);
-       return ib_mr;
-} /* end ehca_reg_user_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_mr(struct ib_mr *mr)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_shca *shca =
-               container_of(mr->device, struct ehca_shca, ib_device);
-       struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-
-       if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
-               ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
-                        "e_mr->flags=%x", mr, e_mr, e_mr->flags);
-               ret = -EINVAL;
-               goto dereg_mr_exit0;
-       } else if (e_mr == shca->maxmr) {
-               /* should be impossible, however reject to be sure */
-               ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
-                        "shca->maxmr=%p mr->lkey=%x",
-                        mr, shca->maxmr, mr->lkey);
-               ret = -EINVAL;
-               goto dereg_mr_exit0;
-       }
-
-       /* TODO: BUSY: MR still has bound window(s) */
-       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
-                        "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
-                        h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
-                        e_mr->ipz_mr_handle.handle, mr->lkey);
-               ret = ehca2ib_return_code(h_ret);
-               goto dereg_mr_exit0;
-       }
-
-       if (e_mr->umem)
-               ib_umem_release(e_mr->umem);
-
-       /* successful deregistration */
-       ehca_mr_delete(e_mr);
-
-dereg_mr_exit0:
-       if (ret)
-               ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
-       return ret;
-} /* end ehca_dereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
-{
-       struct ib_mw *ib_mw;
-       u64 h_ret;
-       struct ehca_mw *e_mw;
-       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_shca *shca =
-               container_of(pd->device, struct ehca_shca, ib_device);
-       struct ehca_mw_hipzout_parms hipzout;
-
-       if (type != IB_MW_TYPE_1)
-               return ERR_PTR(-EINVAL);
-
-       e_mw = ehca_mw_new();
-       if (!e_mw) {
-               ib_mw = ERR_PTR(-ENOMEM);
-               goto alloc_mw_exit0;
-       }
-
-       h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
-                                        e_pd->fw_pd, &hipzout);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
-                        "shca=%p hca_hndl=%llx mw=%p",
-                        h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
-               ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
-               goto alloc_mw_exit1;
-       }
-       /* successful MW allocation */
-       e_mw->ipz_mw_handle = hipzout.handle;
-       e_mw->ib_mw.rkey    = hipzout.rkey;
-       return &e_mw->ib_mw;
-
-alloc_mw_exit1:
-       ehca_mw_delete(e_mw);
-alloc_mw_exit0:
-       if (IS_ERR(ib_mw))
-               ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
-       return ib_mw;
-} /* end ehca_alloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_mw(struct ib_mw *mw)
-{
-       u64 h_ret;
-       struct ehca_shca *shca =
-               container_of(mw->device, struct ehca_shca, ib_device);
-       struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
-
-       h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
-                        "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
-                        h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
-                        e_mw->ipz_mw_handle.handle);
-               return ehca2ib_return_code(h_ret);
-       }
-       /* successful deallocation */
-       ehca_mw_delete(e_mw);
-       return 0;
-} /* end ehca_dealloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
-                             int mr_access_flags,
-                             struct ib_fmr_attr *fmr_attr)
-{
-       struct ib_fmr *ib_fmr;
-       struct ehca_shca *shca =
-               container_of(pd->device, struct ehca_shca, ib_device);
-       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_mr *e_fmr;
-       int ret;
-       u32 tmp_lkey, tmp_rkey;
-       struct ehca_mr_pginfo pginfo;
-       u64 hw_pgsize;
-
-       /* check other parameters */
-       if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
-            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
-           ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
-            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-               /*
-                * Remote Write Access requires Local Write Access
-                * Remote Atomic Access requires Local Write Access
-                */
-               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-                        mr_access_flags);
-               ib_fmr = ERR_PTR(-EINVAL);
-               goto alloc_fmr_exit0;
-       }
-       if (mr_access_flags & IB_ACCESS_MW_BIND) {
-               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-                        mr_access_flags);
-               ib_fmr = ERR_PTR(-EINVAL);
-               goto alloc_fmr_exit0;
-       }
-       if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
-               ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
-                        "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
-                        fmr_attr->max_pages, fmr_attr->max_maps,
-                        fmr_attr->page_shift);
-               ib_fmr = ERR_PTR(-EINVAL);
-               goto alloc_fmr_exit0;
-       }
-
-       hw_pgsize = 1 << fmr_attr->page_shift;
-       if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
-               ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
-                        fmr_attr->page_shift);
-               ib_fmr = ERR_PTR(-EINVAL);
-               goto alloc_fmr_exit0;
-       }
-
-       e_fmr = ehca_mr_new();
-       if (!e_fmr) {
-               ib_fmr = ERR_PTR(-ENOMEM);
-               goto alloc_fmr_exit0;
-       }
-       e_fmr->flags |= EHCA_MR_FLAG_FMR;
-
-       /* register MR on HCA */
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.hwpage_size = hw_pgsize;
-       /*
-        * pginfo.num_hwpages==0, ie register_rpages() will not be called
-        * but deferred to map_phys_fmr()
-        */
-       ret = ehca_reg_mr(shca, e_fmr, NULL,
-                         fmr_attr->max_pages * (1 << fmr_attr->page_shift),
-                         mr_access_flags, e_pd, &pginfo,
-                         &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
-       if (ret) {
-               ib_fmr = ERR_PTR(ret);
-               goto alloc_fmr_exit1;
-       }
-
-       /* successful */
-       e_fmr->hwpage_size = hw_pgsize;
-       e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
-       e_fmr->fmr_max_pages = fmr_attr->max_pages;
-       e_fmr->fmr_max_maps = fmr_attr->max_maps;
-       e_fmr->fmr_map_cnt = 0;
-       return &e_fmr->ib.ib_fmr;
-
-alloc_fmr_exit1:
-       ehca_mr_delete(e_fmr);
-alloc_fmr_exit0:
-       return ib_fmr;
-} /* end ehca_alloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
-                     u64 *page_list,
-                     int list_len,
-                     u64 iova)
-{
-       int ret;
-       struct ehca_shca *shca =
-               container_of(fmr->device, struct ehca_shca, ib_device);
-       struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-       struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
-       struct ehca_mr_pginfo pginfo;
-       u32 tmp_lkey, tmp_rkey;
-
-       if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-               ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
-                        e_fmr, e_fmr->flags);
-               ret = -EINVAL;
-               goto map_phys_fmr_exit0;
-       }
-       ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
-       if (ret)
-               goto map_phys_fmr_exit0;
-       if (iova % e_fmr->fmr_page_size) {
-               /* only whole-numbered pages */
-               ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
-                        iova, e_fmr->fmr_page_size);
-               ret = -EINVAL;
-               goto map_phys_fmr_exit0;
-       }
-       if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
-               /* HCAD does not limit the maps, however trace this anyway */
-               ehca_info(fmr->device, "map limit exceeded, fmr=%p "
-                         "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
-                         fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
-       }
-
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.type = EHCA_MR_PGI_FMR;
-       pginfo.num_kpages = list_len;
-       pginfo.hwpage_size = e_fmr->hwpage_size;
-       pginfo.num_hwpages =
-               list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
-       pginfo.u.fmr.page_list = page_list;
-       pginfo.next_hwpage =
-               (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
-       pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
-
-       ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
-                           list_len * e_fmr->fmr_page_size,
-                           e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
-       if (ret)
-               goto map_phys_fmr_exit0;
-
-       /* successful reregistration */
-       e_fmr->fmr_map_cnt++;
-       e_fmr->ib.ib_fmr.lkey = tmp_lkey;
-       e_fmr->ib.ib_fmr.rkey = tmp_rkey;
-       return 0;
-
-map_phys_fmr_exit0:
-       if (ret)
-               ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
-                        "iova=%llx", ret, fmr, page_list, list_len, iova);
-       return ret;
-} /* end ehca_map_phys_fmr() */
-
-/*----------------------------------------------------------------------*/
-
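-/*
- * Unmap all FMRs on the given list: first verify that every entry is an
- * FMR belonging to the same SHCA, then unmap them one by one, stopping
- * at the first failure.
- */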
-int ehca_unmap_fmr(struct list_head *fmr_list)
-{
-       int ret = 0;
-       struct ib_fmr *ib_fmr;
-       struct ehca_shca *shca = NULL;
-       struct ehca_shca *prev_shca;
-       struct ehca_mr *e_fmr;
-       u32 num_fmr = 0;
-       u32 unmap_fmr_cnt = 0;
-
-       /* check that all FMRs belong to the same SHCA, and check the internal flag */
-       list_for_each_entry(ib_fmr, fmr_list, list) {
-               prev_shca = shca;
-               shca = container_of(ib_fmr->device, struct ehca_shca,
-                                   ib_device);
-               e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
-               if ((shca != prev_shca) && prev_shca) {
-                       ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
-                                "prev_shca=%p e_fmr=%p",
-                                shca, prev_shca, e_fmr);
-                       ret = -EINVAL;
-                       goto unmap_fmr_exit0;
-               }
-               if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-                       ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
-                                "e_fmr->flags=%x", e_fmr, e_fmr->flags);
-                       ret = -EINVAL;
-                       goto unmap_fmr_exit0;
-               }
-               num_fmr++;
-       }
-
-       /* loop over all FMRs to unmap */
-       list_for_each_entry(ib_fmr, fmr_list, list) {
-               unmap_fmr_cnt++;
-               e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
-               shca = container_of(ib_fmr->device, struct ehca_shca,
-                                   ib_device);
-               ret = ehca_unmap_one_fmr(shca, e_fmr);
-               if (ret) {
-                       /* unmap failed, stop unmapping of rest of FMRs */
-                       ehca_err(&shca->ib_device, "unmap of one FMR failed, "
-                                "stop rest, e_fmr=%p num_fmr=%x "
-                                "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
-                                unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
-                       goto unmap_fmr_exit0;
-               }
-       }
-
-unmap_fmr_exit0:
-       if (ret)
-               ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
-                            ret, fmr_list, num_fmr, unmap_fmr_cnt);
-       return ret;
-} /* end ehca_unmap_fmr() */
-
-/*----------------------------------------------------------------------*/
-
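-/* Deallocate an FMR: free the firmware MR resource and delete the ehca_mr. */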
-int ehca_dealloc_fmr(struct ib_fmr *fmr)
-{
-       int ret;
-       u64 h_ret;
-       struct ehca_shca *shca =
-               container_of(fmr->device, struct ehca_shca, ib_device);
-       struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-
-       if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-               ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
-                        e_fmr, e_fmr->flags);
-               ret = -EINVAL;
-               goto free_fmr_exit0;
-       }
-
-       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
-                        "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
-                        h_ret, e_fmr, shca->ipz_hca_handle.handle,
-                        e_fmr->ipz_mr_handle.handle, fmr->lkey);
-               ret = ehca2ib_return_code(h_ret);
-               goto free_fmr_exit0;
-       }
-       /* successful deregistration */
-       ehca_mr_delete(e_fmr);
-       return 0;
-
-free_fmr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
-       return ret;
-} /* end ehca_dealloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
-                                  struct ehca_mr *e_mr,
-                                  struct ehca_mr_pginfo *pginfo);
-
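-/*
- * Register an MR with the HCA: allocate the firmware MR resource and
- * register its pages either from the bus map (EHCA_REG_BUSMAP_MR) or
- * from pginfo (EHCA_REG_MR); lkey/rkey are returned on success.
- */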
-int ehca_reg_mr(struct ehca_shca *shca,
-               struct ehca_mr *e_mr,
-               u64 *iova_start,
-               u64 size,
-               int acl,
-               struct ehca_pd *e_pd,
-               struct ehca_mr_pginfo *pginfo,
-               u32 *lkey, /*OUT*/
-               u32 *rkey, /*OUT*/
-               enum ehca_reg_type reg_type)
-{
-       int ret;
-       u64 h_ret;
-       u32 hipz_acl;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       ehca_mrmw_map_acl(acl, &hipz_acl);
-       ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
-       if (ehca_use_hp_mr == 1)
-               hipz_acl |= 0x00000001;
-
-       h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
-                                        (u64)iova_start, size, hipz_acl,
-                                        e_pd->fw_pd, &hipzout);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
-                        "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
-               ret = ehca2ib_return_code(h_ret);
-               goto ehca_reg_mr_exit0;
-       }
-
-       e_mr->ipz_mr_handle = hipzout.handle;
-
-       if (reg_type == EHCA_REG_BUSMAP_MR)
-               ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
-       else if (reg_type == EHCA_REG_MR)
-               ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
-       else
-               ret = -EINVAL;
-
-       if (ret)
-               goto ehca_reg_mr_exit1;
-
-       /* successful registration */
-       e_mr->num_kpages = pginfo->num_kpages;
-       e_mr->num_hwpages = pginfo->num_hwpages;
-       e_mr->hwpage_size = pginfo->hwpage_size;
-       e_mr->start = iova_start;
-       e_mr->size = size;
-       e_mr->acl = acl;
-       *lkey = hipzout.lkey;
-       *rkey = hipzout.rkey;
-       return 0;
-
-ehca_reg_mr_exit1:
-       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
-                        "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
-                        "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
-                        h_ret, shca, e_mr, iova_start, size, acl, e_pd,
-                        hipzout.lkey, pginfo, pginfo->num_kpages,
-                        pginfo->num_hwpages, ret);
-               ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
-                        "not recoverable");
-       }
-ehca_reg_mr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-                        "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
-                        "num_kpages=%llx num_hwpages=%llx",
-                        ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
-                        pginfo->num_kpages, pginfo->num_hwpages);
-       return ret;
-} /* end ehca_reg_mr() */
-
-/*----------------------------------------------------------------------*/
-
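-/*
- * Register the hardware pages of an MR with the firmware, at most
- * MAX_RPAGES pages per hCall; a no-op for FMRs, which have
- * num_hwpages == 0 at this point.
- */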
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
-                      struct ehca_mr *e_mr,
-                      struct ehca_mr_pginfo *pginfo)
-{
-       int ret = 0;
-       u64 h_ret;
-       u32 rnum;
-       u64 rpage;
-       u32 i;
-       u64 *kpage;
-
-       if (!pginfo->num_hwpages) /* in case of fmr */
-               return 0;
-
-       kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!kpage) {
-               ehca_err(&shca->ib_device, "kpage alloc failed");
-               ret = -ENOMEM;
-               goto ehca_reg_mr_rpages_exit0;
-       }
-
-       /* register at most MAX_RPAGES ehca MR pages per hCall */
-       for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
-
-               if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
-                       rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
-                       if (rnum == 0)
-                               rnum = MAX_RPAGES;      /* last shot is full */
-               } else
-                       rnum = MAX_RPAGES;
-
-               ret = ehca_set_pagebuf(pginfo, rnum, kpage);
-               if (ret) {
-                       ehca_err(&shca->ib_device, "ehca_set_pagebuf "
-                                "bad rc, ret=%i rnum=%x kpage=%p",
-                                ret, rnum, kpage);
-                       goto ehca_reg_mr_rpages_exit1;
-               }
-
-               if (rnum > 1) {
-                       rpage = __pa(kpage);
-                       if (!rpage) {
-                               ehca_err(&shca->ib_device, "kpage=%p i=%x",
-                                        kpage, i);
-                               ret = -EFAULT;
-                               goto ehca_reg_mr_rpages_exit1;
-                       }
-               } else
-                       rpage = *kpage;
-
-               h_ret = hipz_h_register_rpage_mr(
-                       shca->ipz_hca_handle, e_mr,
-                       ehca_encode_hwpage_size(pginfo->hwpage_size),
-                       0, rpage, rnum);
-
-               if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
-                       /*
-                        * check for 'registration complete'==H_SUCCESS
-                        * and for 'page registered'==H_PAGE_REGISTERED
-                        */
-                       if (h_ret != H_SUCCESS) {
-                               ehca_err(&shca->ib_device, "last "
-                                        "hipz_reg_rpage_mr failed, h_ret=%lli "
-                                        "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
-                                        " lkey=%x", h_ret, e_mr, i,
-                                        shca->ipz_hca_handle.handle,
-                                        e_mr->ipz_mr_handle.handle,
-                                        e_mr->ib.ib_mr.lkey);
-                               ret = ehca2ib_return_code(h_ret);
-                               break;
-                       } else
-                               ret = 0;
-               } else if (h_ret != H_PAGE_REGISTERED) {
-                       ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
-                                "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
-                                "mr_hndl=%llx", h_ret, e_mr, i,
-                                e_mr->ib.ib_mr.lkey,
-                                shca->ipz_hca_handle.handle,
-                                e_mr->ipz_mr_handle.handle);
-                       ret = ehca2ib_return_code(h_ret);
-                       break;
-               } else
-                       ret = 0;
-       } /* end for(i) */
-
-
-ehca_reg_mr_rpages_exit1:
-       ehca_free_fw_ctrlblock(kpage);
-ehca_reg_mr_rpages_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
-                        "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
-                        pginfo, pginfo->num_kpages, pginfo->num_hwpages);
-       return ret;
-} /* end ehca_reg_mr_rpages() */
-
-/*----------------------------------------------------------------------*/
-
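-/*
- * Reregister an MR with a single hipz_h_reregister_pmr hCall; returns
- * -EAGAIN if the firmware rejects it so the caller can fall back to the
- * 3-hCall path.
- */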
-inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
-                               struct ehca_mr *e_mr,
-                               u64 *iova_start,
-                               u64 size,
-                               u32 acl,
-                               struct ehca_pd *e_pd,
-                               struct ehca_mr_pginfo *pginfo,
-                               u32 *lkey, /*OUT*/
-                               u32 *rkey) /*OUT*/
-{
-       int ret;
-       u64 h_ret;
-       u32 hipz_acl;
-       u64 *kpage;
-       u64 rpage;
-       struct ehca_mr_pginfo pginfo_save;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       ehca_mrmw_map_acl(acl, &hipz_acl);
-       ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
-
-       kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!kpage) {
-               ehca_err(&shca->ib_device, "kpage alloc failed");
-               ret = -ENOMEM;
-               goto ehca_rereg_mr_rereg1_exit0;
-       }
-
-       pginfo_save = *pginfo;
-       ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
-       if (ret) {
-               ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
-                        "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
-                        "kpage=%p", e_mr, pginfo, pginfo->type,
-                        pginfo->num_kpages, pginfo->num_hwpages, kpage);
-               goto ehca_rereg_mr_rereg1_exit1;
-       }
-       rpage = __pa(kpage);
-       if (!rpage) {
-               ehca_err(&shca->ib_device, "kpage=%p", kpage);
-               ret = -EFAULT;
-               goto ehca_rereg_mr_rereg1_exit1;
-       }
-       h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
-                                     (u64)iova_start, size, hipz_acl,
-                                     e_pd->fw_pd, rpage, &hipzout);
-       if (h_ret != H_SUCCESS) {
-               /*
-                * reregistration unsuccessful; retry with the 3-hCall path,
-                * which is required e.g. in case of H_MR_CONDITION
-                * (MW bound or MR is shared)
-                */
-               ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
-                         "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
-               *pginfo = pginfo_save;
-               ret = -EAGAIN;
-       } else if ((u64 *)hipzout.vaddr != iova_start) {
-               ehca_err(&shca->ib_device, "PHYP changed iova_start in "
-                        "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
-                        "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
-                        hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
-                        e_mr->ib.ib_mr.lkey, hipzout.lkey);
-               ret = -EFAULT;
-       } else {
-               /*
-                * successful reregistration
-                * note: start and start_out are identical for eServer HCAs
-                */
-               e_mr->num_kpages = pginfo->num_kpages;
-               e_mr->num_hwpages = pginfo->num_hwpages;
-               e_mr->hwpage_size = pginfo->hwpage_size;
-               e_mr->start = iova_start;
-               e_mr->size = size;
-               e_mr->acl = acl;
-               *lkey = hipzout.lkey;
-               *rkey = hipzout.rkey;
-       }
-
-ehca_rereg_mr_rereg1_exit1:
-       ehca_free_fw_ctrlblock(kpage);
-ehca_rereg_mr_rereg1_exit0:
-       if (ret && (ret != -EAGAIN))
-               ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
-                        "pginfo=%p num_kpages=%llx num_hwpages=%llx",
-                        ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
-                        pginfo->num_hwpages);
-       return ret;
-} /* end ehca_rereg_mr_rereg1() */
-
-/*----------------------------------------------------------------------*/
-
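-/*
- * Reregister an MR, preferring the single-hCall path; large MRs, max-MRs
- * and firmware rejections fall back to the 3-hCall path (free the old
- * MR, clean the ehca_mr and register it again).
- */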
-int ehca_rereg_mr(struct ehca_shca *shca,
-                 struct ehca_mr *e_mr,
-                 u64 *iova_start,
-                 u64 size,
-                 int acl,
-                 struct ehca_pd *e_pd,
-                 struct ehca_mr_pginfo *pginfo,
-                 u32 *lkey,
-                 u32 *rkey)
-{
-       int ret = 0;
-       u64 h_ret;
-       int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
-       int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
-
-       /* first determine reregistration hCall(s) */
-       if ((pginfo->num_hwpages > MAX_RPAGES) ||
-           (e_mr->num_hwpages > MAX_RPAGES) ||
-           (pginfo->num_hwpages > e_mr->num_hwpages)) {
-               ehca_dbg(&shca->ib_device, "Rereg3 case, "
-                        "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
-                        pginfo->num_hwpages, e_mr->num_hwpages);
-               rereg_1_hcall = 0;
-               rereg_3_hcall = 1;
-       }
-
-       if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
-               rereg_1_hcall = 0;
-               rereg_3_hcall = 1;
-               e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
-               ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
-                        e_mr);
-       }
-
-       if (rereg_1_hcall) {
-               ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
-                                          acl, e_pd, pginfo, lkey, rkey);
-               if (ret) {
-                       if (ret == -EAGAIN)
-                               rereg_3_hcall = 1;
-                       else
-                               goto ehca_rereg_mr_exit0;
-               }
-       }
-
-       if (rereg_3_hcall) {
-               struct ehca_mr save_mr;
-
-               /* first deregister old MR */
-               h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-               if (h_ret != H_SUCCESS) {
-                       ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-                                "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
-                                "mr->lkey=%x",
-                                h_ret, e_mr, shca->ipz_hca_handle.handle,
-                                e_mr->ipz_mr_handle.handle,
-                                e_mr->ib.ib_mr.lkey);
-                       ret = ehca2ib_return_code(h_ret);
-                       goto ehca_rereg_mr_exit0;
-               }
-               /* clean ehca_mr_t, without changing struct ib_mr and lock */
-               save_mr = *e_mr;
-               ehca_mr_deletenew(e_mr);
-
-               /* set some MR values */
-               e_mr->flags = save_mr.flags;
-               e_mr->hwpage_size = save_mr.hwpage_size;
-               e_mr->fmr_page_size = save_mr.fmr_page_size;
-               e_mr->fmr_max_pages = save_mr.fmr_max_pages;
-               e_mr->fmr_max_maps = save_mr.fmr_max_maps;
-               e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
-
-               ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
-                                 e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
-               if (ret) {
-                       u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
-                       memcpy(&e_mr->flags, &(save_mr.flags),
-                              sizeof(struct ehca_mr) - offset);
-                       goto ehca_rereg_mr_exit0;
-               }
-       }
-
-ehca_rereg_mr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-                        "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
-                        "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
-                        "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
-                        acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
-                        rereg_1_hcall, rereg_3_hcall);
-       return ret;
-} /* end ehca_rereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
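-/*
- * Unmap a single FMR: small FMRs are reregistered with length 0; larger
- * FMRs, or a failed reregistration, are freed and registered again.
- */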
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
-                      struct ehca_mr *e_fmr)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_pd *e_pd =
-               container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
-       struct ehca_mr save_fmr;
-       u32 tmp_lkey, tmp_rkey;
-       struct ehca_mr_pginfo pginfo;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
-               /*
-                * note: after using rereg hcall with len=0,
-                * rereg hcall must be used again for registering pages
-                */
-               h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
-                                             0, 0, e_pd->fw_pd, 0, &hipzout);
-               if (h_ret == H_SUCCESS) {
-                       /* successful reregistration */
-                       e_fmr->start = NULL;
-                       e_fmr->size = 0;
-                       tmp_lkey = hipzout.lkey;
-                       tmp_rkey = hipzout.rkey;
-                       return 0;
-               }
-               /*
-                * should not happen, because length checked above,
-                * FMRs are not shared and no MW bound to FMRs
-                */
-               ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
-                        "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
-                        "mr_hndl=%llx lkey=%x lkey_out=%x",
-                        h_ret, e_fmr, shca->ipz_hca_handle.handle,
-                        e_fmr->ipz_mr_handle.handle,
-                        e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
-               /* try free and rereg */
-       }
-
-       /* first free old FMR */
-       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-                        "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
-                        "lkey=%x",
-                        h_ret, e_fmr, shca->ipz_hca_handle.handle,
-                        e_fmr->ipz_mr_handle.handle,
-                        e_fmr->ib.ib_fmr.lkey);
-               ret = ehca2ib_return_code(h_ret);
-               goto ehca_unmap_one_fmr_exit0;
-       }
-       /* clean ehca_mr_t, without changing lock */
-       save_fmr = *e_fmr;
-       ehca_mr_deletenew(e_fmr);
-
-       /* set some MR values */
-       e_fmr->flags = save_fmr.flags;
-       e_fmr->hwpage_size = save_fmr.hwpage_size;
-       e_fmr->fmr_page_size = save_fmr.fmr_page_size;
-       e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
-       e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
-       e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
-       e_fmr->acl = save_fmr.acl;
-
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.type = EHCA_MR_PGI_FMR;
-       ret = ehca_reg_mr(shca, e_fmr, NULL,
-                         (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
-                         e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
-                         &tmp_rkey, EHCA_REG_MR);
-       if (ret) {
-               u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
-               memcpy(&e_fmr->flags, &(save_fmr.flags),
-                      sizeof(struct ehca_mr) - offset);
-       }
-
-ehca_unmap_one_fmr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
-                        "fmr_max_pages=%x",
-                        ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
-       return ret;
-} /* end ehca_unmap_one_fmr() */
-
-/*----------------------------------------------------------------------*/
-
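-/*
- * Register a shared MR that reuses the pages of an existing MR at a new
- * iova and with new access rights.
- */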
-int ehca_reg_smr(struct ehca_shca *shca,
-                struct ehca_mr *e_origmr,
-                struct ehca_mr *e_newmr,
-                u64 *iova_start,
-                int acl,
-                struct ehca_pd *e_pd,
-                u32 *lkey, /*OUT*/
-                u32 *rkey) /*OUT*/
-{
-       int ret = 0;
-       u64 h_ret;
-       u32 hipz_acl;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       ehca_mrmw_map_acl(acl, &hipz_acl);
-       ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
-       h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
-                                   (u64)iova_start, hipz_acl, e_pd->fw_pd,
-                                   &hipzout);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
-                        "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
-                        "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
-                        h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
-                        shca->ipz_hca_handle.handle,
-                        e_origmr->ipz_mr_handle.handle,
-                        e_origmr->ib.ib_mr.lkey);
-               ret = ehca2ib_return_code(h_ret);
-               goto ehca_reg_smr_exit0;
-       }
-       /* successful registration */
-       e_newmr->num_kpages = e_origmr->num_kpages;
-       e_newmr->num_hwpages = e_origmr->num_hwpages;
-       e_newmr->hwpage_size   = e_origmr->hwpage_size;
-       e_newmr->start = iova_start;
-       e_newmr->size = e_origmr->size;
-       e_newmr->acl = acl;
-       e_newmr->ipz_mr_handle = hipzout.handle;
-       *lkey = hipzout.lkey;
-       *rkey = hipzout.rkey;
-       return 0;
-
-ehca_reg_smr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
-                        "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
-                        ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
-       return ret;
-} /* end ehca_reg_smr() */
-
-/*----------------------------------------------------------------------*/
-static inline void *ehca_calc_sectbase(int top, int dir, int idx)
-{
-       unsigned long ret = idx;
-       ret |= dir << EHCA_DIR_INDEX_SHIFT;
-       ret |= top << EHCA_TOP_INDEX_SHIFT;
-       return __va(ret << SECTION_SIZE_BITS);
-}
-
-#define ehca_bmap_valid(entry) \
-       ((u64)(entry) != (u64)EHCA_INVAL_ADDR)
-
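-/*
- * Register one memory section with the MR: fill the kpage buffer with the
- * hardware page addresses of the section and pass them to the firmware in
- * chunks of at most MAX_RPAGES.
- */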
-static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
-                              struct ehca_shca *shca, struct ehca_mr *mr,
-                              struct ehca_mr_pginfo *pginfo)
-{
-       u64 h_ret = 0;
-       unsigned long page = 0;
-       u64 rpage = __pa(kpage);
-       int page_count;
-
-       void *sectbase = ehca_calc_sectbase(top, dir, idx);
-       if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
-               ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
-                                          "hwpage_size does not fit the "
-                                          "section start address");
-       }
-       page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
-
-       while (page < page_count) {
-               u64 rnum;
-               for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
-                    rnum++) {
-                       void *pg = sectbase + ((page++) * pginfo->hwpage_size);
-                       kpage[rnum] = __pa(pg);
-               }
-
-               h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
-                       ehca_encode_hwpage_size(pginfo->hwpage_size),
-                       0, rpage, rnum);
-
-               if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
-                       ehca_err(&shca->ib_device, "register_rpage_mr failed");
-                       return h_ret;
-               }
-       }
-       return h_ret;
-}
-
-static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
-                               struct ehca_shca *shca, struct ehca_mr *mr,
-                               struct ehca_mr_pginfo *pginfo)
-{
-       u64 hret = H_SUCCESS;
-       int idx;
-
-       for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
-               if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
-                       continue;
-
-               hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
-                                          pginfo);
-               if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
-                               return hret;
-       }
-       return hret;
-}
-
-static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
-                                   struct ehca_mr *mr,
-                                   struct ehca_mr_pginfo *pginfo)
-{
-       u64 hret = H_SUCCESS;
-       int dir;
-
-       for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
-               if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-                       continue;
-
-               hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
-               if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
-                               return hret;
-       }
-       return hret;
-}
-
-/* register internal max-MR to internal SHCA */
-int ehca_reg_internal_maxmr(
-       struct ehca_shca *shca,
-       struct ehca_pd *e_pd,
-       struct ehca_mr **e_maxmr)  /*OUT*/
-{
-       int ret;
-       struct ehca_mr *e_mr;
-       u64 *iova_start;
-       u64 size_maxmr;
-       struct ehca_mr_pginfo pginfo;
-       u32 num_kpages;
-       u32 num_hwpages;
-       u64 hw_pgsize;
-
-       if (!ehca_bmap) {
-               ret = -EFAULT;
-               goto ehca_reg_internal_maxmr_exit0;
-       }
-
-       e_mr = ehca_mr_new();
-       if (!e_mr) {
-               ehca_err(&shca->ib_device, "out of memory");
-               ret = -ENOMEM;
-               goto ehca_reg_internal_maxmr_exit0;
-       }
-       e_mr->flags |= EHCA_MR_FLAG_MAXMR;
-
-       /* register internal max-MR on HCA */
-       size_maxmr = ehca_mr_len;
-       iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
-       num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
-                               PAGE_SIZE);
-       hw_pgsize = ehca_get_max_hwpage_size(shca);
-       num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
-                                hw_pgsize);
-
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.type = EHCA_MR_PGI_PHYS;
-       pginfo.num_kpages = num_kpages;
-       pginfo.num_hwpages = num_hwpages;
-       pginfo.hwpage_size = hw_pgsize;
-       pginfo.u.phy.addr = 0;
-       pginfo.u.phy.size = size_maxmr;
-
-       ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
-                         &pginfo, &e_mr->ib.ib_mr.lkey,
-                         &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
-       if (ret) {
-               ehca_err(&shca->ib_device, "reg of internal max MR failed, "
-                        "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
-                        "num_hwpages=%x", e_mr, iova_start, size_maxmr,
-                        num_kpages, num_hwpages);
-               goto ehca_reg_internal_maxmr_exit1;
-       }
-
-       /* successful registration of all pages */
-       e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
-       e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
-       e_mr->ib.ib_mr.uobject = NULL;
-       atomic_inc(&(e_pd->ib_pd.usecnt));
-       *e_maxmr = e_mr;
-       return 0;
-
-ehca_reg_internal_maxmr_exit1:
-       ehca_mr_delete(e_mr);
-ehca_reg_internal_maxmr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
-                        ret, shca, e_pd, e_maxmr);
-       return ret;
-} /* end ehca_reg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
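-/*
- * Register a max-MR as a shared MR on top of the internal max-MR of
- * the SHCA.
- */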
-int ehca_reg_maxmr(struct ehca_shca *shca,
-                  struct ehca_mr *e_newmr,
-                  u64 *iova_start,
-                  int acl,
-                  struct ehca_pd *e_pd,
-                  u32 *lkey,
-                  u32 *rkey)
-{
-       u64 h_ret;
-       struct ehca_mr *e_origmr = shca->maxmr;
-       u32 hipz_acl;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       ehca_mrmw_map_acl(acl, &hipz_acl);
-       ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
-       h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
-                                   (u64)iova_start, hipz_acl, e_pd->fw_pd,
-                                   &hipzout);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
-                        "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
-                        h_ret, e_origmr, shca->ipz_hca_handle.handle,
-                        e_origmr->ipz_mr_handle.handle,
-                        e_origmr->ib.ib_mr.lkey);
-               return ehca2ib_return_code(h_ret);
-       }
-       /* successful registration */
-       e_newmr->num_kpages = e_origmr->num_kpages;
-       e_newmr->num_hwpages = e_origmr->num_hwpages;
-       e_newmr->hwpage_size = e_origmr->hwpage_size;
-       e_newmr->start = iova_start;
-       e_newmr->size = e_origmr->size;
-       e_newmr->acl = acl;
-       e_newmr->ipz_mr_handle = hipzout.handle;
-       *lkey = hipzout.lkey;
-       *rkey = hipzout.rkey;
-       return 0;
-} /* end ehca_reg_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
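-/* Deregister the internal max-MR of the SHCA and drop its PD reference. */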
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
-{
-       int ret;
-       struct ehca_mr *e_maxmr;
-       struct ib_pd *ib_pd;
-
-       if (!shca->maxmr) {
-               ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
-               ret = -EINVAL;
-               goto ehca_dereg_internal_maxmr_exit0;
-       }
-
-       e_maxmr = shca->maxmr;
-       ib_pd = e_maxmr->ib.ib_mr.pd;
-       shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
-
-       ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
-       if (ret) {
-               ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
-                        "ret=%i e_maxmr=%p shca=%p lkey=%x",
-                        ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
-               shca->maxmr = e_maxmr;
-               goto ehca_dereg_internal_maxmr_exit0;
-       }
-
-       atomic_dec(&ib_pd->usecnt);
-
-ehca_dereg_internal_maxmr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
-                        ret, shca, shca->maxmr);
-       return ret;
-} /* end ehca_dereg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/* check the page list of the map-FMR verb for validity */
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
-                            u64 *page_list,
-                            int list_len)
-{
-       u32 i;
-       u64 *page;
-
-       if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
-               ehca_gen_err("bad list_len, list_len=%x "
-                            "e_fmr->fmr_max_pages=%x fmr=%p",
-                            list_len, e_fmr->fmr_max_pages, e_fmr);
-               return -EINVAL;
-       }
-
-       /* each page must be aligned */
-       page = page_list;
-       for (i = 0; i < list_len; i++) {
-               if (*page % e_fmr->fmr_page_size) {
-                       ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
-                                    "fmr_page_size=%x", i, *page, page, e_fmr,
-                                    e_fmr->fmr_page_size);
-                       return -EINVAL;
-               }
-               page++;
-       }
-
-       return 0;
-} /* end ehca_fmr_check_page_list() */
-
-/*----------------------------------------------------------------------*/
-
-/* fill the kpage buffer for a user MR; handles PAGE_SIZE >= pginfo->hwpage_size */
-static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
-                                 u32 number,
-                                 u64 *kpage)
-{
-       int ret = 0;
-       u64 pgaddr;
-       u32 j = 0;
-       int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
-       struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
-       while (*sg != NULL) {
-               pgaddr = page_to_pfn(sg_page(*sg))
-                       << PAGE_SHIFT;
-               *kpage = pgaddr + (pginfo->next_hwpage *
-                                  pginfo->hwpage_size);
-               if (!(*kpage)) {
-                       ehca_gen_err("pgaddr=%llx "
-                                    "sg_dma_address=%llx "
-                                    "entry=%llx next_hwpage=%llx",
-                                    pgaddr, (u64)sg_dma_address(*sg),
-                                    pginfo->u.usr.next_nmap,
-                                    pginfo->next_hwpage);
-                       return -EFAULT;
-               }
-               (pginfo->hwpage_cnt)++;
-               (pginfo->next_hwpage)++;
-               kpage++;
-               if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
-                       (pginfo->kpage_cnt)++;
-                       (pginfo->u.usr.next_nmap)++;
-                       pginfo->next_hwpage = 0;
-                       *sg = sg_next(*sg);
-               }
-               j++;
-               if (j >= number)
-                       break;
-       }
-
-       return ret;
-}
-
-/*
- * check the given pages for a contiguous layout;
- * the last page address is returned in prev_pgaddr for the next check
- */
-static int ehca_check_kpages_per_ate(struct scatterlist **sg,
-                                    int num_pages,
-                                    u64 *prev_pgaddr)
-{
-       for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
-               u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
-               if (ehca_debug_level >= 3)
-                       ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
-                                    *(u64 *)__va(pgaddr));
-               if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
-                       ehca_gen_err("uncontiguous page found pgaddr=%llx "
-                                    "prev_pgaddr=%llx entries_left_in_hwpage=%x",
-                                    pgaddr, *prev_pgaddr, num_pages);
-                       return -EINVAL;
-               }
-               *prev_pgaddr = pgaddr;
-       }
-       return 0;
-}
-
-/* fill the kpage buffer for a user MR; handles PAGE_SIZE < pginfo->hwpage_size */
-static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
-                                 u32 number,
-                                 u64 *kpage)
-{
-       int ret = 0;
-       u64 pgaddr, prev_pgaddr;
-       u32 j = 0;
-       int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
-       int nr_kpages = kpages_per_hwpage;
-       struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
-       while (*sg != NULL) {
-
-               if (nr_kpages == kpages_per_hwpage) {
-                       pgaddr = (page_to_pfn(sg_page(*sg))
-                                  << PAGE_SHIFT);
-                       *kpage = pgaddr;
-                       if (!(*kpage)) {
-                               ehca_gen_err("pgaddr=%llx entry=%llx",
-                                            pgaddr, pginfo->u.usr.next_nmap);
-                               ret = -EFAULT;
-                               return ret;
-                       }
-                       /*
-                        * The first page in a hwpage must be aligned;
-                        * the first MR page is exempt from this rule.
-                        */
-                       if (pgaddr & (pginfo->hwpage_size - 1)) {
-                               if (pginfo->hwpage_cnt) {
-                                       ehca_gen_err(
-                                               "invalid alignment "
-                                               "pgaddr=%llx entry=%llx "
-                                               "mr_pgsize=%llx",
-                                               pgaddr, pginfo->u.usr.next_nmap,
-                                               pginfo->hwpage_size);
-                                       ret = -EFAULT;
-                                       return ret;
-                               }
-                               /* first MR page */
-                               pginfo->kpage_cnt =
-                                       (pgaddr &
-                                        (pginfo->hwpage_size - 1)) >>
-                                       PAGE_SHIFT;
-                               nr_kpages -= pginfo->kpage_cnt;
-                               *kpage = pgaddr &
-                                        ~(pginfo->hwpage_size - 1);
-                       }
-                       if (ehca_debug_level >= 3) {
-                               u64 val = *(u64 *)__va(pgaddr);
-                               ehca_gen_dbg("kpage=%llx page=%llx "
-                                            "value=%016llx",
-                                            *kpage, pgaddr, val);
-                       }
-                       prev_pgaddr = pgaddr;
-                       *sg = sg_next(*sg);
-                       pginfo->kpage_cnt++;
-                       pginfo->u.usr.next_nmap++;
-                       nr_kpages--;
-                       if (!nr_kpages)
-                               goto next_kpage;
-                       continue;
-               }
-
-               ret = ehca_check_kpages_per_ate(sg, nr_kpages,
-                                               &prev_pgaddr);
-               if (ret)
-                       return ret;
-               pginfo->kpage_cnt += nr_kpages;
-               pginfo->u.usr.next_nmap += nr_kpages;
-
-next_kpage:
-               nr_kpages = kpages_per_hwpage;
-               (pginfo->hwpage_cnt)++;
-               kpage++;
-               j++;
-               if (j >= number)
-                       break;
-       }
-
-       return ret;
-}
-
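-/* fill the kpage buffer with hardware page addresses for a physical MR */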
-static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
-                                u32 number, u64 *kpage)
-{
-       int ret = 0;
-       u64 addr = pginfo->u.phy.addr;
-       u64 size = pginfo->u.phy.size;
-       u64 num_hw, offs_hw;
-       u32 i = 0;
-
-       num_hw  = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
-                               pginfo->hwpage_size);
-       offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
-
-       while (pginfo->next_hwpage < offs_hw + num_hw) {
-               /* sanity check */
-               if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
-                   (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
-                       ehca_gen_err("kpage_cnt >= num_kpages, "
-                                    "kpage_cnt=%llx num_kpages=%llx "
-                                    "hwpage_cnt=%llx "
-                                    "num_hwpages=%llx i=%x",
-                                    pginfo->kpage_cnt,
-                                    pginfo->num_kpages,
-                                    pginfo->hwpage_cnt,
-                                    pginfo->num_hwpages, i);
-                       return -EFAULT;
-               }
-               *kpage = (addr & ~(pginfo->hwpage_size - 1)) +
-                        (pginfo->next_hwpage * pginfo->hwpage_size);
-               if (!(*kpage) && addr) {
-                       ehca_gen_err("addr=%llx size=%llx "
-                                    "next_hwpage=%llx", addr,
-                                    size, pginfo->next_hwpage);
-                       return -EFAULT;
-               }
-               (pginfo->hwpage_cnt)++;
-               (pginfo->next_hwpage)++;
-               if (PAGE_SIZE >= pginfo->hwpage_size) {
-                       if (pginfo->next_hwpage %
-                           (PAGE_SIZE / pginfo->hwpage_size) == 0)
-                               (pginfo->kpage_cnt)++;
-               } else
-                       pginfo->kpage_cnt += pginfo->hwpage_size /
-                               PAGE_SIZE;
-               kpage++;
-               i++;
-               if (i >= number)
-                       break;
-       }
-       if (pginfo->next_hwpage >= offs_hw + num_hw) {
-               pginfo->next_hwpage = 0;
-       }
-
-       return ret;
-}
-
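-/*
- * fill the kpage buffer with hardware page addresses taken from the FMR
- * page list; if the hardware page is larger than the FMR page size, the
- * listed pages must be contiguous
- */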
-static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
-                               u32 number, u64 *kpage)
-{
-       int ret = 0;
-       u64 *fmrlist;
-       u32 i;
-
-       /* loop over desired page_list entries */
-       fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
-       for (i = 0; i < number; i++) {
-               *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
-                          pginfo->next_hwpage * pginfo->hwpage_size;
-               if (!(*kpage)) {
-                       ehca_gen_err("*fmrlist=%llx fmrlist=%p "
-                                    "next_listelem=%llx next_hwpage=%llx",
-                                    *fmrlist, fmrlist,
-                                    pginfo->u.fmr.next_listelem,
-                                    pginfo->next_hwpage);
-                       return -EFAULT;
-               }
-               (pginfo->hwpage_cnt)++;
-               if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
-                       if (pginfo->next_hwpage %
-                           (pginfo->u.fmr.fmr_pgsize /
-                            pginfo->hwpage_size) == 0) {
-                               (pginfo->kpage_cnt)++;
-                               (pginfo->u.fmr.next_listelem)++;
-                               fmrlist++;
-                               pginfo->next_hwpage = 0;
-                       } else
-                               (pginfo->next_hwpage)++;
-               } else {
-                       unsigned int cnt_per_hwpage = pginfo->hwpage_size /
-                               pginfo->u.fmr.fmr_pgsize;
-                       unsigned int j;
-                       u64 prev = *kpage;
-                       /* check if the addresses are contiguous */
-                       for (j = 1; j < cnt_per_hwpage; j++) {
-                               u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
-                               if (prev + pginfo->u.fmr.fmr_pgsize != p) {
-                                       ehca_gen_err("uncontiguous fmr pages "
-                                                    "found prev=%llx p=%llx "
-                                                    "idx=%x", prev, p, i + j);
-                                       return -EINVAL;
-                               }
-                               prev = p;
-                       }
-                       pginfo->kpage_cnt += cnt_per_hwpage;
-                       pginfo->u.fmr.next_listelem += cnt_per_hwpage;
-                       fmrlist += cnt_per_hwpage;
-               }
-               kpage++;
-       }
-       return ret;
-}
-
-/* setup page buffer from page info */
-int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
-                    u32 number,
-                    u64 *kpage)
-{
-       int ret;
-
-       switch (pginfo->type) {
-       case EHCA_MR_PGI_PHYS:
-               ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
-               break;
-       case EHCA_MR_PGI_USER:
-               ret = PAGE_SIZE >= pginfo->hwpage_size ?
-                       ehca_set_pagebuf_user1(pginfo, number, kpage) :
-                       ehca_set_pagebuf_user2(pginfo, number, kpage);
-               break;
-       case EHCA_MR_PGI_FMR:
-               ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
-               break;
-       default:
-               ehca_gen_err("bad pginfo->type=%x", pginfo->type);
-               ret = -EFAULT;
-               break;
-       }
-       return ret;
-} /* end ehca_set_pagebuf() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * check whether an MR is a max-MR, i.e. one that covers all of memory;
- * returns 1 if it is a max-MR, else 0
- */
-int ehca_mr_is_maxmr(u64 size,
-                    u64 *iova_start)
-{
-       /* an MR is treated as a max-MR only if it matches the following: */
-       if ((size == ehca_mr_len) &&
-           (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
-               ehca_gen_dbg("this is a max-MR");
-               return 1;
-       } else
-               return 0;
-} /* end ehca_mr_is_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/* map access control for MR/MW. This routine is used for MR and MW. */
-void ehca_mrmw_map_acl(int ib_acl,
-                      u32 *hipz_acl)
-{
-       *hipz_acl = 0;
-       if (ib_acl & IB_ACCESS_REMOTE_READ)
-               *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
-       if (ib_acl & IB_ACCESS_REMOTE_WRITE)
-               *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
-       if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
-               *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
-       if (ib_acl & IB_ACCESS_LOCAL_WRITE)
-               *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
-       if (ib_acl & IB_ACCESS_MW_BIND)
-               *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
-} /* end ehca_mrmw_map_acl() */
-
-/*----------------------------------------------------------------------*/
-
-/* sets page size in hipz access control for MR/MW. */
-void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
-{
-       *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
-} /* end ehca_mrmw_set_pgsize_hipz_acl() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * reverse map access control for MR/MW.
- * This routine is used for MR and MW.
- */
-void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
-                              int *ib_acl) /*OUT*/
-{
-       *ib_acl = 0;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
-               *ib_acl |= IB_ACCESS_REMOTE_READ;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
-               *ib_acl |= IB_ACCESS_REMOTE_WRITE;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
-               *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
-               *ib_acl |= IB_ACCESS_LOCAL_WRITE;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
-               *ib_acl |= IB_ACCESS_MW_BIND;
-} /* end ehca_mrmw_reverse_map_acl() */
-
-
-/*----------------------------------------------------------------------*/
-
-/*
- * MR destructor and constructor:
- * used by the reregister-MR verb; sets all fields of struct ehca_mr to 0
- * except the embedded struct ib_mr and the spinlock
- */
-void ehca_mr_deletenew(struct ehca_mr *mr)
-{
-       mr->flags = 0;
-       mr->num_kpages = 0;
-       mr->num_hwpages = 0;
-       mr->acl = 0;
-       mr->start = NULL;
-       mr->fmr_page_size = 0;
-       mr->fmr_max_pages = 0;
-       mr->fmr_max_maps = 0;
-       mr->fmr_map_cnt = 0;
-       memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
-       memset(&mr->galpas, 0, sizeof(mr->galpas));
-} /* end ehca_mr_deletenew() */
-
-int ehca_init_mrmw_cache(void)
-{
-       mr_cache = kmem_cache_create("ehca_cache_mr",
-                                    sizeof(struct ehca_mr), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!mr_cache)
-               return -ENOMEM;
-       mw_cache = kmem_cache_create("ehca_cache_mw",
-                                    sizeof(struct ehca_mw), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!mw_cache) {
-               kmem_cache_destroy(mr_cache);
-               mr_cache = NULL;
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-void ehca_cleanup_mrmw_cache(void)
-{
-       kmem_cache_destroy(mr_cache);
-       kmem_cache_destroy(mw_cache);
-}
-
-static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
-                                    int dir)
-{
-       if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
-               ehca_top_bmap->dir[dir] =
-                       kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
-               if (!ehca_top_bmap->dir[dir])
-                       return -ENOMEM;
-               /* Fill the map block with 0xFF so every entry reads as EHCA_INVAL_ADDR */
-               memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
-       }
-       return 0;
-}
-
-static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
-{
-       if (!ehca_bmap_valid(ehca_bmap->top[top])) {
-               ehca_bmap->top[top] =
-                       kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
-               if (!ehca_bmap->top[top])
-                       return -ENOMEM;
-               /* Fill the map block with 0xFF so every entry reads as EHCA_INVAL_ADDR */
-               memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
-       }
-       return ehca_init_top_bmap(ehca_bmap->top[top], dir);
-}
-
-static inline int ehca_calc_index(unsigned long i, unsigned long s)
-{
-       return (i >> s) & EHCA_INDEX_MASK;
-}
-
-void ehca_destroy_busmap(void)
-{
-       int top, dir;
-
-       if (!ehca_bmap)
-               return;
-
-       for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
-               if (!ehca_bmap_valid(ehca_bmap->top[top]))
-                       continue;
-               for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
-                       if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-                               continue;
-
-                       kfree(ehca_bmap->top[top]->dir[dir]);
-               }
-
-               kfree(ehca_bmap->top[top]);
-       }
-
-       kfree(ehca_bmap);
-       ehca_bmap = NULL;
-}
-
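-/*
- * Enter a pfn range into the bus map, allocating top and dir blocks as
- * needed; each section is assigned the next free bus address and
- * ehca_mr_len grows by EHCA_SECTSIZE per section.
- */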
-static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
-{
-       unsigned long i, start_section, end_section;
-       int top, dir, idx;
-
-       if (!nr_pages)
-               return 0;
-
-       if (!ehca_bmap) {
-               ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
-               if (!ehca_bmap)
-                       return -ENOMEM;
-               /* Fill the map block with 0xFF so every entry reads as EHCA_INVAL_ADDR */
-               memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
-       }
-
-       start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
-       end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
-       for (i = start_section; i < end_section; i++) {
-               int ret;
-               top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
-               dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
-               idx = i & EHCA_INDEX_MASK;
-
-               ret = ehca_init_bmap(ehca_bmap, top, dir);
-               if (ret) {
-                       ehca_destroy_busmap();
-                       return ret;
-               }
-               ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
-               ehca_mr_len += EHCA_SECTSIZE;
-       }
-       return 0;
-}
-
-static int ehca_is_hugepage(unsigned long pfn)
-{
-       int page_order;
-
-       if (pfn & EHCA_HUGEPAGE_PFN_MASK)
-               return 0;
-
-       page_order = compound_order(pfn_to_page(pfn));
-       if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
-               return 0;
-
-       return 1;
-}
-
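-/*
- * Callback for walk_system_ram_range(): add a RAM chunk to the bus map,
- * skipping hugepages contained in chunks of at least EHCA_HUGEPAGE_SIZE.
- */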
-static int ehca_create_busmap_callback(unsigned long initial_pfn,
-                                      unsigned long total_nr_pages, void *arg)
-{
-       int ret;
-       unsigned long pfn, start_pfn, end_pfn, nr_pages;
-
-       if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
-               return ehca_update_busmap(initial_pfn, total_nr_pages);
-
-       /* Given chunk is >= 16GB -> check for hugepages */
-       start_pfn = initial_pfn;
-       end_pfn = initial_pfn + total_nr_pages;
-       pfn = start_pfn;
-
-       while (pfn < end_pfn) {
-               if (ehca_is_hugepage(pfn)) {
-                       /* Add mem found in front of the hugepage */
-                       nr_pages = pfn - start_pfn;
-                       ret = ehca_update_busmap(start_pfn, nr_pages);
-                       if (ret)
-                               return ret;
-                       /* Skip the hugepage */
-                       pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
-                       start_pfn = pfn;
-               } else
-                       pfn += (EHCA_SECTSIZE / PAGE_SIZE);
-       }
-
-       /* Add mem found behind the hugepage(s)  */
-       nr_pages = pfn - start_pfn;
-       return ehca_update_busmap(start_pfn, nr_pages);
-}
-
-int ehca_create_busmap(void)
-{
-       int ret;
-
-       ehca_mr_len = 0;
-       ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
-                                  ehca_create_busmap_callback);
-       return ret;
-}
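
ehca_create_busmap() leans on the walk_system_ram_range() contract: the callback is invoked once per contiguous System-RAM range with its first pfn, its length in pages and the opaque argument, and a non-zero return value aborts the walk. A minimal sketch of a callback written against that contract (demo_* names are invented; the range argument mirrors the driver's own call above, and the exact header needed may differ between kernel versions):

#include <linux/ioport.h>
#include <linux/mm.h>

static unsigned long demo_ram_pages;

static int demo_count_ram(unsigned long start_pfn, unsigned long nr_pages,
                          void *arg)
{
        demo_ram_pages += nr_pages;
        return 0;               /* non-zero would stop the walk early */
}

static int demo_walk_ram(void)
{
        demo_ram_pages = 0;
        return walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
                                     demo_count_ram);
}
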
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
-                                  struct ehca_mr *e_mr,
-                                  struct ehca_mr_pginfo *pginfo)
-{
-       int top;
-       u64 hret, *kpage;
-
-       kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!kpage) {
-               ehca_err(&shca->ib_device, "kpage alloc failed");
-               return -ENOMEM;
-       }
-       for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
-               if (!ehca_bmap_valid(ehca_bmap->top[top]))
-                       continue;
-               hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
-               if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
-                       break;
-       }
-
-       ehca_free_fw_ctrlblock(kpage);
-
-       if (hret == H_SUCCESS)
-               return 0; /* Everything is fine */
-       else {
-               ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
-                                "h_ret=%lli e_mr=%p top=%x lkey=%x "
-                                "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
-                                e_mr->ib.ib_mr.lkey,
-                                shca->ipz_hca_handle.handle,
-                                e_mr->ipz_mr_handle.handle);
-               return ehca2ib_return_code(hret);
-       }
-}
-
-static u64 ehca_map_vaddr(void *caddr)
-{
-       int top, dir, idx;
-       unsigned long abs_addr, offset;
-       u64 entry;
-
-       if (!ehca_bmap)
-               return EHCA_INVAL_ADDR;
-
-       abs_addr = __pa(caddr);
-       top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
-       if (!ehca_bmap_valid(ehca_bmap->top[top]))
-               return EHCA_INVAL_ADDR;
-
-       dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
-       if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-               return EHCA_INVAL_ADDR;
-
-       idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
-
-       entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
-       if (ehca_bmap_valid(entry)) {
-               offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
-               return entry | offset;
-       } else
-               return EHCA_INVAL_ADDR;
-}
-
-static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
-       return dma_addr == EHCA_INVAL_ADDR;
-}
-
-static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
-                              size_t size, enum dma_data_direction direction)
-{
-       if (cpu_addr)
-               return ehca_map_vaddr(cpu_addr);
-       else
-               return EHCA_INVAL_ADDR;
-}
-
-static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
-                                 enum dma_data_direction direction)
-{
-       /* This is only a stub; nothing to be done here */
-}
-
-static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
-                            unsigned long offset, size_t size,
-                            enum dma_data_direction direction)
-{
-       u64 addr;
-
-       if (offset + size > PAGE_SIZE)
-               return EHCA_INVAL_ADDR;
-
-       addr = ehca_map_vaddr(page_address(page));
-       if (!ehca_dma_mapping_error(dev, addr))
-               addr += offset;
-
-       return addr;
-}
-
-static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
-                               enum dma_data_direction direction)
-{
-       /* This is only a stub; nothing to be done here */
-}
-
-static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
-                          int nents, enum dma_data_direction direction)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nents, i) {
-               u64 addr;
-               addr = ehca_map_vaddr(sg_virt(sg));
-               if (ehca_dma_mapping_error(dev, addr))
-                       return 0;
-
-               sg->dma_address = addr;
-               sg->dma_length = sg->length;
-       }
-       return nents;
-}
-
-static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
-                             int nents, enum dma_data_direction direction)
-{
-       /* This is only a stub; nothing to be done here */
-}
-
-static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
-                                        size_t size,
-                                        enum dma_data_direction dir)
-{
-       dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
-}
-
-static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
-                                           size_t size,
-                                           enum dma_data_direction dir)
-{
-       dma_sync_single_for_device(dev->dma_device, addr, size, dir);
-}
-
-static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
-                                    u64 *dma_handle, gfp_t flag)
-{
-       struct page *p;
-       void *addr = NULL;
-       u64 dma_addr;
-
-       p = alloc_pages(flag, get_order(size));
-       if (p) {
-               addr = page_address(p);
-               dma_addr = ehca_map_vaddr(addr);
-               if (ehca_dma_mapping_error(dev, dma_addr)) {
-                       free_pages((unsigned long)addr, get_order(size));
-                       return NULL;
-               }
-               if (dma_handle)
-                       *dma_handle = dma_addr;
-               return addr;
-       }
-       return NULL;
-}
-
-static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
-                                  void *cpu_addr, u64 dma_handle)
-{
-       if (cpu_addr && size)
-               free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-
-struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
-       .mapping_error          = ehca_dma_mapping_error,
-       .map_single             = ehca_dma_map_single,
-       .unmap_single           = ehca_dma_unmap_single,
-       .map_page               = ehca_dma_map_page,
-       .unmap_page             = ehca_dma_unmap_page,
-       .map_sg                 = ehca_dma_map_sg,
-       .unmap_sg               = ehca_dma_unmap_sg,
-       .sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
-       .sync_single_for_device = ehca_dma_sync_single_for_device,
-       .alloc_coherent         = ehca_dma_alloc_coherent,
-       .free_coherent          = ehca_dma_free_coherent,
-};
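
This ops table is what the core stack's ib_dma_* wrappers dispatch through once a driver installs it in ib_device->dma_ops; without it they fall back to the regular DMA API on dev->dma_device. A rough sketch of that dispatch pattern from this era (an approximation for illustration, not a verbatim copy of include/rdma/ib_verbs.h):

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

static inline u64 sketch_ib_dma_map_single(struct ib_device *dev,
                                           void *cpu_addr, size_t size,
                                           enum dma_data_direction direction)
{
        /* device-specific ops win; otherwise use the generic DMA API */
        if (dev->dma_ops)
                return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
        return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
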
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
deleted file mode 100644 (file)
index 52bfa95..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  MR/MW declarations and inline functions
- *
- *  Authors: Dietmar Decker <ddecker@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _EHCA_MRMW_H_
-#define _EHCA_MRMW_H_
-
-enum ehca_reg_type {
-       EHCA_REG_MR,
-       EHCA_REG_BUSMAP_MR
-};
-
-int ehca_reg_mr(struct ehca_shca *shca,
-               struct ehca_mr *e_mr,
-               u64 *iova_start,
-               u64 size,
-               int acl,
-               struct ehca_pd *e_pd,
-               struct ehca_mr_pginfo *pginfo,
-               u32 *lkey,
-               u32 *rkey,
-               enum ehca_reg_type reg_type);
-
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
-                      struct ehca_mr *e_mr,
-                      struct ehca_mr_pginfo *pginfo);
-
-int ehca_rereg_mr(struct ehca_shca *shca,
-                 struct ehca_mr *e_mr,
-                 u64 *iova_start,
-                 u64 size,
-                 int mr_access_flags,
-                 struct ehca_pd *e_pd,
-                 struct ehca_mr_pginfo *pginfo,
-                 u32 *lkey,
-                 u32 *rkey);
-
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
-                      struct ehca_mr *e_fmr);
-
-int ehca_reg_smr(struct ehca_shca *shca,
-                struct ehca_mr *e_origmr,
-                struct ehca_mr *e_newmr,
-                u64 *iova_start,
-                int acl,
-                struct ehca_pd *e_pd,
-                u32 *lkey,
-                u32 *rkey);
-
-int ehca_reg_internal_maxmr(struct ehca_shca *shca,
-                           struct ehca_pd *e_pd,
-                           struct ehca_mr **maxmr);
-
-int ehca_reg_maxmr(struct ehca_shca *shca,
-                  struct ehca_mr *e_newmr,
-                  u64 *iova_start,
-                  int acl,
-                  struct ehca_pd *e_pd,
-                  u32 *lkey,
-                  u32 *rkey);
-
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
-
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
-                            u64 *page_list,
-                            int list_len);
-
-int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
-                    u32 number,
-                    u64 *kpage);
-
-int ehca_mr_is_maxmr(u64 size,
-                    u64 *iova_start);
-
-void ehca_mrmw_map_acl(int ib_acl,
-                      u32 *hipz_acl);
-
-void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl);
-
-void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
-                              int *ib_acl);
-
-void ehca_mr_deletenew(struct ehca_mr *mr);
-
-int ehca_create_busmap(void);
-
-void ehca_destroy_busmap(void);
-
-extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
-#endif  /*_EHCA_MRMW_H_*/
diff --git a/drivers/staging/rdma/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
deleted file mode 100644 (file)
index 2a8aae4..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  PD functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-
-static struct kmem_cache *pd_cache;
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
-                           struct ib_ucontext *context, struct ib_udata *udata)
-{
-       struct ehca_pd *pd;
-       int i;
-
-       pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
-       if (!pd) {
-               ehca_err(device, "device=%p context=%p out of memory",
-                        device, context);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       for (i = 0; i < 2; i++) {
-               INIT_LIST_HEAD(&pd->free[i]);
-               INIT_LIST_HEAD(&pd->full[i]);
-       }
-       mutex_init(&pd->lock);
-
-       /*
-        * Kernel PD: context == NULL
-        * User   PD: context != NULL
-        */
-       if (!context) {
-               /*
-                * Kernel PDs created after init always reuse
-                * the one created in ehca_shca_reopen()
-                */
-               struct ehca_shca *shca = container_of(device, struct ehca_shca,
-                                                     ib_device);
-               pd->fw_pd.value = shca->pd->fw_pd.value;
-       } else
-               pd->fw_pd.value = (u64)pd;
-
-       return &pd->ib_pd;
-}
-
-int ehca_dealloc_pd(struct ib_pd *pd)
-{
-       struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
-       int i, leftovers = 0;
-       struct ipz_small_queue_page *page, *tmp;
-
-       for (i = 0; i < 2; i++) {
-               list_splice(&my_pd->full[i], &my_pd->free[i]);
-               list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
-                       leftovers = 1;
-                       free_page(page->page);
-                       kmem_cache_free(small_qp_cache, page);
-               }
-       }
-
-       if (leftovers)
-               ehca_warn(pd->device,
-                         "Some small queue pages were not freed");
-
-       kmem_cache_free(pd_cache, my_pd);
-
-       return 0;
-}
-
-int ehca_init_pd_cache(void)
-{
-       pd_cache = kmem_cache_create("ehca_cache_pd",
-                                    sizeof(struct ehca_pd), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!pd_cache)
-               return -ENOMEM;
-       return 0;
-}
-
-void ehca_cleanup_pd_cache(void)
-{
-       kmem_cache_destroy(pd_cache);
-}
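
ehca_pd.c follows the usual slab-cache lifecycle: one named cache per object type, zeroed allocations from it on the hot path, and matching free/destroy calls at teardown. A minimal self-contained sketch of that lifecycle (demo_* names are invented for the example):

#include <linux/module.h>
#include <linux/slab.h>

struct demo_pd {
        u64 value;
};

static struct kmem_cache *demo_pd_cache;

static int __init demo_init(void)
{
        struct demo_pd *pd;

        demo_pd_cache = kmem_cache_create("demo_cache_pd",
                                          sizeof(struct demo_pd), 0,
                                          SLAB_HWCACHE_ALIGN, NULL);
        if (!demo_pd_cache)
                return -ENOMEM;

        /* allocate one zeroed object and give it straight back */
        pd = kmem_cache_zalloc(demo_pd_cache, GFP_KERNEL);
        if (pd)
                kmem_cache_free(demo_pd_cache, pd);
        return 0;
}

static void __exit demo_exit(void)
{
        kmem_cache_destroy(demo_pd_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
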
diff --git a/drivers/staging/rdma/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
deleted file mode 100644 (file)
index 90c4efa..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Hardware request structures
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _EHCA_QES_H_
-#define _EHCA_QES_H_
-
-#include "ehca_tools.h"
-
-/* virtual scatter gather entry to specify remote addresses with length */
-struct ehca_vsgentry {
-       u64 vaddr;
-       u32 lkey;
-       u32 length;
-};
-
-#define GRH_FLAG_MASK        EHCA_BMASK_IBM( 7,  7)
-#define GRH_IPVERSION_MASK   EHCA_BMASK_IBM( 0,  3)
-#define GRH_TCLASS_MASK      EHCA_BMASK_IBM( 4, 12)
-#define GRH_FLOWLABEL_MASK   EHCA_BMASK_IBM(13, 31)
-#define GRH_PAYLEN_MASK      EHCA_BMASK_IBM(32, 47)
-#define GRH_NEXTHEADER_MASK  EHCA_BMASK_IBM(48, 55)
-#define GRH_HOPLIMIT_MASK    EHCA_BMASK_IBM(56, 63)
-
-/*
- * Unreliable Datagram Address Vector Format
- * see IBTA Vol1 chapter 8.3 Global Routing Header
- */
-struct ehca_ud_av {
-       u8 sl;
-       u8 lnh;
-       u16 dlid;
-       u8 reserved1;
-       u8 reserved2;
-       u8 reserved3;
-       u8 slid_path_bits;
-       u8 reserved4;
-       u8 ipd;
-       u8 reserved5;
-       u8 pmtu;
-       u32 reserved6;
-       u64 reserved7;
-       union {
-               struct {
-                       u64 word_0; /* always set to 6  */
-                       /* should be 0x1B for IB transport */
-                       u64 word_1;
-                       u64 word_2;
-                       u64 word_3;
-                       u64 word_4;
-               } grh;
-               struct {
-                       u32 wd_0;
-                       u32 wd_1;
-                       /* DWord_1 --> SGID */
-
-                       u32 sgid_wd3;
-                       u32 sgid_wd2;
-
-                       u32 sgid_wd1;
-                       u32 sgid_wd0;
-                       /* DWord_3 --> DGID */
-
-                       u32 dgid_wd3;
-                       u32 dgid_wd2;
-
-                       u32 dgid_wd1;
-                       u32 dgid_wd0;
-               } grh_l;
-       };
-};
-
-/* maximum number of sg entries allowed in a WQE */
-#define MAX_WQE_SG_ENTRIES 252
-
-#define WQE_OPTYPE_SEND             0x80
-#define WQE_OPTYPE_RDMAREAD         0x40
-#define WQE_OPTYPE_RDMAWRITE        0x20
-#define WQE_OPTYPE_CMPSWAP          0x10
-#define WQE_OPTYPE_FETCHADD         0x08
-#define WQE_OPTYPE_BIND             0x04
-
-#define WQE_WRFLAG_REQ_SIGNAL_COM   0x80
-#define WQE_WRFLAG_FENCE            0x40
-#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
-#define WQE_WRFLAG_SOLIC_EVENT      0x10
-
-#define WQEF_CACHE_HINT             0x80
-#define WQEF_CACHE_HINT_RD_WR       0x40
-#define WQEF_TIMED_WQE              0x20
-#define WQEF_PURGE                  0x08
-#define WQEF_HIGH_NIBBLE            0xF0
-
-#define MW_BIND_ACCESSCTRL_R_WRITE   0x40
-#define MW_BIND_ACCESSCTRL_R_READ    0x20
-#define MW_BIND_ACCESSCTRL_R_ATOMIC  0x10
-
-struct ehca_wqe {
-       u64 work_request_id;
-       u8 optype;
-       u8 wr_flag;
-       u16 pkeyi;
-       u8 wqef;
-       u8 nr_of_data_seg;
-       u16 wqe_provided_slid;
-       u32 destination_qp_number;
-       u32 resync_psn_sqp;
-       u32 local_ee_context_qkey;
-       u32 immediate_data;
-       union {
-               struct {
-                       u64 remote_virtual_address;
-                       u32 rkey;
-                       u32 reserved;
-                       u64 atomic_1st_op_dma_len;
-                       u64 atomic_2nd_op;
-                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-
-               } nud;
-               struct {
-                       u64 ehca_ud_av_ptr;
-                       u64 reserved1;
-                       u64 reserved2;
-                       u64 reserved3;
-                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-               } ud_avp;
-               struct {
-                       struct ehca_ud_av ud_av;
-                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
-                                                    2];
-               } ud_av;
-               struct {
-                       u64 reserved0;
-                       u64 reserved1;
-                       u64 reserved2;
-                       u64 reserved3;
-                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-               } all_rcv;
-
-               struct {
-                       u64 reserved;
-                       u32 rkey;
-                       u32 old_rkey;
-                       u64 reserved1;
-                       u64 reserved2;
-                       u64 virtual_address;
-                       u32 reserved3;
-                       u32 length;
-                       u32 reserved4;
-                       u16 reserved5;
-                       u8 reserved6;
-                       u8 lr_ctl;
-                       u32 lkey;
-                       u32 reserved7;
-                       u64 reserved8;
-                       u64 reserved9;
-                       u64 reserved10;
-                       u64 reserved11;
-               } bind;
-               struct {
-                       u64 reserved12;
-                       u64 reserved13;
-                       u32 size;
-                       u32 start;
-               } inline_data;
-       } u;
-
-};
-
-#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
-#define WC_IMM_DATA     EHCA_BMASK_IBM(1, 1)
-#define WC_GRH_PRESENT  EHCA_BMASK_IBM(2, 2)
-#define WC_SE_BIT       EHCA_BMASK_IBM(3, 3)
-#define WC_STATUS_ERROR_BIT 0x80000000
-#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
-#define WC_STATUS_PURGE_BIT 0x10
-#define WC_SEND_RECEIVE_BIT 0x80
-
-struct ehca_cqe {
-       u64 work_request_id;
-       u8 optype;
-       u8 w_completion_flags;
-       u16 reserved1;
-       u32 nr_bytes_transferred;
-       u32 immediate_data;
-       u32 local_qp_number;
-       u8 freed_resource_count;
-       u8 service_level;
-       u16 wqe_count;
-       u32 qp_token;
-       u32 qkey_ee_token;
-       u32 remote_qp_number;
-       u16 dlid;
-       u16 rlid;
-       u16 reserved2;
-       u16 pkey_index;
-       u32 cqe_timestamp;
-       u32 wqe_timestamp;
-       u8 wqe_timestamp_valid;
-       u8 reserved3;
-       u8 reserved4;
-       u8 cqe_flags;
-       u32 status;
-};
-
-struct ehca_eqe {
-       u64 entry;
-};
-
-struct ehca_mrte {
-       u64 starting_va;
-       u64 length; /* length of memory region in bytes*/
-       u32 pd;
-       u8 key_instance;
-       u8 pagesize;
-       u8 mr_control;
-       u8 local_remote_access_ctrl;
-       u8 reserved[0x20 - 0x18];
-       u64 at_pointer[4];
-};
-#endif /*_EHCA_QES_H_*/
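
The GRH_*_MASK and WC_* definitions above pass (from, to) bit ranges to EHCA_BMASK_IBM() from ehca_tools.h; the ranges track the GRH fields from the most significant bit down, i.e. IBM-style numbering where bit 0 is the MSB of a 64-bit word. The sketch below illustrates only that numbering convention with plain masks; it is not the driver's actual EHCA_BMASK_IBM()/EHCA_BMASK_GET() implementation:

#include <stdint.h>
#include <stdio.h>

/* MSB-first (IBM-style) bit ranges over a 64-bit word: bit 0 is the MSB. */
#define IBM_MASK(from, to)   ((~0ULL >> (from)) & (~0ULL << (63 - (to))))
#define IBM_GET(from, to, x) (((x) & IBM_MASK(from, to)) >> (63 - (to)))

int main(void)
{
        uint64_t word = 0xA123456789ABCDEFULL;

        /* bits 0..3 are the top nibble, bits 56..63 the lowest byte */
        printf("bits 0..3   = %#llx\n", (unsigned long long)IBM_GET(0, 3, word));
        printf("bits 56..63 = %#llx\n", (unsigned long long)IBM_GET(56, 63, word));
        return 0;
}
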
diff --git a/drivers/staging/rdma/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
deleted file mode 100644 (file)
index 896c01f..0000000
+++ /dev/null
@@ -1,2256 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  QP functions
- *
- *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
- *           Stefan Roscher <stefan.roscher@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-
-static struct kmem_cache *qp_cache;
-
-/*
- * attributes not supported by query qp
- */
-#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS       | \
-                                    IB_QP_EN_SQD_ASYNC_NOTIFY)
-
-/*
- * ehca (internal) qp state values
- */
-enum ehca_qp_state {
-       EHCA_QPS_RESET = 1,
-       EHCA_QPS_INIT = 2,
-       EHCA_QPS_RTR = 3,
-       EHCA_QPS_RTS = 5,
-       EHCA_QPS_SQD = 6,
-       EHCA_QPS_SQE = 8,
-       EHCA_QPS_ERR = 128
-};
-
-/*
- * qp state transitions as defined by IB Arch Rel 1.1 page 431
- */
-enum ib_qp_statetrans {
-       IB_QPST_ANY2RESET,
-       IB_QPST_ANY2ERR,
-       IB_QPST_RESET2INIT,
-       IB_QPST_INIT2RTR,
-       IB_QPST_INIT2INIT,
-       IB_QPST_RTR2RTS,
-       IB_QPST_RTS2SQD,
-       IB_QPST_RTS2RTS,
-       IB_QPST_SQD2RTS,
-       IB_QPST_SQE2RTS,
-       IB_QPST_SQD2SQD,
-       IB_QPST_MAX     /* nr of transitions, this must be last!!! */
-};
-
-/*
- * ib2ehca_qp_state maps IB to ehca qp_state
- * returns ehca qp state corresponding to given ib qp state
- */
-static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
-{
-       switch (ib_qp_state) {
-       case IB_QPS_RESET:
-               return EHCA_QPS_RESET;
-       case IB_QPS_INIT:
-               return EHCA_QPS_INIT;
-       case IB_QPS_RTR:
-               return EHCA_QPS_RTR;
-       case IB_QPS_RTS:
-               return EHCA_QPS_RTS;
-       case IB_QPS_SQD:
-               return EHCA_QPS_SQD;
-       case IB_QPS_SQE:
-               return EHCA_QPS_SQE;
-       case IB_QPS_ERR:
-               return EHCA_QPS_ERR;
-       default:
-               ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
-               return -EINVAL;
-       }
-}
-
-/*
- * ehca2ib_qp_state maps ehca to IB qp_state
- * returns ib qp state corresponding to given ehca qp state
- */
-static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
-                                               ehca_qp_state)
-{
-       switch (ehca_qp_state) {
-       case EHCA_QPS_RESET:
-               return IB_QPS_RESET;
-       case EHCA_QPS_INIT:
-               return IB_QPS_INIT;
-       case EHCA_QPS_RTR:
-               return IB_QPS_RTR;
-       case EHCA_QPS_RTS:
-               return IB_QPS_RTS;
-       case EHCA_QPS_SQD:
-               return IB_QPS_SQD;
-       case EHCA_QPS_SQE:
-               return IB_QPS_SQE;
-       case EHCA_QPS_ERR:
-               return IB_QPS_ERR;
-       default:
-               ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
-               return -EINVAL;
-       }
-}
-
-/*
- * ehca_qp_type used as index for req_attr and opt_attr of
- * struct ehca_modqp_statetrans
- */
-enum ehca_qp_type {
-       QPT_RC = 0,
-       QPT_UC = 1,
-       QPT_UD = 2,
-       QPT_SQP = 3,
-       QPT_MAX
-};
-
-/*
- * ib2ehcaqptype maps Ib to ehca qp_type
- * returns ehca qp type corresponding to ib qp type
- */
-static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
-{
-       switch (ibqptype) {
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               return QPT_SQP;
-       case IB_QPT_RC:
-               return QPT_RC;
-       case IB_QPT_UC:
-               return QPT_UC;
-       case IB_QPT_UD:
-               return QPT_UD;
-       default:
-               ehca_gen_err("Invalid ibqptype=%x", ibqptype);
-               return -EINVAL;
-       }
-}
-
-static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
-                                                        int ib_tostate)
-{
-       int index = -EINVAL;
-       switch (ib_tostate) {
-       case IB_QPS_RESET:
-               index = IB_QPST_ANY2RESET;
-               break;
-       case IB_QPS_INIT:
-               switch (ib_fromstate) {
-               case IB_QPS_RESET:
-                       index = IB_QPST_RESET2INIT;
-                       break;
-               case IB_QPS_INIT:
-                       index = IB_QPST_INIT2INIT;
-                       break;
-               }
-               break;
-       case IB_QPS_RTR:
-               if (ib_fromstate == IB_QPS_INIT)
-                       index = IB_QPST_INIT2RTR;
-               break;
-       case IB_QPS_RTS:
-               switch (ib_fromstate) {
-               case IB_QPS_RTR:
-                       index = IB_QPST_RTR2RTS;
-                       break;
-               case IB_QPS_RTS:
-                       index = IB_QPST_RTS2RTS;
-                       break;
-               case IB_QPS_SQD:
-                       index = IB_QPST_SQD2RTS;
-                       break;
-               case IB_QPS_SQE:
-                       index = IB_QPST_SQE2RTS;
-                       break;
-               }
-               break;
-       case IB_QPS_SQD:
-               if (ib_fromstate == IB_QPS_RTS)
-                       index = IB_QPST_RTS2SQD;
-               break;
-       case IB_QPS_SQE:
-               break;
-       case IB_QPS_ERR:
-               index = IB_QPST_ANY2ERR;
-               break;
-       default:
-               break;
-       }
-       return index;
-}
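
get_modqp_statetrans() only produces an index; one hypothetical way such an index gets consumed is a per-(transition, QP type) table of required ib_qp_attr_mask bits, checked before a modify-qp request is forwarded to the hardware. The table contents below are examples only, not the driver's data:

/* Hypothetical consumer of the transition index. */
static const u32 demo_req_attr[IB_QPST_MAX][QPT_MAX] = {
        [IB_QPST_RESET2INIT][QPT_RC] = IB_QP_STATE | IB_QP_PKEY_INDEX |
                                       IB_QP_PORT | IB_QP_ACCESS_FLAGS,
        [IB_QPST_RESET2INIT][QPT_UD] = IB_QP_STATE | IB_QP_PKEY_INDEX |
                                       IB_QP_PORT | IB_QP_QKEY,
};

static int demo_check_modify(enum ib_qp_state from, enum ib_qp_state to,
                             enum ehca_qp_type qpt, u32 attr_mask)
{
        int trans = get_modqp_statetrans(from, to);

        if (trans < 0)
                return -EINVAL;         /* transition not permitted */
        if ((attr_mask & demo_req_attr[trans][qpt]) != demo_req_attr[trans][qpt])
                return -EINVAL;         /* a mandatory attribute is missing */
        return 0;
}
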
-
-/*
- * ibqptype2servicetype returns hcp service type corresponding to given
- * ib qp type used by create_qp()
- */
-static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
-{
-       switch (ibqptype) {
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               return ST_UD;
-       case IB_QPT_RC:
-               return ST_RC;
-       case IB_QPT_UC:
-               return ST_UC;
-       case IB_QPT_UD:
-               return ST_UD;
-       case IB_QPT_RAW_IPV6:
-               return -EINVAL;
-       case IB_QPT_RAW_ETHERTYPE:
-               return -EINVAL;
-       default:
-               ehca_gen_err("Invalid ibqptype=%x", ibqptype);
-               return -EINVAL;
-       }
-}
-
-/*
- * init userspace queue info from ipz_queue data
- */
-static inline void queue2resp(struct ipzu_queue_resp *resp,
-                             struct ipz_queue *queue)
-{
-       resp->qe_size = queue->qe_size;
-       resp->act_nr_of_sg = queue->act_nr_of_sg;
-       resp->queue_length = queue->queue_length;
-       resp->pagesize = queue->pagesize;
-       resp->toggle_state = queue->toggle_state;
-       resp->offset = queue->offset;
-}
-
-/*
- * init_qp_queue initializes/constructs r/squeue and registers queue pages.
- */
-static inline int init_qp_queue(struct ehca_shca *shca,
-                               struct ehca_pd *pd,
-                               struct ehca_qp *my_qp,
-                               struct ipz_queue *queue,
-                               int q_type,
-                               u64 expected_hret,
-                               struct ehca_alloc_queue_parms *parms,
-                               int wqe_size)
-{
-       int ret, cnt, ipz_rc, nr_q_pages;
-       void *vpage;
-       u64 rpage, h_ret;
-       struct ib_device *ib_dev = &shca->ib_device;
-       struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
-
-       if (!parms->queue_size)
-               return 0;
-
-       if (parms->is_small) {
-               nr_q_pages = 1;
-               ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
-                                       128 << parms->page_size,
-                                       wqe_size, parms->act_nr_sges, 1);
-       } else {
-               nr_q_pages = parms->queue_size;
-               ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
-                                       EHCA_PAGESIZE, wqe_size,
-                                       parms->act_nr_sges, 0);
-       }
-
-       if (!ipz_rc) {
-               ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
-                        ipz_rc);
-               return -EBUSY;
-       }
-
-       /* register queue pages */
-       for (cnt = 0; cnt < nr_q_pages; cnt++) {
-               vpage = ipz_qpageit_get_inc(queue);
-               if (!vpage) {
-                       ehca_err(ib_dev, "ipz_qpageit_get_inc() "
-                                "failed p_vpage= %p", vpage);
-                       ret = -EINVAL;
-                       goto init_qp_queue1;
-               }
-               rpage = __pa(vpage);
-
-               h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
-                                                my_qp->ipz_qp_handle,
-                                                NULL, 0, q_type,
-                                                rpage, parms->is_small ? 0 : 1,
-                                                my_qp->galpas.kernel);
-               if (cnt == (nr_q_pages - 1)) {  /* last page! */
-                       if (h_ret != expected_hret) {
-                               ehca_err(ib_dev, "hipz_qp_register_rpage() "
-                                        "h_ret=%lli", h_ret);
-                               ret = ehca2ib_return_code(h_ret);
-                               goto init_qp_queue1;
-                       }
-                       vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
-                       if (vpage) {
-                               ehca_err(ib_dev, "ipz_qpageit_get_inc() "
-                                        "should not succeed vpage=%p", vpage);
-                               ret = -EINVAL;
-                               goto init_qp_queue1;
-                       }
-               } else {
-                       if (h_ret != H_PAGE_REGISTERED) {
-                               ehca_err(ib_dev, "hipz_qp_register_rpage() "
-                                        "h_ret=%lli", h_ret);
-                               ret = ehca2ib_return_code(h_ret);
-                               goto init_qp_queue1;
-                       }
-               }
-       }
-
-       ipz_qeit_reset(queue);
-
-       return 0;
-
-init_qp_queue1:
-       ipz_queue_dtor(pd, queue);
-       return ret;
-}
-
-static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
-{
-       if (is_llqp)
-               return 128 << act_nr_sge;
-       else
-               return offsetof(struct ehca_wqe,
-                               u.nud.sg_list[act_nr_sge]);
-}
-
-static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
-                                      int req_nr_sge, int is_llqp)
-{
-       u32 wqe_size, q_size;
-       int act_nr_sge = req_nr_sge;
-
-       if (!is_llqp)
-               /* round up #SGEs so WQE size is a power of 2 */
-               for (act_nr_sge = 4; act_nr_sge <= 252;
-                    act_nr_sge = 4 + 2 * act_nr_sge)
-                       if (act_nr_sge >= req_nr_sge)
-                               break;
-
-       wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
-       q_size = wqe_size * (queue->max_wr + 1);
-
-       if (q_size <= 512)
-               queue->page_size = 2;
-       else if (q_size <= 1024)
-               queue->page_size = 3;
-       else
-               queue->page_size = 0;
-
-       queue->is_small = (queue->page_size != 0);
-}
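
The rounding loop above walks the series 4, 12, 28, 60, 124, 252 (act = 4 + 2 * act); together with the fixed WQE header that makes each candidate WQE size a power of two, and the page_size/is_small decision then falls out of the total queue size. A tiny standalone check of just that rounding rule (demo code; requests above 252 SGEs are rejected elsewhere in the driver):

#include <stdio.h>

static int demo_round_sges(int req_nr_sge)
{
        int act;

        /* same progression as ehca_determine_small_queue() */
        for (act = 4; act <= 252; act = 4 + 2 * act)
                if (act >= req_nr_sge)
                        break;
        return act;
}

int main(void)
{
        int req;

        for (req = 1; req <= 252; req += 50)
                printf("req=%3d -> act=%3d\n", req, demo_round_sges(req));
        return 0;
}
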
-
-/* needs to be called with cq->spinlock held */
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
-{
-       struct list_head *list, *node;
-
-       /* TODO: support low latency QPs */
-       if (qp->ext_type == EQPT_LLQP)
-               return;
-
-       if (on_sq) {
-               list = &qp->send_cq->sqp_err_list;
-               node = &qp->sq_err_node;
-       } else {
-               list = &qp->recv_cq->rqp_err_list;
-               node = &qp->rq_err_node;
-       }
-
-       if (list_empty(node))
-               list_add_tail(node, list);
-
-       return;
-}
-
-static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&cq->spinlock, flags);
-
-       if (!list_empty(node))
-               list_del_init(node);
-
-       spin_unlock_irqrestore(&cq->spinlock, flags);
-}
-
-static void reset_queue_map(struct ehca_queue_map *qmap)
-{
-       int i;
-
-       qmap->tail = qmap->entries - 1;
-       qmap->left_to_poll = 0;
-       qmap->next_wqe_idx = 0;
-       for (i = 0; i < qmap->entries; i++) {
-               qmap->map[i].reported = 1;
-               qmap->map[i].cqe_req = 0;
-       }
-}
-
-/*
- * Create an ib_qp struct that is either a QP or an SRQ, depending on
- * the value of the is_srq parameter. If init_attr and srq_init_attr share
- * fields, the field out of init_attr is used.
- */
-static struct ehca_qp *internal_create_qp(
-       struct ib_pd *pd,
-       struct ib_qp_init_attr *init_attr,
-       struct ib_srq_init_attr *srq_init_attr,
-       struct ib_udata *udata, int is_srq)
-{
-       struct ehca_qp *my_qp, *my_srq = NULL;
-       struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-                                             ib_device);
-       struct ib_ucontext *context = NULL;
-       u64 h_ret;
-       int is_llqp = 0, has_srq = 0, is_user = 0;
-       int qp_type, max_send_sge, max_recv_sge, ret;
-
-       /* h_call's out parameters */
-       struct ehca_alloc_qp_parms parms;
-       u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
-       unsigned long flags;
-
-       if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
-               ehca_err(pd->device, "Unable to create QP, max number of %i "
-                        "QPs reached.", shca->max_num_qps);
-               ehca_err(pd->device, "To increase the maximum number of QPs "
-                        "use the number_of_qps module parameter.\n");
-               return ERR_PTR(-ENOSPC);
-       }
-
-       if (init_attr->create_flags) {
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-EINVAL);
-       }
-
-       memset(&parms, 0, sizeof(parms));
-       qp_type = init_attr->qp_type;
-
-       if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
-               init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
-               ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
-                        init_attr->sq_sig_type);
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-EINVAL);
-       }
-
-       /* save LLQP info */
-       if (qp_type & 0x80) {
-               is_llqp = 1;
-               parms.ext_type = EQPT_LLQP;
-               parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
-       }
-       qp_type &= 0x1F;
-       init_attr->qp_type &= 0x1F;
-
-       /* handle SRQ base QPs */
-       if (init_attr->srq) {
-               my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
-
-               if (qp_type == IB_QPT_UC) {
-                       ehca_err(pd->device, "UC with SRQ not supported");
-                       atomic_dec(&shca->num_qps);
-                       return ERR_PTR(-EINVAL);
-               }
-
-               has_srq = 1;
-               parms.ext_type = EQPT_SRQBASE;
-               parms.srq_qpn = my_srq->real_qp_num;
-       }
-
-       if (is_llqp && has_srq) {
-               ehca_err(pd->device, "LLQPs can't have an SRQ");
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-EINVAL);
-       }
-
-       /* handle SRQs */
-       if (is_srq) {
-               parms.ext_type = EQPT_SRQ;
-               parms.srq_limit = srq_init_attr->attr.srq_limit;
-               if (init_attr->cap.max_recv_sge > 3) {
-                       ehca_err(pd->device, "no more than three SGEs "
-                                "supported for SRQ  pd=%p  max_sge=%x",
-                                pd, init_attr->cap.max_recv_sge);
-                       atomic_dec(&shca->num_qps);
-                       return ERR_PTR(-EINVAL);
-               }
-       }
-
-       /* check QP type */
-       if (qp_type != IB_QPT_UD &&
-           qp_type != IB_QPT_UC &&
-           qp_type != IB_QPT_RC &&
-           qp_type != IB_QPT_SMI &&
-           qp_type != IB_QPT_GSI) {
-               ehca_err(pd->device, "wrong QP Type=%x", qp_type);
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (is_llqp) {
-               switch (qp_type) {
-               case IB_QPT_RC:
-                       if ((init_attr->cap.max_send_wr > 255) ||
-                           (init_attr->cap.max_recv_wr > 255)) {
-                               ehca_err(pd->device,
-                                        "Invalid Number of max_sq_wr=%x "
-                                        "or max_rq_wr=%x for RC LLQP",
-                                        init_attr->cap.max_send_wr,
-                                        init_attr->cap.max_recv_wr);
-                               atomic_dec(&shca->num_qps);
-                               return ERR_PTR(-EINVAL);
-                       }
-                       break;
-               case IB_QPT_UD:
-                       if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
-                               ehca_err(pd->device, "UD LLQP not supported "
-                                        "by this adapter");
-                               atomic_dec(&shca->num_qps);
-                               return ERR_PTR(-ENOSYS);
-                       }
-                       if (!(init_attr->cap.max_send_sge <= 5
-                           && init_attr->cap.max_send_sge >= 1
-                           && init_attr->cap.max_recv_sge <= 5
-                           && init_attr->cap.max_recv_sge >= 1)) {
-                               ehca_err(pd->device,
-                                        "Invalid Number of max_send_sge=%x "
-                                        "or max_recv_sge=%x for UD LLQP",
-                                        init_attr->cap.max_send_sge,
-                                        init_attr->cap.max_recv_sge);
-                               atomic_dec(&shca->num_qps);
-                               return ERR_PTR(-EINVAL);
-                       } else if (init_attr->cap.max_send_wr > 255) {
-                               ehca_err(pd->device,
-                                        "Invalid Number of "
-                                        "max_send_wr=%x for UD QP_TYPE=%x",
-                                        init_attr->cap.max_send_wr, qp_type);
-                               atomic_dec(&shca->num_qps);
-                               return ERR_PTR(-EINVAL);
-                       }
-                       break;
-               default:
-                       ehca_err(pd->device, "unsupported LL QP Type=%x",
-                                qp_type);
-                       atomic_dec(&shca->num_qps);
-                       return ERR_PTR(-EINVAL);
-               }
-       } else {
-               int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
-                              || qp_type == IB_QPT_GSI) ? 250 : 252;
-
-               if (init_attr->cap.max_send_sge > max_sge
-                   || init_attr->cap.max_recv_sge > max_sge) {
-                       ehca_err(pd->device, "Invalid number of SGEs requested "
-                                "send_sge=%x recv_sge=%x max_sge=%x",
-                                init_attr->cap.max_send_sge,
-                                init_attr->cap.max_recv_sge, max_sge);
-                       atomic_dec(&shca->num_qps);
-                       return ERR_PTR(-EINVAL);
-               }
-       }
-
-       my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
-       if (!my_qp) {
-               ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       if (pd->uobject && udata) {
-               is_user = 1;
-               context = pd->uobject->context;
-       }
-
-       atomic_set(&my_qp->nr_events, 0);
-       init_waitqueue_head(&my_qp->wait_completion);
-       spin_lock_init(&my_qp->spinlock_s);
-       spin_lock_init(&my_qp->spinlock_r);
-       my_qp->qp_type = qp_type;
-       my_qp->ext_type = parms.ext_type;
-       my_qp->state = IB_QPS_RESET;
-
-       if (init_attr->recv_cq)
-               my_qp->recv_cq =
-                       container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
-       if (init_attr->send_cq)
-               my_qp->send_cq =
-                       container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
-
-       idr_preload(GFP_KERNEL);
-       write_lock_irqsave(&ehca_qp_idr_lock, flags);
-
-       ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
-       if (ret >= 0)
-               my_qp->token = ret;
-
-       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-       idr_preload_end();
-       if (ret < 0) {
-               if (ret == -ENOSPC) {
-                       ret = -EINVAL;
-                       ehca_err(pd->device, "Invalid number of qp");
-               } else {
-                       ret = -ENOMEM;
-                       ehca_err(pd->device, "Can't allocate new idr entry.");
-               }
-               goto create_qp_exit0;
-       }
-
-       if (has_srq)
-               parms.srq_token = my_qp->token;
-
-       parms.servicetype = ibqptype2servicetype(qp_type);
-       if (parms.servicetype < 0) {
-               ret = -EINVAL;
-               ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
-               goto create_qp_exit1;
-       }
-
-       /* Always signal by WQE so we can hide circ. WQEs */
-       parms.sigtype = HCALL_SIGT_BY_WQE;
-
-       /* UD_AV CIRCUMVENTION */
-       max_send_sge = init_attr->cap.max_send_sge;
-       max_recv_sge = init_attr->cap.max_recv_sge;
-       if (parms.servicetype == ST_UD && !is_llqp) {
-               max_send_sge += 2;
-               max_recv_sge += 2;
-       }
-
-       parms.token = my_qp->token;
-       parms.eq_handle = shca->eq.ipz_eq_handle;
-       parms.pd = my_pd->fw_pd;
-       if (my_qp->send_cq)
-               parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
-       if (my_qp->recv_cq)
-               parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
-
-       parms.squeue.max_wr = init_attr->cap.max_send_wr;
-       parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
-       parms.squeue.max_sge = max_send_sge;
-       parms.rqueue.max_sge = max_recv_sge;
-
-       /* RC QPs need one more SWQE for unsolicited ack circumvention */
-       if (qp_type == IB_QPT_RC)
-               parms.squeue.max_wr++;
-
-       if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
-               if (HAS_SQ(my_qp))
-                       ehca_determine_small_queue(
-                               &parms.squeue, max_send_sge, is_llqp);
-               if (HAS_RQ(my_qp))
-                       ehca_determine_small_queue(
-                               &parms.rqueue, max_recv_sge, is_llqp);
-               parms.qp_storage =
-                       (parms.squeue.is_small || parms.rqueue.is_small);
-       }
-
-       h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
-                        h_ret);
-               ret = ehca2ib_return_code(h_ret);
-               goto create_qp_exit1;
-       }
-
-       ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
-       my_qp->ipz_qp_handle = parms.qp_handle;
-       my_qp->galpas = parms.galpas;
-
-       swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
-       rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
-
-       switch (qp_type) {
-       case IB_QPT_RC:
-               if (is_llqp) {
-                       parms.squeue.act_nr_sges = 1;
-                       parms.rqueue.act_nr_sges = 1;
-               }
-               /* hide the extra WQE */
-               parms.squeue.act_nr_wqes--;
-               break;
-       case IB_QPT_UD:
-       case IB_QPT_GSI:
-       case IB_QPT_SMI:
-               /* UD circumvention */
-               if (is_llqp) {
-                       parms.squeue.act_nr_sges = 1;
-                       parms.rqueue.act_nr_sges = 1;
-               } else {
-                       parms.squeue.act_nr_sges -= 2;
-                       parms.rqueue.act_nr_sges -= 2;
-               }
-
-               if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
-                       parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
-                       parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
-                       parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
-                       parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
-                       ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
-               }
-
-               break;
-
-       default:
-               break;
-       }
-
-       /* initialize r/squeue and register queue pages */
-       if (HAS_SQ(my_qp)) {
-               ret = init_qp_queue(
-                       shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
-                       HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
-                       &parms.squeue, swqe_size);
-               if (ret) {
-                       ehca_err(pd->device, "Couldn't initialize squeue "
-                                "and pages ret=%i", ret);
-                       goto create_qp_exit2;
-               }
-
-               if (!is_user) {
-                       my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-                               my_qp->ipz_squeue.qe_size;
-                       my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-                                                   sizeof(struct ehca_qmap_entry));
-                       if (!my_qp->sq_map.map) {
-                               ehca_err(pd->device, "Couldn't allocate squeue "
-                                        "map ret=%i", ret);
-                               ret = -ENOMEM;
-                               goto create_qp_exit3;
-                       }
-                       INIT_LIST_HEAD(&my_qp->sq_err_node);
-                       /* to avoid the generation of bogus flush CQEs */
-                       reset_queue_map(&my_qp->sq_map);
-               }
-       }
-
-       if (HAS_RQ(my_qp)) {
-               ret = init_qp_queue(
-                       shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
-                       H_SUCCESS, &parms.rqueue, rwqe_size);
-               if (ret) {
-                       ehca_err(pd->device, "Couldn't initialize rqueue "
-                                "and pages ret=%i", ret);
-                       goto create_qp_exit4;
-               }
-               if (!is_user) {
-                       my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-                               my_qp->ipz_rqueue.qe_size;
-                       my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-                                                   sizeof(struct ehca_qmap_entry));
-                       if (!my_qp->rq_map.map) {
-                               ehca_err(pd->device, "Couldn't allocate rqueue "
-                                        "map ret=%i", ret);
-                               ret = -ENOMEM;
-                               goto create_qp_exit5;
-                       }
-                       INIT_LIST_HEAD(&my_qp->rq_err_node);
-                       /* to avoid the generation of bogus flush CQEs */
-                       reset_queue_map(&my_qp->rq_map);
-               }
-       } else if (init_attr->srq && !is_user) {
-               /* this is a base QP, use the queue map of the SRQ */
-               my_qp->rq_map = my_srq->rq_map;
-               INIT_LIST_HEAD(&my_qp->rq_err_node);
-
-               my_qp->ipz_rqueue = my_srq->ipz_rqueue;
-       }
-
-       if (is_srq) {
-               my_qp->ib_srq.pd = &my_pd->ib_pd;
-               my_qp->ib_srq.device = my_pd->ib_pd.device;
-
-               my_qp->ib_srq.srq_context = init_attr->qp_context;
-               my_qp->ib_srq.event_handler = init_attr->event_handler;
-       } else {
-               my_qp->ib_qp.qp_num = ib_qp_num;
-               my_qp->ib_qp.pd = &my_pd->ib_pd;
-               my_qp->ib_qp.device = my_pd->ib_pd.device;
-
-               my_qp->ib_qp.recv_cq = init_attr->recv_cq;
-               my_qp->ib_qp.send_cq = init_attr->send_cq;
-
-               my_qp->ib_qp.qp_type = qp_type;
-               my_qp->ib_qp.srq = init_attr->srq;
-
-               my_qp->ib_qp.qp_context = init_attr->qp_context;
-               my_qp->ib_qp.event_handler = init_attr->event_handler;
-       }
-
-       init_attr->cap.max_inline_data = 0; /* not supported yet */
-       init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
-       init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
-       init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
-       init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
-       my_qp->init_attr = *init_attr;
-
-       if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
-               shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
-                       &my_qp->ib_qp;
-               if (ehca_nr_ports < 0) {
-                       /* alloc array to cache subsequent modify qp parms
-                        * for autodetect mode
-                        */
-                       my_qp->mod_qp_parm =
-                               kzalloc(EHCA_MOD_QP_PARM_MAX *
-                                       sizeof(*my_qp->mod_qp_parm),
-                                       GFP_KERNEL);
-                       if (!my_qp->mod_qp_parm) {
-                               ehca_err(pd->device,
-                                        "Could not alloc mod_qp_parm");
-                               ret = -ENOMEM;
-                               goto create_qp_exit5;
-                       }
-               }
-       }
-
-       /* NOTE: define_aqp0() not supported yet */
-       if (qp_type == IB_QPT_GSI) {
-               h_ret = ehca_define_sqp(shca, my_qp, init_attr);
-               if (h_ret != H_SUCCESS) {
-                       kfree(my_qp->mod_qp_parm);
-                       my_qp->mod_qp_parm = NULL;
-                       /* the QP pointer is no longer valid */
-                       shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
-                               NULL;
-                       ret = ehca2ib_return_code(h_ret);
-                       goto create_qp_exit6;
-               }
-       }
-
-       if (my_qp->send_cq) {
-               ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
-               if (ret) {
-                       ehca_err(pd->device,
-                                "Couldn't assign qp to send_cq ret=%i", ret);
-                       goto create_qp_exit7;
-               }
-       }
-
-       /* copy queues, galpa data to user space */
-       if (context && udata) {
-               struct ehca_create_qp_resp resp;
-               memset(&resp, 0, sizeof(resp));
-
-               resp.qp_num = my_qp->real_qp_num;
-               resp.token = my_qp->token;
-               resp.qp_type = my_qp->qp_type;
-               resp.ext_type = my_qp->ext_type;
-               resp.qkey = my_qp->qkey;
-               resp.real_qp_num = my_qp->real_qp_num;
-
-               if (HAS_SQ(my_qp))
-                       queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
-               if (HAS_RQ(my_qp))
-                       queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
-               resp.fw_handle_ofs = (u32)
-                       (my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));
-
-               if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
-                       ehca_err(pd->device, "Copy to udata failed");
-                       ret = -EINVAL;
-                       goto create_qp_exit8;
-               }
-       }
-
-       return my_qp;
-
-create_qp_exit8:
-       ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
-
-create_qp_exit7:
-       kfree(my_qp->mod_qp_parm);
-
-create_qp_exit6:
-       if (HAS_RQ(my_qp) && !is_user)
-               vfree(my_qp->rq_map.map);
-
-create_qp_exit5:
-       if (HAS_RQ(my_qp))
-               ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-create_qp_exit4:
-       if (HAS_SQ(my_qp) && !is_user)
-               vfree(my_qp->sq_map.map);
-
-create_qp_exit3:
-       if (HAS_SQ(my_qp))
-               ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-create_qp_exit2:
-       hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
-
-create_qp_exit1:
-       write_lock_irqsave(&ehca_qp_idr_lock, flags);
-       idr_remove(&ehca_qp_idr, my_qp->token);
-       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-create_qp_exit0:
-       kmem_cache_free(qp_cache, my_qp);
-       atomic_dec(&shca->num_qps);
-       return ERR_PTR(ret);
-}
-
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
-                            struct ib_qp_init_attr *qp_init_attr,
-                            struct ib_udata *udata)
-{
-       struct ehca_qp *ret;
-
-       ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
-       return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
-}
-
-static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
-                              struct ib_uobject *uobject);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
-                              struct ib_srq_init_attr *srq_init_attr,
-                              struct ib_udata *udata)
-{
-       struct ib_qp_init_attr qp_init_attr;
-       struct ehca_qp *my_qp;
-       struct ib_srq *ret;
-       struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-                                             ib_device);
-       struct hcp_modify_qp_control_block *mqpcb;
-       u64 hret, update_mask;
-
-       if (srq_init_attr->srq_type != IB_SRQT_BASIC)
-               return ERR_PTR(-ENOSYS);
-
-       /* For common attributes, internal_create_qp() takes its info
-        * out of qp_init_attr, so copy all common attrs there.
-        */
-       memset(&qp_init_attr, 0, sizeof(qp_init_attr));
-       qp_init_attr.event_handler = srq_init_attr->event_handler;
-       qp_init_attr.qp_context = srq_init_attr->srq_context;
-       qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
-       qp_init_attr.qp_type = IB_QPT_RC;
-       qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
-       qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
-
-       my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
-       if (IS_ERR(my_qp))
-               return (struct ib_srq *)my_qp;
-
-       /* copy back return values */
-       srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
-       srq_init_attr->attr.max_sge = 3;
-
-       /* drive SRQ into RTR state */
-       mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!mqpcb) {
-               ehca_err(pd->device, "Could not get zeroed page for mqpcb "
-                        "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
-               ret = ERR_PTR(-ENOMEM);
-               goto create_srq1;
-       }
-
-       mqpcb->qp_state = EHCA_QPS_INIT;
-       mqpcb->prim_phys_port = 1;
-       update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-       hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               update_mask,
-                               mqpcb, my_qp->galpas.kernel);
-       if (hret != H_SUCCESS) {
-               ehca_err(pd->device, "Could not modify SRQ to INIT "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, my_qp->real_qp_num, hret);
-               goto create_srq2;
-       }
-
-       mqpcb->qp_enable = 1;
-       update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
-       hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               update_mask,
-                               mqpcb, my_qp->galpas.kernel);
-       if (hret != H_SUCCESS) {
-               ehca_err(pd->device, "Could not enable SRQ "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, my_qp->real_qp_num, hret);
-               goto create_srq2;
-       }
-
-       mqpcb->qp_state  = EHCA_QPS_RTR;
-       update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-       hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               update_mask,
-                               mqpcb, my_qp->galpas.kernel);
-       if (hret != H_SUCCESS) {
-               ehca_err(pd->device, "Could not modify SRQ to RTR "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, my_qp->real_qp_num, hret);
-               goto create_srq2;
-       }
-
-       ehca_free_fw_ctrlblock(mqpcb);
-
-       return &my_qp->ib_srq;
-
-create_srq2:
-       ret = ERR_PTR(ehca2ib_return_code(hret));
-       ehca_free_fw_ctrlblock(mqpcb);
-
-create_srq1:
-       internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
-
-       return ret;
-}
-
-/*
- * prepare_sqe_rts is called by internal_modify_qp() at the sqe -> rts
- * transition; it sets the purge bit of the bad wqe and of all subsequent
- * wqes to avoid re-entering sqe, and returns the total number of bad wqes
- * in bad_wqe_cnt
- */
-static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
-                          int *bad_wqe_cnt)
-{
-       u64 h_ret;
-       struct ipz_queue *squeue;
-       void *bad_send_wqe_p, *bad_send_wqe_v;
-       u64 q_ofs;
-       struct ehca_wqe *wqe;
-       int qp_num = my_qp->ib_qp.qp_num;
-
-       /* get send wqe pointer */
-       h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
-                                          my_qp->ipz_qp_handle, &my_qp->pf,
-                                          &bad_send_wqe_p, NULL, 2);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
-                        " ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, qp_num, h_ret);
-               return ehca2ib_return_code(h_ret);
-       }
-       bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
-       ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
-                qp_num, bad_send_wqe_p);
-       /* convert wqe pointer to vadr */
-       bad_send_wqe_v = __va((u64)bad_send_wqe_p);
-       if (ehca_debug_level >= 2)
-               ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
-       squeue = &my_qp->ipz_squeue;
-       if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
-               ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
-                        " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
-               return -EFAULT;
-       }
-
-       /* loop sets wqe's purge bit */
-       wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
-       *bad_wqe_cnt = 0;
-       while (wqe->optype != 0xff && wqe->wqef != 0xff) {
-               if (ehca_debug_level >= 2)
-                       ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
-               wqe->nr_of_data_seg = 0; /* suppress data access */
-               wqe->wqef = WQEF_PURGE; /* WQE to be purged */
-               q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
-               wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
-               *bad_wqe_cnt = (*bad_wqe_cnt)+1;
-       }
-       /*
-        * the bad wqe will be reprocessed and ignored when poll_cq() is called,
-        * i.e. the number of wqes with flush error status is one less
-        */
-       ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
-                qp_num, (*bad_wqe_cnt)-1);
-       wqe->wqef = 0;
-
-       return 0;
-}
-
-static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
-                         struct ehca_queue_map *qmap)
-{
-       void *wqe_v;
-       u64 q_ofs;
-       u32 wqe_idx;
-       unsigned int tail_idx;
-
-       /* convert real to abs address */
-       wqe_p = wqe_p & (~(1UL << 63));
-
-       wqe_v = __va(wqe_p);
-
-       if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
-               ehca_gen_err("Invalid offset for calculating left cqes "
-                               "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
-               return -EFAULT;
-       }
-
-       tail_idx = next_index(qmap->tail, qmap->entries);
-       wqe_idx = q_ofs / ipz_queue->qe_size;
-
-       /* check, for each processed wqe, whether a cqe was requested */
-       while (tail_idx != wqe_idx) {
-               if (qmap->map[tail_idx].cqe_req)
-                       qmap->left_to_poll++;
-               tail_idx = next_index(tail_idx, qmap->entries);
-       }
-       /* save index in queue, where we have to start flushing */
-       qmap->next_wqe_idx = wqe_idx;
-       return 0;
-}
-
-static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
-{
-       u64 h_ret;
-       void *send_wqe_p, *recv_wqe_p;
-       int ret;
-       unsigned long flags;
-       int qp_num = my_qp->ib_qp.qp_num;
-
-       /* this hcall is not supported on base QPs */
-       if (my_qp->ext_type != EQPT_SRQBASE) {
-               /* get send and receive wqe pointer */
-               h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle, &my_qp->pf,
-                               &send_wqe_p, &recv_wqe_p, 4);
-               if (h_ret != H_SUCCESS) {
-                       ehca_err(&shca->ib_device, "disable_and_get_wqe() "
-                                "failed ehca_qp=%p qp_num=%x h_ret=%lli",
-                                my_qp, qp_num, h_ret);
-                       return ehca2ib_return_code(h_ret);
-               }
-
-               /*
-                * acquire the lock to ensure that nobody is polling the cq;
-                * otherwise the qmap->tail pointer could be in an
-                * inconsistent state.
-                */
-               spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-               ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
-                               &my_qp->sq_map);
-               spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-               if (ret)
-                       return ret;
-
-
-               spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-               ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
-                               &my_qp->rq_map);
-               spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
-               if (ret)
-                       return ret;
-       } else {
-               spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-               my_qp->sq_map.left_to_poll = 0;
-               my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
-                                                       my_qp->sq_map.entries);
-               spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-
-               spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-               my_qp->rq_map.left_to_poll = 0;
-               my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
-                                                       my_qp->rq_map.entries);
-               spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
-       }
-
-       /* this ensures flush cqes are generated only for pending wqes */
-       if ((my_qp->sq_map.left_to_poll == 0) &&
-                               (my_qp->rq_map.left_to_poll == 0)) {
-               spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-               ehca_add_to_err_list(my_qp, 1);
-               spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-
-               if (HAS_RQ(my_qp)) {
-                       spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-                       ehca_add_to_err_list(my_qp, 0);
-                       spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
-                                       flags);
-               }
-       }
-
-       return 0;
-}
-
-/*
- * internal_modify_qp with a circumvention to handle aqp0 properly.
- * smi_reset2init indicates whether this is an internal reset-to-init call
- * for smi. This flag must always be zero if called from ehca_modify_qp()!
- * This internal function was introduced to avoid recursion of
- * ehca_modify_qp()!
- */
-static int internal_modify_qp(struct ib_qp *ibqp,
-                             struct ib_qp_attr *attr,
-                             int attr_mask, int smi_reset2init)
-{
-       enum ib_qp_state qp_cur_state, qp_new_state;
-       int cnt, qp_attr_idx, ret = 0;
-       enum ib_qp_statetrans statetrans;
-       struct hcp_modify_qp_control_block *mqpcb;
-       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-       struct ehca_shca *shca =
-               container_of(ibqp->pd->device, struct ehca_shca, ib_device);
-       u64 update_mask;
-       u64 h_ret;
-       int bad_wqe_cnt = 0;
-       int is_user = 0;
-       int squeue_locked = 0;
-       unsigned long flags = 0;
-
-       /* do query_qp to obtain current attr values */
-       mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-       if (!mqpcb) {
-               ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
-                        "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               mqpcb, my_qp->galpas.kernel);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(ibqp->device, "hipz_h_query_qp() failed "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, ibqp->qp_num, h_ret);
-               ret = ehca2ib_return_code(h_ret);
-               goto modify_qp_exit1;
-       }
-       if (ibqp->uobject)
-               is_user = 1;
-
-       qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
-
-       if (qp_cur_state == -EINVAL) {  /* invalid qp state */
-               ret = -EINVAL;
-               ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
-                        "ehca_qp=%p qp_num=%x",
-                        mqpcb->qp_state, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-       /*
-        * circumvention to set aqp0 initial state to init
-        * as expected by IB spec
-        */
-       if (smi_reset2init == 0 &&
-           ibqp->qp_type == IB_QPT_SMI &&
-           qp_cur_state == IB_QPS_RESET &&
-           (attr_mask & IB_QP_STATE) &&
-           attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
-               struct ib_qp_attr smiqp_attr = {
-                       .qp_state = IB_QPS_INIT,
-                       .port_num = my_qp->init_attr.port_num,
-                       .pkey_index = 0,
-                       .qkey = 0
-               };
-               int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
-                       IB_QP_PKEY_INDEX | IB_QP_QKEY;
-               int smirc = internal_modify_qp(
-                       ibqp, &smiqp_attr, smiqp_attr_mask, 1);
-               if (smirc) {
-                       ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
-                                "ehca_modify_qp() rc=%i", smirc);
-                       ret = H_PARAMETER;
-                       goto modify_qp_exit1;
-               }
-               qp_cur_state = IB_QPS_INIT;
-               ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
-       }
-       /* is the transmitted current state equal to the "real" current state? */
-       if ((attr_mask & IB_QP_CUR_STATE) &&
-           qp_cur_state != attr->cur_qp_state) {
-               ret = -EINVAL;
-               ehca_err(ibqp->device,
-                        "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
-                        " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
-                        attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-
-       ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
-                "new qp_state=%x attribute_mask=%x",
-                my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
-
-       qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
-       if (!smi_reset2init &&
-           !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
-                               attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
-               ret = -EINVAL;
-               ehca_err(ibqp->device,
-                        "Invalid qp transition new_state=%x cur_state=%x "
-                        "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
-                        qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
-               goto modify_qp_exit1;
-       }
-
-       mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
-       if (mqpcb->qp_state)
-               update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-       else {
-               ret = -EINVAL;
-               ehca_err(ibqp->device, "Invalid new qp state=%x "
-                        "ehca_qp=%p qp_num=%x",
-                        qp_new_state, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-
-       /* retrieve state transition struct to get req and opt attrs */
-       statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
-       if (statetrans < 0) {
-               ret = -EINVAL;
-               ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
-                        "new_qp_state=%x State_xsition=%x ehca_qp=%p "
-                        "qp_num=%x", qp_cur_state, qp_new_state,
-                        statetrans, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-
-       qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
-
-       if (qp_attr_idx < 0) {
-               ret = qp_attr_idx;
-               ehca_err(ibqp->device,
-                        "Invalid QP type=%x ehca_qp=%p qp_num=%x",
-                        ibqp->qp_type, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-
-       ehca_dbg(ibqp->device,
-                "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
-                my_qp, ibqp->qp_num, statetrans);
-
-       /* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
-        * in non-LL UD QPs.
-        */
-       if ((my_qp->qp_type == IB_QPT_UD) &&
-           (my_qp->ext_type != EQPT_LLQP) &&
-           (statetrans == IB_QPST_INIT2RTR) &&
-           (shca->hw_level >= 0x22)) {
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
-               mqpcb->send_grh_flag = 1;
-       }
-
-       /* sqe -> rts: set purge bit of bad wqe before the actual transition */
-       if ((my_qp->qp_type == IB_QPT_UD ||
-            my_qp->qp_type == IB_QPT_GSI ||
-            my_qp->qp_type == IB_QPT_SMI) &&
-           statetrans == IB_QPST_SQE2RTS) {
-               /* mark next free wqe if kernel */
-               if (!ibqp->uobject) {
-                       struct ehca_wqe *wqe;
-                       /* lock send queue */
-                       spin_lock_irqsave(&my_qp->spinlock_s, flags);
-                       squeue_locked = 1;
-                       /* mark next free wqe */
-                       wqe = (struct ehca_wqe *)
-                               ipz_qeit_get(&my_qp->ipz_squeue);
-                       wqe->optype = wqe->wqef = 0xff;
-                       ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
-                                ibqp->qp_num, wqe);
-               }
-               ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
-               if (ret) {
-                       ehca_err(ibqp->device, "prepare_sqe_rts() failed "
-                                "ehca_qp=%p qp_num=%x ret=%i",
-                                my_qp, ibqp->qp_num, ret);
-                       goto modify_qp_exit2;
-               }
-       }
-
-       /*
-        * enable RDMA_Atomic_Control for reset->init on a reliable connection;
-        * this is necessary since gen2 does not provide that flag,
-        * but pHyp requires it
-        */
-       if (statetrans == IB_QPST_RESET2INIT &&
-           (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
-               mqpcb->rdma_atomic_ctrl = 3;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
-       }
-       /* circumvention: pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
-       if (statetrans == IB_QPST_INIT2RTR &&
-           (ibqp->qp_type == IB_QPT_UC) &&
-           !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
-               mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
-       }
-
-       if (attr_mask & IB_QP_PKEY_INDEX) {
-               if (attr->pkey_index >= 16) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid pkey_index=%x. "
-                                "ehca_qp=%p qp_num=%x max_pkey_index=f",
-                                attr->pkey_index, my_qp, ibqp->qp_num);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->prim_p_key_idx = attr->pkey_index;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
-       }
-       if (attr_mask & IB_QP_PORT) {
-               struct ehca_sport *sport;
-               struct ehca_qp *aqp1;
-               if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid port=%x. "
-                                "ehca_qp=%p qp_num=%x num_ports=%x",
-                                attr->port_num, my_qp, ibqp->qp_num,
-                                shca->num_ports);
-                       goto modify_qp_exit2;
-               }
-               sport = &shca->sport[attr->port_num - 1];
-               if (!sport->ibqp_sqp[IB_QPT_GSI]) {
-                       /* should not occur */
-                       ret = -EFAULT;
-                       ehca_err(ibqp->device, "AQP1 was not created for "
-                                "port=%x", attr->port_num);
-                       goto modify_qp_exit2;
-               }
-               aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
-                                   struct ehca_qp, ib_qp);
-               if (ibqp->qp_type != IB_QPT_GSI &&
-                   ibqp->qp_type != IB_QPT_SMI &&
-                   aqp1->mod_qp_parm) {
-                       /*
-                        * firmware will reject this modify_qp() because
-                        * port is not activated/initialized fully
-                        */
-                       ret = -EFAULT;
-                       ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
-                                 "either port is being activated (try again) "
-                                 "or cabling issue", attr->port_num);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->prim_phys_port = attr->port_num;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
-       }
-       if (attr_mask & IB_QP_QKEY) {
-               mqpcb->qkey = attr->qkey;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
-       }
-       if (attr_mask & IB_QP_AV) {
-               mqpcb->dlid = attr->ah_attr.dlid;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
-               mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
-               mqpcb->service_level = attr->ah_attr.sl;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
-
-               if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
-                                 attr->ah_attr.static_rate,
-                                 &mqpcb->max_static_rate)) {
-                       ret = -EINVAL;
-                       goto modify_qp_exit2;
-               }
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
-
-               /*
-                * Always supply the GRH flag, even if it's zero, to give the
-                * hypervisor a clear "yes" or "no" instead of a "perhaps"
-                */
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
-
-               /*
-                * only if GRH is TRUE may we set SOURCE_GID_IDX and
-                * DEST_GID; otherwise pHyp will return H_ATTR_PARM!
-                */
-               if (attr->ah_attr.ah_flags == IB_AH_GRH) {
-                       mqpcb->send_grh_flag = 1;
-
-                       mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
-                       update_mask |=
-                               EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
-
-                       for (cnt = 0; cnt < 16; cnt++)
-                               mqpcb->dest_gid.byte[cnt] =
-                                       attr->ah_attr.grh.dgid.raw[cnt];
-
-                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
-                       mqpcb->flow_label = attr->ah_attr.grh.flow_label;
-                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
-                       mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
-                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
-                       mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
-                       update_mask |=
-                               EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
-               }
-       }
-
-       if (attr_mask & IB_QP_PATH_MTU) {
-               /* store log2(MTU) */
-               my_qp->mtu_shift = attr->path_mtu + 7;
-               mqpcb->path_mtu = attr->path_mtu;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
-       }
-       if (attr_mask & IB_QP_TIMEOUT) {
-               mqpcb->timeout = attr->timeout;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
-       }
-       if (attr_mask & IB_QP_RETRY_CNT) {
-               mqpcb->retry_count = attr->retry_cnt;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
-       }
-       if (attr_mask & IB_QP_RNR_RETRY) {
-               mqpcb->rnr_retry_count = attr->rnr_retry;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
-       }
-       if (attr_mask & IB_QP_RQ_PSN) {
-               mqpcb->receive_psn = attr->rq_psn;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
-       }
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-               mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
-                       attr->max_dest_rd_atomic : 2;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
-       }
-       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-               mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
-                       attr->max_rd_atomic : 2;
-               update_mask |=
-                       EHCA_BMASK_SET
-                       (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
-       }
-       if (attr_mask & IB_QP_ALT_PATH) {
-               if (attr->alt_port_num < 1
-                   || attr->alt_port_num > shca->num_ports) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid alt_port=%x. "
-                                "ehca_qp=%p qp_num=%x num_ports=%x",
-                                attr->alt_port_num, my_qp, ibqp->qp_num,
-                                shca->num_ports);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->alt_phys_port = attr->alt_port_num;
-
-               if (attr->alt_pkey_index >= 16) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
-                                "ehca_qp=%p qp_num=%x max_pkey_index=f",
-                                attr->alt_pkey_index, my_qp, ibqp->qp_num);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->alt_p_key_idx = attr->alt_pkey_index;
-
-               mqpcb->timeout_al = attr->alt_timeout;
-               mqpcb->dlid_al = attr->alt_ah_attr.dlid;
-               mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
-               mqpcb->service_level_al = attr->alt_ah_attr.sl;
-
-               if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
-                                 attr->alt_ah_attr.static_rate,
-                                 &mqpcb->max_static_rate_al)) {
-                       ret = -EINVAL;
-                       goto modify_qp_exit2;
-               }
-
-               /* OpenIB doesn't support alternate retry counts - copy them */
-               mqpcb->retry_count_al = mqpcb->retry_count;
-               mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;
-
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);
-
-               /*
-                * Always supply the GRH flag, even if it's zero, to give the
-                * hypervisor a clear "yes" or "no" instead of a "perhaps"
-                */
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
-
-               /*
-                * only if GRH is TRUE may we set SOURCE_GID_IDX and
-                * DEST_GID; otherwise pHyp will return H_ATTR_PARM!
-                */
-               if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
-                       mqpcb->send_grh_flag_al = 1;
-
-                       for (cnt = 0; cnt < 16; cnt++)
-                               mqpcb->dest_gid_al.byte[cnt] =
-                                       attr->alt_ah_attr.grh.dgid.raw[cnt];
-                       mqpcb->source_gid_idx_al =
-                               attr->alt_ah_attr.grh.sgid_index;
-                       mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
-                       mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
-                       mqpcb->traffic_class_al =
-                               attr->alt_ah_attr.grh.traffic_class;
-
-                       update_mask |=
-                               EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
-                               | EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
-                               | EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
-                               | EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
-                               EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
-               }
-       }
-
-       if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-               mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
-       }
-
-       if (attr_mask & IB_QP_SQ_PSN) {
-               mqpcb->send_psn = attr->sq_psn;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
-       }
-
-       if (attr_mask & IB_QP_DEST_QPN) {
-               mqpcb->dest_qp_nr = attr->dest_qp_num;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
-       }
-
-       if (attr_mask & IB_QP_PATH_MIG_STATE) {
-               if (attr->path_mig_state != IB_MIG_REARM
-                   && attr->path_mig_state != IB_MIG_MIGRATED) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid mig_state=%x",
-                                attr->path_mig_state);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->path_migration_state = attr->path_mig_state + 1;
-               if (attr->path_mig_state == IB_MIG_REARM)
-                       my_qp->mig_armed = 1;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
-       }
-
-       if (attr_mask & IB_QP_CAP) {
-               mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
-               mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
-               /* no support for max_send/recv_sge yet */
-       }
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
-
-       h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                                my_qp->ipz_qp_handle,
-                                &my_qp->pf,
-                                update_mask,
-                                mqpcb, my_qp->galpas.kernel);
-
-       if (h_ret != H_SUCCESS) {
-               ret = ehca2ib_return_code(h_ret);
-               ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
-                        "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
-               goto modify_qp_exit2;
-       }
-
-       if ((my_qp->qp_type == IB_QPT_UD ||
-            my_qp->qp_type == IB_QPT_GSI ||
-            my_qp->qp_type == IB_QPT_SMI) &&
-           statetrans == IB_QPST_SQE2RTS) {
-               /* ring the doorbell to reprocess wqes */
-               iosync(); /* serialize GAL register access */
-               hipz_update_sqa(my_qp, bad_wqe_cnt-1);
-               ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
-       }
-
-       if (statetrans == IB_QPST_RESET2INIT ||
-           statetrans == IB_QPST_INIT2INIT) {
-               mqpcb->qp_enable = 1;
-               mqpcb->qp_state = EHCA_QPS_INIT;
-               update_mask = 0;
-               update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
-
-               h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                                        my_qp->ipz_qp_handle,
-                                        &my_qp->pf,
-                                        update_mask,
-                                        mqpcb,
-                                        my_qp->galpas.kernel);
-
-               if (h_ret != H_SUCCESS) {
-                       ret = ehca2ib_return_code(h_ret);
-                       ehca_err(ibqp->device, "ENABLE in context of "
-                                "RESET_2_INIT failed! Maybe you didn't get "
-                                "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
-                                h_ret, my_qp, ibqp->qp_num);
-                       goto modify_qp_exit2;
-               }
-       }
-       if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
-           && !is_user) {
-               ret = check_for_left_cqes(my_qp, shca);
-               if (ret)
-                       goto modify_qp_exit2;
-       }
-
-       if (statetrans == IB_QPST_ANY2RESET) {
-               ipz_qeit_reset(&my_qp->ipz_rqueue);
-               ipz_qeit_reset(&my_qp->ipz_squeue);
-
-               if (qp_cur_state == IB_QPS_ERR && !is_user) {
-                       del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
-
-                       if (HAS_RQ(my_qp))
-                               del_from_err_list(my_qp->recv_cq,
-                                                 &my_qp->rq_err_node);
-               }
-               if (!is_user)
-                       reset_queue_map(&my_qp->sq_map);
-
-               if (HAS_RQ(my_qp) && !is_user)
-                       reset_queue_map(&my_qp->rq_map);
-       }
-
-       if (attr_mask & IB_QP_QKEY)
-               my_qp->qkey = attr->qkey;
-
-modify_qp_exit2:
-       if (squeue_locked) { /* this means: sqe -> rts */
-               spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
-               my_qp->sqerr_purgeflag = 1;
-       }
-
-modify_qp_exit1:
-       ehca_free_fw_ctrlblock(mqpcb);
-
-       return ret;
-}
-
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-                  struct ib_udata *udata)
-{
-       int ret = 0;
-
-       struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
-                                             ib_device);
-       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-
-       /* The if-block below caches the qp_attr to be modified for GSI and
-        * SMI qps during initialization by ib_mad. When the respective port
-        * is activated, i.e. we got a PORT_ACTIVE event, we replay the
-        * cached sequence of modify calls, see ehca_recover_sqp() below.
-        * Why this is required:
-        * 1) If only one port is connected, older code required that port
-        *    one be connected and that the module option nr_ports=1 be
-        *    given by the user, which is very inconvenient for the end user.
-        * 2) Firmware accepts modify_qp() only once the respective port has
-        *    become active. Older code had a 30 sec wait loop in create_qp()/
-        *    define_aqp1(), which is not appropriate in practice. This
-        *    code removes that wait loop, see define_aqp1(), and always
-        *    reports all ports to ib_mad and thus to users. Only activated
-        *    ports will then be usable by users.
-        */
-       if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
-               int port = my_qp->init_attr.port_num;
-               struct ehca_sport *sport = &shca->sport[port - 1];
-               unsigned long flags;
-               spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-               /* cache qp_attr only during init */
-               if (my_qp->mod_qp_parm) {
-                       struct ehca_mod_qp_parm *p;
-                       if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
-                               ehca_err(&shca->ib_device,
-                                        "mod_qp_parm overflow state=%x port=%x"
-                                        " type=%x", attr->qp_state,
-                                        my_qp->init_attr.port_num,
-                                        ibqp->qp_type);
-                               spin_unlock_irqrestore(&sport->mod_sqp_lock,
-                                                      flags);
-                               return -EINVAL;
-                       }
-                       p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
-                       p->mask = attr_mask;
-                       p->attr = *attr;
-                       my_qp->mod_qp_parm_idx++;
-                       ehca_dbg(&shca->ib_device,
-                                "Saved qp_attr for state=%x port=%x type=%x",
-                                attr->qp_state, my_qp->init_attr.port_num,
-                                ibqp->qp_type);
-                       spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-                       goto out;
-               }
-               spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-       }
-
-       ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
-
-out:
-       if ((ret == 0) && (attr_mask & IB_QP_STATE))
-               my_qp->state = attr->qp_state;
-
-       return ret;
-}
-
-void ehca_recover_sqp(struct ib_qp *sqp)
-{
-       struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
-       int port = my_sqp->init_attr.port_num;
-       struct ib_qp_attr attr;
-       struct ehca_mod_qp_parm *qp_parm;
-       int i, qp_parm_idx, ret;
-       unsigned long flags, wr_cnt;
-
-       if (!my_sqp->mod_qp_parm)
-               return;
-       ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
-
-       qp_parm = my_sqp->mod_qp_parm;
-       qp_parm_idx = my_sqp->mod_qp_parm_idx;
-       for (i = 0; i < qp_parm_idx; i++) {
-               attr = qp_parm[i].attr;
-               ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
-               if (ret) {
-                       ehca_err(sqp->device, "Could not modify SQP port=%x "
-                                "qp_num=%x ret=%x", port, sqp->qp_num, ret);
-                       goto free_qp_parm;
-               }
-               ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
-                        port, sqp->qp_num, attr.qp_state);
-       }
-
-       /* re-trigger posted recv wrs */
-       wr_cnt =  my_sqp->ipz_rqueue.current_q_offset /
-               my_sqp->ipz_rqueue.qe_size;
-       if (wr_cnt) {
-               spin_lock_irqsave(&my_sqp->spinlock_r, flags);
-               hipz_update_rqa(my_sqp, wr_cnt);
-               spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
-               ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
-                        port, sqp->qp_num, wr_cnt);
-       }
-
-free_qp_parm:
-       kfree(qp_parm);
-       /* this prevents subsequent calls to modify_qp() from caching qp_attr */
-       my_sqp->mod_qp_parm = NULL;
-}
-
-int ehca_query_qp(struct ib_qp *qp,
-                 struct ib_qp_attr *qp_attr,
-                 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
-{
-       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-       struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
-                                             ib_device);
-       struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-       struct hcp_modify_qp_control_block *qpcb;
-       int cnt, ret = 0;
-       u64 h_ret;
-
-       if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
-               ehca_err(qp->device, "Invalid attribute mask "
-                        "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
-                        my_qp, qp->qp_num, qp_attr_mask);
-               return -EINVAL;
-       }
-
-       qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!qpcb) {
-               ehca_err(qp->device, "Out of memory for qpcb "
-                        "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_qp(adapter_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               qpcb, my_qp->galpas.kernel);
-
-       if (h_ret != H_SUCCESS) {
-               ret = ehca2ib_return_code(h_ret);
-               ehca_err(qp->device, "hipz_h_query_qp() failed "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, qp->qp_num, h_ret);
-               goto query_qp_exit1;
-       }
-
-       qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
-       qp_attr->qp_state = qp_attr->cur_qp_state;
-
-       if (qp_attr->cur_qp_state == -EINVAL) {
-               ret = -EINVAL;
-               ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
-                        "ehca_qp=%p qp_num=%x",
-                        qpcb->qp_state, my_qp, qp->qp_num);
-               goto query_qp_exit1;
-       }
-
-       if (qp_attr->qp_state == IB_QPS_SQD)
-               qp_attr->sq_draining = 1;
-
-       qp_attr->qkey = qpcb->qkey;
-       qp_attr->path_mtu = qpcb->path_mtu;
-       qp_attr->path_mig_state = qpcb->path_migration_state - 1;
-       qp_attr->rq_psn = qpcb->receive_psn;
-       qp_attr->sq_psn = qpcb->send_psn;
-       qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
-       qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
-       qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
-       /* UD_AV CIRCUMVENTION */
-       if (my_qp->qp_type == IB_QPT_UD) {
-               qp_attr->cap.max_send_sge =
-                       qpcb->actual_nr_sges_in_sq_wqe - 2;
-               qp_attr->cap.max_recv_sge =
-                       qpcb->actual_nr_sges_in_rq_wqe - 2;
-       } else {
-               qp_attr->cap.max_send_sge =
-                       qpcb->actual_nr_sges_in_sq_wqe;
-               qp_attr->cap.max_recv_sge =
-                       qpcb->actual_nr_sges_in_rq_wqe;
-       }
-
-       qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
-       qp_attr->dest_qp_num = qpcb->dest_qp_nr;
-
-       qp_attr->pkey_index = qpcb->prim_p_key_idx;
-       qp_attr->port_num = qpcb->prim_phys_port;
-       qp_attr->timeout = qpcb->timeout;
-       qp_attr->retry_cnt = qpcb->retry_count;
-       qp_attr->rnr_retry = qpcb->rnr_retry_count;
-
-       qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
-       qp_attr->alt_port_num = qpcb->alt_phys_port;
-       qp_attr->alt_timeout = qpcb->timeout_al;
-
-       qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
-       qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;
-
-       /* primary av */
-       qp_attr->ah_attr.sl = qpcb->service_level;
-
-       if (qpcb->send_grh_flag) {
-               qp_attr->ah_attr.ah_flags = IB_AH_GRH;
-       }
-
-       qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
-       qp_attr->ah_attr.dlid = qpcb->dlid;
-       qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
-       qp_attr->ah_attr.port_num = qp_attr->port_num;
-
-       /* primary GRH */
-       qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
-       qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
-       qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
-       qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
-
-       for (cnt = 0; cnt < 16; cnt++)
-               qp_attr->ah_attr.grh.dgid.raw[cnt] =
-                       qpcb->dest_gid.byte[cnt];
-
-       /* alternate AV */
-       qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
-       if (qpcb->send_grh_flag_al) {
-               qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
-       }
-
-       qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
-       qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
-       qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
-
-       /* alternate GRH */
-       qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
-       qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
-       qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
-       qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
-
-       for (cnt = 0; cnt < 16; cnt++)
-               qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
-                       qpcb->dest_gid_al.byte[cnt];
-
-       /* return init attributes given in ehca_create_qp */
-       if (qp_init_attr)
-               *qp_init_attr = my_qp->init_attr;
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
-
-query_qp_exit1:
-       ehca_free_fw_ctrlblock(qpcb);
-
-       return ret;
-}
-
-int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-                   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
-{
-       struct ehca_qp *my_qp =
-               container_of(ibsrq, struct ehca_qp, ib_srq);
-       struct ehca_shca *shca =
-               container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
-       struct hcp_modify_qp_control_block *mqpcb;
-       u64 update_mask;
-       u64 h_ret;
-       int ret = 0;
-
-       mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!mqpcb) {
-               ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
-                        "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
-               return -ENOMEM;
-       }
-
-       update_mask = 0;
-       if (attr_mask & IB_SRQ_LIMIT) {
-               attr_mask &= ~IB_SRQ_LIMIT;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-               mqpcb->curr_srq_limit = attr->srq_limit;
-               mqpcb->qp_aff_asyn_ev_log_reg =
-                       EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
-       }
-
-       /* by now, all bits in attr_mask should have been cleared */
-       if (attr_mask) {
-               ehca_err(ibsrq->device, "invalid attribute mask bits set  "
-                        "attr_mask=%x", attr_mask);
-               ret = -EINVAL;
-               goto modify_srq_exit0;
-       }
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
-
-       h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
-                                NULL, update_mask, mqpcb,
-                                my_qp->galpas.kernel);
-
-       if (h_ret != H_SUCCESS) {
-               ret = ehca2ib_return_code(h_ret);
-               ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
-                        "ehca_qp=%p qp_num=%x",
-                        h_ret, my_qp, my_qp->real_qp_num);
-       }
-
-modify_srq_exit0:
-       ehca_free_fw_ctrlblock(mqpcb);
-
-       return ret;
-}
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
-{
-       struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
-       struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
-                                             ib_device);
-       struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-       struct hcp_modify_qp_control_block *qpcb;
-       int ret = 0;
-       u64 h_ret;
-
-       qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!qpcb) {
-               ehca_err(srq->device, "Out of memory for qpcb "
-                        "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
-                               NULL, qpcb, my_qp->galpas.kernel);
-
-       if (h_ret != H_SUCCESS) {
-               ret = ehca2ib_return_code(h_ret);
-               ehca_err(srq->device, "hipz_h_query_qp() failed "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, my_qp->real_qp_num, h_ret);
-               goto query_srq_exit1;
-       }
-
-       srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
-       srq_attr->max_sge = 3;
-       srq_attr->srq_limit = qpcb->curr_srq_limit;
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
-
-query_srq_exit1:
-       ehca_free_fw_ctrlblock(qpcb);
-
-       return ret;
-}
-
-static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
-                              struct ib_uobject *uobject)
-{
-       struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
-       struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
-                                            ib_pd);
-       struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
-       u32 qp_num = my_qp->real_qp_num;
-       int ret;
-       u64 h_ret;
-       u8 port_num;
-       int is_user = 0;
-       enum ib_qp_type qp_type;
-       unsigned long flags;
-
-       if (uobject) {
-               is_user = 1;
-               if (my_qp->mm_count_galpa ||
-                   my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
-                       ehca_err(dev, "Resources still referenced in "
-                                "user space qp_num=%x", qp_num);
-                       return -EINVAL;
-               }
-       }
-
-       if (my_qp->send_cq) {
-               ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
-               if (ret) {
-                       ehca_err(dev, "Couldn't unassign qp from "
-                                "send_cq ret=%i qp_num=%x cq_num=%x", ret,
-                                qp_num, my_qp->send_cq->cq_number);
-                       return ret;
-               }
-       }
-
-       write_lock_irqsave(&ehca_qp_idr_lock, flags);
-       idr_remove(&ehca_qp_idr, my_qp->token);
-       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-       /*
-        * SRQs will never get into an error list and do not have a recv_cq,
-        * so we need to skip them here.
-        */
-       if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
-               del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
-
-       if (HAS_SQ(my_qp) && !is_user)
-               del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
-
-       /* now wait until all pending events have completed */
-       wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
-
-       h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
-                        "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
-               return ehca2ib_return_code(h_ret);
-       }
-
-       port_num = my_qp->init_attr.port_num;
-       qp_type  = my_qp->init_attr.qp_type;
-
-       if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
-               spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-               kfree(my_qp->mod_qp_parm);
-               my_qp->mod_qp_parm = NULL;
-               shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
-               spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-       }
-
-       /* no support for IB_QPT_SMI yet */
-       if (qp_type == IB_QPT_GSI) {
-               struct ib_event event;
-               ehca_info(dev, "device %s: port %x is inactive.",
-                               shca->ib_device.name, port_num);
-               event.device = &shca->ib_device;
-               event.event = IB_EVENT_PORT_ERR;
-               event.element.port_num = port_num;
-               shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
-               ib_dispatch_event(&event);
-       }
-
-       if (HAS_RQ(my_qp)) {
-               ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-               if (!is_user)
-                       vfree(my_qp->rq_map.map);
-       }
-       if (HAS_SQ(my_qp)) {
-               ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-               if (!is_user)
-                       vfree(my_qp->sq_map.map);
-       }
-       kmem_cache_free(qp_cache, my_qp);
-       atomic_dec(&shca->num_qps);
-       return 0;
-}
-
-int ehca_destroy_qp(struct ib_qp *qp)
-{
-       return internal_destroy_qp(qp->device,
-                                  container_of(qp, struct ehca_qp, ib_qp),
-                                  qp->uobject);
-}
-
-int ehca_destroy_srq(struct ib_srq *srq)
-{
-       return internal_destroy_qp(srq->device,
-                                  container_of(srq, struct ehca_qp, ib_srq),
-                                  srq->uobject);
-}
-
-int ehca_init_qp_cache(void)
-{
-       qp_cache = kmem_cache_create("ehca_cache_qp",
-                                    sizeof(struct ehca_qp), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!qp_cache)
-               return -ENOMEM;
-       return 0;
-}
-
-void ehca_cleanup_qp_cache(void)
-{
-       kmem_cache_destroy(qp_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
deleted file mode 100644 (file)
index 11813b8..0000000
+++ /dev/null
@@ -1,953 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  post_send/recv, poll_cq, req_notify
- *
- *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-
-/* in RC traffic, insert an empty RDMA READ every this many packets */
-#define ACK_CIRC_THRESHOLD 2000000
-
-static u64 replace_wr_id(u64 wr_id, u16 idx)
-{
-       u64 ret;
-
-       ret = wr_id & ~QMAP_IDX_MASK;
-       ret |= idx & QMAP_IDX_MASK;
-
-       return ret;
-}
-
-static u16 get_app_wr_id(u64 wr_id)
-{
-       return wr_id & QMAP_IDX_MASK;
-}
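A minimal round-trip sketch of the two helpers above, using made-up values (app_wr_id, index 42): replace_wr_id() stashes the queue-map index in the low QMAP_IDX_MASK bits of the wr_id written to the WQE, the consumer's original low bits are kept via get_app_wr_id(), and the same replace_wr_id() restores them when the completion is reported further down in this file:

	u64 app_wr_id = 0xabcd0005;                      /* wr_id chosen by the consumer (made-up)   */
	u16 saved     = get_app_wr_id(app_wr_id);        /* keep the consumer's low bits             */
	u64 hw_wr_id  = replace_wr_id(app_wr_id, 42);    /* queue-map index 42 goes into the WQE     */
	u64 restored  = replace_wr_id(hw_wr_id, saved);  /* on completion: equals app_wr_id again    */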
-
-static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
-                                 struct ehca_wqe *wqe_p,
-                                 struct ib_recv_wr *recv_wr,
-                                 u32 rq_map_idx)
-{
-       u8 cnt_ds;
-       if (unlikely((recv_wr->num_sge < 0) ||
-                    (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
-               ehca_gen_err("Invalid number of WQE SGE. "
-                        "num_sge=%x max_nr_of_sg=%x",
-                        recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
-               return -EINVAL; /* invalid SG list length */
-       }
-
-       /* clear wqe header until sglist */
-       memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
-
-       wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
-       wqe_p->nr_of_data_seg = recv_wr->num_sge;
-
-       for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
-               wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
-                       recv_wr->sg_list[cnt_ds].addr;
-               wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
-                       recv_wr->sg_list[cnt_ds].lkey;
-               wqe_p->u.all_rcv.sg_list[cnt_ds].length =
-                       recv_wr->sg_list[cnt_ds].length;
-       }
-
-       if (ehca_debug_level >= 3) {
-               ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
-                            ipz_rqueue);
-               ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
-       }
-
-       return 0;
-}
-
-#if defined(DEBUG_GSI_SEND_WR)
-
-/* need ib_mad struct */
-#include <rdma/ib_mad.h>
-
-static void trace_ud_wr(const struct ib_ud_wr *ud_wr)
-{
-       int idx = 0;
-       int j;
-       while (ud_wr) {
-               struct ib_mad_hdr *mad_hdr = ud_wr->mad_hdr;
-               struct ib_sge *sge = ud_wr->wr.sg_list;
-               ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x "
-                            "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id,
-                            ud_wr->wr.num_sge, ud_wr->wr.send_flags,
-                            ud_wr->wr.opcode);
-               if (mad_hdr) {
-                       ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x "
-                                    "mgmt_class=%x class_version=%x method=%x "
-                                    "status=%x class_specific=%x tid=%lx "
-                                    "attr_id=%x resv=%x attr_mod=%x",
-                                    idx, mad_hdr->base_version,
-                                    mad_hdr->mgmt_class,
-                                    mad_hdr->class_version, mad_hdr->method,
-                                    mad_hdr->status, mad_hdr->class_specific,
-                                    mad_hdr->tid, mad_hdr->attr_id,
-                                    mad_hdr->resv,
-                                    mad_hdr->attr_mod);
-               }
-               for (j = 0; j < ud_wr->wr.num_sge; j++) {
-                       u8 *data = __va(sge->addr);
-                       ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x "
-                                    "lkey=%x",
-                                    idx, j, data, sge->length, sge->lkey);
-                       /* assume length is n*16 */
-                       ehca_dmp(data, sge->length, "ud_wr#%x sge#%x",
-                                idx, j);
-                       sge++;
-               } /* eof for j */
-               idx++;
-               ud_wr = ud_wr(ud_wr->wr.next);
-       } /* eof while ud_wr */
-}
-
-#endif /* DEBUG_GSI_SEND_WR */
-
-static inline int ehca_write_swqe(struct ehca_qp *qp,
-                                 struct ehca_wqe *wqe_p,
-                                 struct ib_send_wr *send_wr,
-                                 u32 sq_map_idx,
-                                 int hidden)
-{
-       u32 idx;
-       u64 dma_length;
-       struct ehca_av *my_av;
-       u32 remote_qkey;
-       struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
-
-       if (unlikely((send_wr->num_sge < 0) ||
-                    (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
-               ehca_gen_err("Invalid number of WQE SGE. "
-                        "num_sge=%x max_nr_of_sg=%x",
-                        send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
-               return -EINVAL; /* invalid SG list length */
-       }
-
-       /* clear wqe header until sglist */
-       memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
-
-       wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
-
-       qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
-       qmap_entry->reported = 0;
-       qmap_entry->cqe_req = 0;
-
-       switch (send_wr->opcode) {
-       case IB_WR_SEND:
-       case IB_WR_SEND_WITH_IMM:
-               wqe_p->optype = WQE_OPTYPE_SEND;
-               break;
-       case IB_WR_RDMA_WRITE:
-       case IB_WR_RDMA_WRITE_WITH_IMM:
-               wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
-               break;
-       case IB_WR_RDMA_READ:
-               wqe_p->optype = WQE_OPTYPE_RDMAREAD;
-               break;
-       default:
-               ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
-               return -EINVAL; /* invalid opcode */
-       }
-
-       wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
-
-       wqe_p->wr_flag = 0;
-
-       if ((send_wr->send_flags & IB_SEND_SIGNALED ||
-           qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-           && !hidden) {
-               wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
-               qmap_entry->cqe_req = 1;
-       }
-
-       if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
-           send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
-               /* this might not work as long as HW does not support it */
-               wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
-               wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
-       }
-
-       wqe_p->nr_of_data_seg = send_wr->num_sge;
-
-       switch (qp->qp_type) {
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               /* no break is intentional here */
-       case IB_QPT_UD:
-               /* IB 1.2 spec C10-15 compliance */
-               remote_qkey = ud_wr(send_wr)->remote_qkey;
-               if (remote_qkey & 0x80000000)
-                       remote_qkey = qp->qkey;
-
-               wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8;
-               wqe_p->local_ee_context_qkey = remote_qkey;
-               if (unlikely(!ud_wr(send_wr)->ah)) {
-                       ehca_gen_err("ud_wr(send_wr)->ah is NULL. qp=%p", qp);
-                       return -EINVAL;
-               }
-               if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) {
-                       ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
-                       return -EINVAL;
-               }
-               my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah);
-               wqe_p->u.ud_av.ud_av = my_av->av;
-
-               /*
-                * omitted check of IB_SEND_INLINE
-                * since HW does not support it
-                */
-               for (idx = 0; idx < send_wr->num_sge; idx++) {
-                       wqe_p->u.ud_av.sg_list[idx].vaddr =
-                               send_wr->sg_list[idx].addr;
-                       wqe_p->u.ud_av.sg_list[idx].lkey =
-                               send_wr->sg_list[idx].lkey;
-                       wqe_p->u.ud_av.sg_list[idx].length =
-                               send_wr->sg_list[idx].length;
-               } /* eof for idx */
-               if (qp->qp_type == IB_QPT_SMI ||
-                   qp->qp_type == IB_QPT_GSI)
-                       wqe_p->u.ud_av.ud_av.pmtu = 1;
-               if (qp->qp_type == IB_QPT_GSI) {
-                       wqe_p->pkeyi = ud_wr(send_wr)->pkey_index;
-#ifdef DEBUG_GSI_SEND_WR
-                       trace_ud_wr(ud_wr(send_wr));
-#endif /* DEBUG_GSI_SEND_WR */
-               }
-               break;
-
-       case IB_QPT_UC:
-               if (send_wr->send_flags & IB_SEND_FENCE)
-                       wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
-               /* no break is intentional here */
-       case IB_QPT_RC:
-               /* TODO: atomic not implemented */
-               wqe_p->u.nud.remote_virtual_address =
-                       rdma_wr(send_wr)->remote_addr;
-               wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey;
-
-               /*
-                * omitted checking of IB_SEND_INLINE
-                * since HW does not support it
-                */
-               dma_length = 0;
-               for (idx = 0; idx < send_wr->num_sge; idx++) {
-                       wqe_p->u.nud.sg_list[idx].vaddr =
-                               send_wr->sg_list[idx].addr;
-                       wqe_p->u.nud.sg_list[idx].lkey =
-                               send_wr->sg_list[idx].lkey;
-                       wqe_p->u.nud.sg_list[idx].length =
-                               send_wr->sg_list[idx].length;
-                       dma_length += send_wr->sg_list[idx].length;
-               } /* eof idx */
-               wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
-
-               /* unsolicited ack circumvention */
-               if (send_wr->opcode == IB_WR_RDMA_READ) {
-                       /* on RDMA read, switch on and reset counters */
-                       qp->message_count = qp->packet_count = 0;
-                       qp->unsol_ack_circ = 1;
-               } else
-                       /* else estimate #packets */
-                       qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
-
-               break;
-
-       default:
-               ehca_gen_err("Invalid qptype=%x", qp->qp_type);
-               return -EINVAL;
-       }
-
-       if (ehca_debug_level >= 3) {
-               ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
-               ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
-       }
-       return 0;
-}
-
-/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
-static inline void map_ib_wc_status(u32 cqe_status,
-                                   enum ib_wc_status *wc_status)
-{
-       if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
-               switch (cqe_status & 0x3F) {
-               case 0x01:
-               case 0x21:
-                       *wc_status = IB_WC_LOC_LEN_ERR;
-                       break;
-               case 0x02:
-               case 0x22:
-                       *wc_status = IB_WC_LOC_QP_OP_ERR;
-                       break;
-               case 0x03:
-               case 0x23:
-                       *wc_status = IB_WC_LOC_EEC_OP_ERR;
-                       break;
-               case 0x04:
-               case 0x24:
-                       *wc_status = IB_WC_LOC_PROT_ERR;
-                       break;
-               case 0x05:
-               case 0x25:
-                       *wc_status = IB_WC_WR_FLUSH_ERR;
-                       break;
-               case 0x06:
-                       *wc_status = IB_WC_MW_BIND_ERR;
-                       break;
-               case 0x07: /* remote error - look into bits 20:24 */
-                       switch ((cqe_status
-                                & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
-                       case 0x0:
-                               /*
-                                * PSN Sequence Error!
-                                * couldn't find a matching status!
-                                */
-                               *wc_status = IB_WC_GENERAL_ERR;
-                               break;
-                       case 0x1:
-                               *wc_status = IB_WC_REM_INV_REQ_ERR;
-                               break;
-                       case 0x2:
-                               *wc_status = IB_WC_REM_ACCESS_ERR;
-                               break;
-                       case 0x3:
-                               *wc_status = IB_WC_REM_OP_ERR;
-                               break;
-                       case 0x4:
-                               *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
-                               break;
-                       }
-                       break;
-               case 0x08:
-                       *wc_status = IB_WC_RETRY_EXC_ERR;
-                       break;
-               case 0x09:
-                       *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
-                       break;
-               case 0x0A:
-               case 0x2D:
-                       *wc_status = IB_WC_REM_ABORT_ERR;
-                       break;
-               case 0x0B:
-               case 0x2E:
-                       *wc_status = IB_WC_INV_EECN_ERR;
-                       break;
-               case 0x0C:
-               case 0x2F:
-                       *wc_status = IB_WC_INV_EEC_STATE_ERR;
-                       break;
-               case 0x0D:
-                       *wc_status = IB_WC_BAD_RESP_ERR;
-                       break;
-               case 0x10:
-                       /* WQE purged */
-                       *wc_status = IB_WC_WR_FLUSH_ERR;
-                       break;
-               default:
-                       *wc_status = IB_WC_FATAL_ERR;
-
-               }
-       } else
-               *wc_status = IB_WC_SUCCESS;
-}
-
-static inline int post_one_send(struct ehca_qp *my_qp,
-                        struct ib_send_wr *cur_send_wr,
-                        int hidden)
-{
-       struct ehca_wqe *wqe_p;
-       int ret;
-       u32 sq_map_idx;
-       u64 start_offset = my_qp->ipz_squeue.current_q_offset;
-
-       /* get pointer next to free WQE */
-       wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
-       if (unlikely(!wqe_p)) {
-               /* too many posted work requests: queue overflow */
-               ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
-                        "qp_num=%x", my_qp->ib_qp.qp_num);
-               return -ENOMEM;
-       }
-
-       /*
-        * Get the index of the WQE in the send queue. The same index is used
-        * for writing into the sq_map.
-        */
-       sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
-
-       /* write a SEND WQE into the QUEUE */
-       ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
-       /*
-        * if something failed,
-        * reset the free entry pointer to the start value
-        */
-       if (unlikely(ret)) {
-               my_qp->ipz_squeue.current_q_offset = start_offset;
-               ehca_err(my_qp->ib_qp.device, "Could not write WQE "
-                        "qp_num=%x", my_qp->ib_qp.qp_num);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int ehca_post_send(struct ib_qp *qp,
-                  struct ib_send_wr *send_wr,
-                  struct ib_send_wr **bad_send_wr)
-{
-       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-       int wqe_cnt = 0;
-       int ret = 0;
-       unsigned long flags;
-
-       /* Reject WR if QP is in RESET, INIT or RTR state */
-       if (unlikely(my_qp->state < IB_QPS_RTS)) {
-               ehca_err(qp->device, "Invalid QP state  qp_state=%d qpn=%x",
-                        my_qp->state, qp->qp_num);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* LOCK the QUEUE */
-       spin_lock_irqsave(&my_qp->spinlock_s, flags);
-
-       /* Send an empty extra RDMA read if:
-        *  1) there has been an RDMA read on this connection before
-        *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
-        *  3) we can be sure that any previous extra RDMA read has been
-        *     processed so we don't overflow the SQ
-        */
-       if (unlikely(my_qp->unsol_ack_circ &&
-                    my_qp->packet_count > ACK_CIRC_THRESHOLD &&
-                    my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
-               /* insert an empty RDMA READ to fix up the remote QP state */
-               struct ib_send_wr circ_wr;
-               memset(&circ_wr, 0, sizeof(circ_wr));
-               circ_wr.opcode = IB_WR_RDMA_READ;
-               post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
-               wqe_cnt++;
-               ehca_dbg(qp->device, "posted circ wr  qp_num=%x", qp->qp_num);
-               my_qp->message_count = my_qp->packet_count = 0;
-       }
-
-       /* loop processes list of send reqs */
-       while (send_wr) {
-               ret = post_one_send(my_qp, send_wr, 0);
-               if (unlikely(ret)) {
-                       goto post_send_exit0;
-               }
-               wqe_cnt++;
-               send_wr = send_wr->next;
-       }
-
-post_send_exit0:
-       iosync(); /* serialize GAL register access */
-       hipz_update_sqa(my_qp, wqe_cnt);
-       if (unlikely(ret || ehca_debug_level >= 2))
-               ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
-                        my_qp, qp->qp_num, wqe_cnt, ret);
-       my_qp->message_count += wqe_cnt;
-       spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
-
-out:
-       if (ret)
-               *bad_send_wr = send_wr;
-       return ret;
-}
-
-static int internal_post_recv(struct ehca_qp *my_qp,
-                             struct ib_device *dev,
-                             struct ib_recv_wr *recv_wr,
-                             struct ib_recv_wr **bad_recv_wr)
-{
-       struct ehca_wqe *wqe_p;
-       int wqe_cnt = 0;
-       int ret = 0;
-       u32 rq_map_idx;
-       unsigned long flags;
-       struct ehca_qmap_entry *qmap_entry;
-
-       if (unlikely(!HAS_RQ(my_qp))) {
-               ehca_err(dev, "QP has no RQ  ehca_qp=%p qp_num=%x ext_type=%d",
-                        my_qp, my_qp->real_qp_num, my_qp->ext_type);
-               ret = -ENODEV;
-               goto out;
-       }
-
-       /* LOCK the QUEUE */
-       spin_lock_irqsave(&my_qp->spinlock_r, flags);
-
-       /* loop processes list of recv reqs */
-       while (recv_wr) {
-               u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
-               /* get pointer next to free WQE */
-               wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
-               if (unlikely(!wqe_p)) {
-                       /* too many posted work requests: queue overflow */
-                       ret = -ENOMEM;
-                       ehca_err(dev, "Too many posted WQEs "
-                               "qp_num=%x", my_qp->real_qp_num);
-                       goto post_recv_exit0;
-               }
-               /*
-                * Get the index of the WQE in the recv queue. The same index
-                * is used for writing into the rq_map.
-                */
-               rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
-
-               /* write a RECV WQE into the QUEUE */
-               ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
-                               rq_map_idx);
-               /*
-                * if something failed,
-                * reset the free entry pointer to the start value
-                */
-               if (unlikely(ret)) {
-                       my_qp->ipz_rqueue.current_q_offset = start_offset;
-                       ret = -EINVAL;
-                       ehca_err(dev, "Could not write WQE "
-                               "qp_num=%x", my_qp->real_qp_num);
-                       goto post_recv_exit0;
-               }
-
-               qmap_entry = &my_qp->rq_map.map[rq_map_idx];
-               qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
-               qmap_entry->reported = 0;
-               qmap_entry->cqe_req = 1;
-
-               wqe_cnt++;
-               recv_wr = recv_wr->next;
-       } /* eof for recv_wr */
-
-post_recv_exit0:
-       iosync(); /* serialize GAL register access */
-       hipz_update_rqa(my_qp, wqe_cnt);
-       if (unlikely(ret || ehca_debug_level >= 2))
-           ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
-                    my_qp, my_qp->real_qp_num, wqe_cnt, ret);
-       spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
-
-out:
-       if (ret)
-               *bad_recv_wr = recv_wr;
-
-       return ret;
-}
-
-int ehca_post_recv(struct ib_qp *qp,
-                  struct ib_recv_wr *recv_wr,
-                  struct ib_recv_wr **bad_recv_wr)
-{
-       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-
-       /* Reject WR if QP is in RESET state */
-       if (unlikely(my_qp->state == IB_QPS_RESET)) {
-               ehca_err(qp->device, "Invalid QP state  qp_state=%d qpn=%x",
-                        my_qp->state, qp->qp_num);
-               *bad_recv_wr = recv_wr;
-               return -EINVAL;
-       }
-
-       return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
-}
-
-int ehca_post_srq_recv(struct ib_srq *srq,
-                      struct ib_recv_wr *recv_wr,
-                      struct ib_recv_wr **bad_recv_wr)
-{
-       return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
-                                 srq->device, recv_wr, bad_recv_wr);
-}
-
-/*
- * ib_wc_opcode table converts an ehca wc opcode to its ib equivalent.
- * Zero marks an invalid opcode, so each entry stores the ib opcode + 1
- * and consumers must decrement the looked-up value before use.
- */
-static const u8 ib_wc_opcode[255] = {
-       [0x01] = IB_WC_RECV+1,
-       [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
-       [0x08] = IB_WC_FETCH_ADD+1,
-       [0x10] = IB_WC_COMP_SWAP+1,
-       [0x20] = IB_WC_RDMA_WRITE+1,
-       [0x40] = IB_WC_RDMA_READ+1,
-       [0x80] = IB_WC_SEND+1
-};
-
-/* internal function to poll one entry of cq */
-static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
-{
-       int ret = 0, qmap_tail_idx;
-       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-       struct ehca_cqe *cqe;
-       struct ehca_qp *my_qp;
-       struct ehca_qmap_entry *qmap_entry;
-       struct ehca_queue_map *qmap;
-       int cqe_count = 0, is_error;
-
-repoll:
-       cqe = (struct ehca_cqe *)
-               ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
-       if (!cqe) {
-               ret = -EAGAIN;
-               if (ehca_debug_level >= 3)
-                       ehca_dbg(cq->device, "Completion queue is empty  "
-                                "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
-               goto poll_cq_one_exit0;
-       }
-
-       /* prevents loads being reordered across this point */
-       rmb();
-
-       cqe_count++;
-       if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
-               struct ehca_qp *qp;
-               int purgeflag;
-               unsigned long flags;
-
-               qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
-               if (!qp) {
-                       ehca_err(cq->device, "cq_num=%x qp_num=%x "
-                                "could not find qp -> ignore cqe",
-                                my_cq->cq_number, cqe->local_qp_number);
-                       ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
-                                my_cq->cq_number, cqe->local_qp_number);
-                       /* ignore this purged cqe */
-                       goto repoll;
-               }
-               spin_lock_irqsave(&qp->spinlock_s, flags);
-               purgeflag = qp->sqerr_purgeflag;
-               spin_unlock_irqrestore(&qp->spinlock_s, flags);
-
-               if (purgeflag) {
-                       ehca_dbg(cq->device,
-                                "Got CQE with purged bit qp_num=%x src_qp=%x",
-                                cqe->local_qp_number, cqe->remote_qp_number);
-                       if (ehca_debug_level >= 2)
-                               ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
-                                        cqe->local_qp_number,
-                                        cqe->remote_qp_number);
-                       /*
-                        * ignore this to avoid double cqes of bad wqe
-                        * that caused sqe and turn off purge flag
-                        */
-                       qp->sqerr_purgeflag = 0;
-                       goto repoll;
-               }
-       }
-
-       is_error = cqe->status & WC_STATUS_ERROR_BIT;
-
-       /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
-       if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
-               ehca_dbg(cq->device,
-                        "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
-                        is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
-               ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
-                        my_cq, my_cq->cq_number);
-               ehca_dbg(cq->device,
-                        "ehca_cq=%p cq_num=%x -------------------------",
-                        my_cq, my_cq->cq_number);
-       }
-
-       read_lock(&ehca_qp_idr_lock);
-       my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
-       read_unlock(&ehca_qp_idr_lock);
-       if (!my_qp)
-               goto repoll;
-       wc->qp = &my_qp->ib_qp;
-
-       qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
-       if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
-               /* We got a send completion. */
-               qmap = &my_qp->sq_map;
-       else
-               /* We got a receive completion. */
-               qmap = &my_qp->rq_map;
-
-       /* advance the tail pointer */
-       qmap->tail = qmap_tail_idx;
-
-       if (is_error) {
-               /*
-                * set left_to_poll to 0 because in error state, we will not
-                * get any additional CQEs
-                */
-               my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
-                                                       my_qp->sq_map.entries);
-               my_qp->sq_map.left_to_poll = 0;
-               ehca_add_to_err_list(my_qp, 1);
-
-               my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
-                                                       my_qp->rq_map.entries);
-               my_qp->rq_map.left_to_poll = 0;
-               if (HAS_RQ(my_qp))
-                       ehca_add_to_err_list(my_qp, 0);
-       }
-
-       qmap_entry = &qmap->map[qmap_tail_idx];
-       if (qmap_entry->reported) {
-               ehca_warn(cq->device, "Double cqe on qp_num=%#x",
-                               my_qp->real_qp_num);
-               /* found a double cqe, discard it and read next one */
-               goto repoll;
-       }
-
-       wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
-       qmap_entry->reported = 1;
-
-       /* if left_to_poll is decremented to 0, add the QP to the error list */
-       if (qmap->left_to_poll > 0) {
-               qmap->left_to_poll--;
-               if ((my_qp->sq_map.left_to_poll == 0) &&
-                               (my_qp->rq_map.left_to_poll == 0)) {
-                       ehca_add_to_err_list(my_qp, 1);
-                       if (HAS_RQ(my_qp))
-                               ehca_add_to_err_list(my_qp, 0);
-               }
-       }
-
-       /* eval ib_wc_opcode */
-       wc->opcode = ib_wc_opcode[cqe->optype]-1;
-       if (unlikely(wc->opcode == -1)) {
-               ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
-                        "ehca_cq=%p cq_num=%x",
-                        cqe->optype, cqe->status, my_cq, my_cq->cq_number);
-               /* dump cqe for other infos */
-               ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
-                        my_cq, my_cq->cq_number);
-               /* update also queue adder to throw away this entry!!! */
-               goto repoll;
-       }
-
-       /* eval ib_wc_status */
-       if (unlikely(is_error)) {
-               /* complete with errors */
-               map_ib_wc_status(cqe->status, &wc->status);
-               wc->vendor_err = wc->status;
-       } else
-               wc->status = IB_WC_SUCCESS;
-
-       wc->byte_len = cqe->nr_bytes_transferred;
-       wc->pkey_index = cqe->pkey_index;
-       wc->slid = cqe->rlid;
-       wc->dlid_path_bits = cqe->dlid;
-       wc->src_qp = cqe->remote_qp_number;
-       /*
-        * HW has "Immed data present" and "GRH present" in bits 6 and 5.
-        * SW defines those in bits 1 and 0, so we can just shift and mask.
-        */
-       wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
-       wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
-       wc->sl = cqe->service_level;
-
-poll_cq_one_exit0:
-       if (cqe_count > 0)
-               hipz_update_feca(my_cq, cqe_count);
-
-       return ret;
-}
-
-static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
-                              struct ib_wc *wc, int num_entries,
-                              struct ipz_queue *ipz_queue, int on_sq)
-{
-       int nr = 0;
-       struct ehca_wqe *wqe;
-       u64 offset;
-       struct ehca_queue_map *qmap;
-       struct ehca_qmap_entry *qmap_entry;
-
-       if (on_sq)
-               qmap = &my_qp->sq_map;
-       else
-               qmap = &my_qp->rq_map;
-
-       qmap_entry = &qmap->map[qmap->next_wqe_idx];
-
-       while ((nr < num_entries) && (qmap_entry->reported == 0)) {
-               /* generate flush CQE */
-
-               memset(wc, 0, sizeof(*wc));
-
-               offset = qmap->next_wqe_idx * ipz_queue->qe_size;
-               wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
-               if (!wqe) {
-                       ehca_err(cq->device, "Invalid wqe offset=%#llx on "
-                                "qp_num=%#x", offset, my_qp->real_qp_num);
-                       return nr;
-               }
-
-               wc->wr_id = replace_wr_id(wqe->work_request_id,
-                                         qmap_entry->app_wr_id);
-
-               if (on_sq) {
-                       switch (wqe->optype) {
-                       case WQE_OPTYPE_SEND:
-                               wc->opcode = IB_WC_SEND;
-                               break;
-                       case WQE_OPTYPE_RDMAWRITE:
-                               wc->opcode = IB_WC_RDMA_WRITE;
-                               break;
-                       case WQE_OPTYPE_RDMAREAD:
-                               wc->opcode = IB_WC_RDMA_READ;
-                               break;
-                       default:
-                               ehca_err(cq->device, "Invalid optype=%x",
-                                               wqe->optype);
-                               return nr;
-                       }
-               } else
-                       wc->opcode = IB_WC_RECV;
-
-               if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
-                       wc->ex.imm_data = wqe->immediate_data;
-                       wc->wc_flags |= IB_WC_WITH_IMM;
-               }
-
-               wc->status = IB_WC_WR_FLUSH_ERR;
-
-               wc->qp = &my_qp->ib_qp;
-
-               /* mark as reported and advance next_wqe pointer */
-               qmap_entry->reported = 1;
-               qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
-                                               qmap->entries);
-               qmap_entry = &qmap->map[qmap->next_wqe_idx];
-
-               wc++; nr++;
-       }
-
-       return nr;
-
-}
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
-{
-       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-       int nr;
-       struct ehca_qp *err_qp;
-       struct ib_wc *current_wc = wc;
-       int ret = 0;
-       unsigned long flags;
-       int entries_left = num_entries;
-
-       if (num_entries < 1) {
-               ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
-                        "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
-               ret = -EINVAL;
-               goto poll_cq_exit0;
-       }
-
-       spin_lock_irqsave(&my_cq->spinlock, flags);
-
-       /* generate flush cqes for send queues */
-       list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
-               nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
-                               &err_qp->ipz_squeue, 1);
-               entries_left -= nr;
-               current_wc += nr;
-
-               if (entries_left == 0)
-                       break;
-       }
-
-       /* generate flush cqes for receive queues */
-       list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
-               nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
-                               &err_qp->ipz_rqueue, 0);
-               entries_left -= nr;
-               current_wc += nr;
-
-               if (entries_left == 0)
-                       break;
-       }
-
-       for (nr = 0; nr < entries_left; nr++) {
-               ret = ehca_poll_cq_one(cq, current_wc);
-               if (ret)
-                       break;
-               current_wc++;
-       } /* eof for nr */
-       entries_left -= nr;
-
-       spin_unlock_irqrestore(&my_cq->spinlock, flags);
-       if (ret == -EAGAIN || !ret)
-               ret = num_entries - entries_left;
-
-poll_cq_exit0:
-       return ret;
-}
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
-{
-       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-       int ret = 0;
-
-       switch (notify_flags & IB_CQ_SOLICITED_MASK) {
-       case IB_CQ_SOLICITED:
-               hipz_set_cqx_n0(my_cq, 1);
-               break;
-       case IB_CQ_NEXT_COMP:
-               hipz_set_cqx_n1(my_cq, 1);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
-               unsigned long spl_flags;
-               spin_lock_irqsave(&my_cq->spinlock, spl_flags);
-               ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
-               spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
-       }
-
-       return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
deleted file mode 100644 (file)
index 376b031..0000000
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  SQP functions
- *
- *  Authors: Khadija Souissi <souissi@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rdma/ib_mad.h>
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define IB_MAD_STATUS_REDIRECT         cpu_to_be16(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION    cpu_to_be16(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD     cpu_to_be16(0x0008)
-
-#define IB_PMA_CLASS_PORT_INFO         cpu_to_be16(0x0001)
-
-/**
- * ehca_define_sqp - Defines special queue pair 1 (GSI QP). Once the special
- * queue pair has been created successfully, the corresponding port becomes
- * active.
- *
- * Defining special queue pair 0 (SMI QP) is not supported yet.
- *
- * @qp_init_attr: Queue pair init attributes with port and queue pair type
- */
-
-u64 ehca_define_sqp(struct ehca_shca *shca,
-                   struct ehca_qp *ehca_qp,
-                   struct ib_qp_init_attr *qp_init_attr)
-{
-       u32 pma_qp_nr, bma_qp_nr;
-       u64 ret;
-       u8 port = qp_init_attr->port_num;
-       int counter;
-
-       shca->sport[port - 1].port_state = IB_PORT_DOWN;
-
-       switch (qp_init_attr->qp_type) {
-       case IB_QPT_SMI:
-               /* function not supported yet */
-               break;
-       case IB_QPT_GSI:
-               ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
-                                        ehca_qp->ipz_qp_handle,
-                                        ehca_qp->galpas.kernel,
-                                        (u32) qp_init_attr->port_num,
-                                        &pma_qp_nr, &bma_qp_nr);
-
-               if (ret != H_SUCCESS) {
-                       ehca_err(&shca->ib_device,
-                                "Can't define AQP1 for port %x. h_ret=%lli",
-                                port, ret);
-                       return ret;
-               }
-               shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
-               ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
-                        port, pma_qp_nr);
-               break;
-       default:
-               ehca_err(&shca->ib_device, "invalid qp_type=%x",
-                        qp_init_attr->qp_type);
-               return H_PARAMETER;
-       }
-
-       if (ehca_nr_ports < 0) /* autodetect mode */
-               return H_SUCCESS;
-
-       for (counter = 0;
-            shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
-                    counter < ehca_port_act_time;
-            counter++) {
-               ehca_dbg(&shca->ib_device, "... wait until port %x is active",
-                        port);
-               msleep_interruptible(1000);
-       }
-
-       if (counter == ehca_port_act_time) {
-               ehca_err(&shca->ib_device, "Port %x is not active.", port);
-               return H_HARDWARE;
-       }
-
-       return H_SUCCESS;
-}
-
-struct ib_perf {
-       struct ib_mad_hdr mad_hdr;
-       u8 reserved[40];
-       u8 data[192];
-} __attribute__ ((packed));
-
-/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
-struct tcslfl {
-       u32 tc:8;
-       u32 sl:4;
-       u32 fl:20;
-} __attribute__ ((packed));
-
-/* IP Version/TC/FL packed into 32 bits, as in GRH */
-struct vertcfl {
-       u32 ver:4;
-       u32 tc:8;
-       u32 fl:20;
-} __attribute__ ((packed));
-
-static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
-                            const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                            const struct ib_mad *in_mad, struct ib_mad *out_mad)
-{
-       const struct ib_perf *in_perf = (const struct ib_perf *)in_mad;
-       struct ib_perf *out_perf = (struct ib_perf *)out_mad;
-       struct ib_class_port_info *poi =
-               (struct ib_class_port_info *)out_perf->data;
-       struct tcslfl *tcslfl =
-               (struct tcslfl *)&poi->redirect_tcslfl;
-       struct ehca_shca *shca =
-               container_of(ibdev, struct ehca_shca, ib_device);
-       struct ehca_sport *sport = &shca->sport[port_num - 1];
-
-       ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
-
-       *out_mad = *in_mad;
-
-       if (in_perf->mad_hdr.class_version != 1) {
-               ehca_warn(ibdev, "Unsupported class_version=%x",
-                         in_perf->mad_hdr.class_version);
-               out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
-               goto perf_reply;
-       }
-
-       switch (in_perf->mad_hdr.method) {
-       case IB_MGMT_METHOD_GET:
-       case IB_MGMT_METHOD_SET:
-               /* set class port info for redirection */
-               out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
-               out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
-               memset(poi, 0, sizeof(*poi));
-               poi->base_version = 1;
-               poi->class_version = 1;
-               poi->resp_time_value = 18;
-
-               /* copy local routing information from WC where applicable */
-               tcslfl->sl         = in_wc->sl;
-               poi->redirect_lid  =
-                       sport->saved_attr.lid | in_wc->dlid_path_bits;
-               poi->redirect_qp   = sport->pma_qp_nr;
-               poi->redirect_qkey = IB_QP1_QKEY;
-
-               ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
-                               &poi->redirect_pkey);
-
-               /* if request was globally routed, copy route info */
-               if (in_grh) {
-                       const struct vertcfl *vertcfl =
-                               (const struct vertcfl *)&in_grh->version_tclass_flow;
-                       memcpy(poi->redirect_gid, in_grh->dgid.raw,
-                              sizeof(poi->redirect_gid));
-                       tcslfl->tc        = vertcfl->tc;
-                       tcslfl->fl        = vertcfl->fl;
-               } else
-                       /* else only fill in default GID */
-                       ehca_query_gid(ibdev, port_num, 0,
-                                      (union ib_gid *)&poi->redirect_gid);
-
-               ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
-                        sport->saved_attr.lid, sport->pma_qp_nr);
-               break;
-
-       case IB_MGMT_METHOD_GET_RESP:
-               return IB_MAD_RESULT_FAILURE;
-
-       default:
-               out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
-               break;
-       }
-
-perf_reply:
-       out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
-
-       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-                    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                    const struct ib_mad_hdr *in, size_t in_mad_size,
-                    struct ib_mad_hdr *out, size_t *out_mad_size,
-                    u16 *out_mad_pkey_index)
-{
-       int ret;
-       const struct ib_mad *in_mad = (const struct ib_mad *)in;
-       struct ib_mad *out_mad = (struct ib_mad *)out;
-
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
-       if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
-               return IB_MAD_RESULT_FAILURE;
-
-       /* accept only pma request */
-       if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
-               return IB_MAD_RESULT_SUCCESS;
-
-       ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
-       ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
-                               in_mad, out_mad);
-
-       return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
deleted file mode 100644 (file)
index d280b12..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  auxiliary functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Khadija Souissi <souissik@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef EHCA_TOOLS_H
-#define EHCA_TOOLS_H
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/vmalloc.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/device.h>
-
-#include <linux/atomic.h>
-#include <asm/ibmebus.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/hvcall.h>
-
-extern int ehca_debug_level;
-
-#define ehca_dbg(ib_dev, format, arg...) \
-       do { \
-               if (unlikely(ehca_debug_level)) \
-                       dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
-                                  "PU%04x EHCA_DBG:%s " format "\n", \
-                                  raw_smp_processor_id(), __func__, \
-                                  ## arg); \
-       } while (0)
-
-#define ehca_info(ib_dev, format, arg...) \
-       dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
-                raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_warn(ib_dev, format, arg...) \
-       dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
-                raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_err(ib_dev, format, arg...) \
-       dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
-               raw_smp_processor_id(), __func__, ## arg)
-
-/* use this one only if no ib_dev available */
-#define ehca_gen_dbg(format, arg...) \
-       do { \
-               if (unlikely(ehca_debug_level)) \
-                       printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
-                              raw_smp_processor_id(), __func__, ## arg); \
-       } while (0)
-
-#define ehca_gen_warn(format, arg...) \
-       printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
-              raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_gen_err(format, arg...) \
-       printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
-              raw_smp_processor_id(), __func__, ## arg)
-
-/**
- * ehca_dmp - printk a memory block, whose length is n*8 bytes.
- * Each line has the following layout:
- * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
- */
-#define ehca_dmp(adr, len, format, args...) \
-       do { \
-               unsigned int x; \
-               unsigned int l = (unsigned int)(len); \
-               unsigned char *deb = (unsigned char *)(adr); \
-               for (x = 0; x < l; x += 16) { \
-                       printk(KERN_INFO "EHCA_DMP:%s " format \
-                              " adr=%p ofs=%04x %016llx %016llx\n", \
-                              __func__, ##args, deb, x, \
-                              *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
-                       deb += 16; \
-               } \
-       } while (0)
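A usage sketch of the macro above with a hypothetical buffer (ctrl and the tag value are made up): the loop emits one EHCA_DMP line per 16 bytes of the passed block, so a 32-byte block yields two lines:

	u64 ctrl[4] = { 0 };                              /* made-up 32-byte scratch block   */
	ehca_dmp(ctrl, sizeof(ctrl), "qp_num=%x", 0x17);  /* prints two 16-byte dump lines   */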
-
-/* define a bitmask, little endian version */
-#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
-
-/* define a bitmask, the ibm way... */
-#define EHCA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))
-
-/* internal function, don't use */
-#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
-
-/* internal function, don't use */
-#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
-
-/**
- * EHCA_BMASK_SET - return value shifted and masked by mask
- * variable |= EHCA_BMASK_SET(MY_MASK, 0x4711) ORs the field bits into variable;
- * variable &= ~EHCA_BMASK_SET(MY_MASK, -1) clears the field bits in variable.
- */
-#define EHCA_BMASK_SET(mask, value) \
-       ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
-
-/**
- * EHCA_BMASK_GET - extract a parameter from value by mask
- */
-#define EHCA_BMASK_GET(mask, value) \
-       (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
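A short illustration of how EHCA_BMASK_IBM, EHCA_BMASK_SET and EHCA_BMASK_GET combine; EXAMPLE_FIELD and the two helpers are hypothetical, not part of the driver:

	#define EXAMPLE_FIELD EHCA_BMASK_IBM(8, 15)   /* 8-bit field at IBM bits 8..15 */

	static inline u64 example_set_field(u64 reg, u64 val)
	{
		reg &= ~EHCA_BMASK_SET(EXAMPLE_FIELD, -1);        /* clear the field   */
		return reg | EHCA_BMASK_SET(EXAMPLE_FIELD, val);  /* insert the value  */
	}

	static inline u64 example_get_field(u64 reg)
	{
		return EHCA_BMASK_GET(EXAMPLE_FIELD, reg);        /* read it back      */
	}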
-
-/* Converts ehca to ib return code */
-int ehca2ib_return_code(u64 ehca_rc);
-
-#endif /* EHCA_TOOLS_H */
diff --git a/drivers/staging/rdma/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
deleted file mode 100644 (file)
index 1a1d5d9..0000000
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  userspace support verbs
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
-                                       struct ib_udata *udata)
-{
-       struct ehca_ucontext *my_context;
-
-       my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
-       if (!my_context) {
-               ehca_err(device, "Out of memory device=%p", device);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       return &my_context->ib_ucontext;
-}
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context)
-{
-       kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
-       return 0;
-}
-
-static void ehca_mm_open(struct vm_area_struct *vma)
-{
-       u32 *count = (u32 *)vma->vm_private_data;
-       if (!count) {
-               ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
-                            vma->vm_start, vma->vm_end);
-               return;
-       }
-       (*count)++;
-       if (!(*count))
-               ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
-                            vma->vm_start, vma->vm_end);
-       ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
-                    vma->vm_start, vma->vm_end, *count);
-}
-
-static void ehca_mm_close(struct vm_area_struct *vma)
-{
-       u32 *count = (u32 *)vma->vm_private_data;
-       if (!count) {
-               ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
-                            vma->vm_start, vma->vm_end);
-               return;
-       }
-       (*count)--;
-       ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
-                    vma->vm_start, vma->vm_end, *count);
-}
-
-static const struct vm_operations_struct vm_ops = {
-       .open = ehca_mm_open,
-       .close = ehca_mm_close,
-};
-
-static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
-                       u32 *mm_count)
-{
-       int ret;
-       u64 vsize, physical;
-
-       vsize = vma->vm_end - vma->vm_start;
-       if (vsize < EHCA_PAGESIZE) {
-               ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
-               return -EINVAL;
-       }
-
-       physical = galpas->user.fw_handle;
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
-       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
-       ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
-                          vma->vm_page_prot);
-       if (unlikely(ret)) {
-               ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
-               return -ENOMEM;
-       }
-
-       vma->vm_private_data = mm_count;
-       (*mm_count)++;
-       vma->vm_ops = &vm_ops;
-
-       return 0;
-}
-
-static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
-                          u32 *mm_count)
-{
-       int ret;
-       u64 start, ofs;
-       struct page *page;
-
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       start = vma->vm_start;
-       for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
-               u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
-               page = virt_to_page(virt_addr);
-               ret = vm_insert_page(vma, start, page);
-               if (unlikely(ret)) {
-                       ehca_gen_err("vm_insert_page() failed rc=%i", ret);
-                       return ret;
-               }
-               start += PAGE_SIZE;
-       }
-       vma->vm_private_data = mm_count;
-       (*mm_count)++;
-       vma->vm_ops = &vm_ops;
-
-       return 0;
-}
-
-static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
-                       u32 rsrc_type)
-{
-       int ret;
-
-       switch (rsrc_type) {
-       case 0: /* galpa fw handle */
-               ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
-               ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
-               if (unlikely(ret)) {
-                       ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_fw() failed rc=%i cq_num=%x",
-                                ret, cq->cq_number);
-                       return ret;
-               }
-               break;
-
-       case 1: /* cq queue_addr */
-               ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
-               ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
-               if (unlikely(ret)) {
-                       ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_queue() failed rc=%i cq_num=%x",
-                                ret, cq->cq_number);
-                       return ret;
-               }
-               break;
-
-       default:
-               ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
-                        rsrc_type, cq->cq_number);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
-                       u32 rsrc_type)
-{
-       int ret;
-
-       switch (rsrc_type) {
-       case 0: /* galpa fw handle */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
-               ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
-               if (unlikely(ret)) {
-                       ehca_err(qp->ib_qp.device,
-                                "remap_pfn_range() failed ret=%i qp_num=%x",
-                                ret, qp->ib_qp.qp_num);
-                       return -ENOMEM;
-               }
-               break;
-
-       case 1: /* qp rqueue_addr */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
-               ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
-                                     &qp->mm_count_rqueue);
-               if (unlikely(ret)) {
-                       ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
-                                ret, qp->ib_qp.qp_num);
-                       return ret;
-               }
-               break;
-
-       case 2: /* qp squeue_addr */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
-               ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
-                                     &qp->mm_count_squeue);
-               if (unlikely(ret)) {
-                       ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
-                                ret, qp->ib_qp.qp_num);
-                       return ret;
-               }
-               break;
-
-       default:
-               ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x",
-                        rsrc_type, qp->ib_qp.qp_num);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-       u64 fileoffset = vma->vm_pgoff;
-       u32 idr_handle = fileoffset & 0x1FFFFFF;
-       u32 q_type = (fileoffset >> 27) & 0x1;    /* CQ, QP,...        */
-       u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
-       u32 ret;
-       struct ehca_cq *cq;
-       struct ehca_qp *qp;
-       struct ib_uobject *uobject;
-
-       switch (q_type) {
-       case  0: /* CQ */
-               read_lock(&ehca_cq_idr_lock);
-               cq = idr_find(&ehca_cq_idr, idr_handle);
-               read_unlock(&ehca_cq_idr_lock);
-
-               /* make sure this mmap really belongs to the authorized user */
-               if (!cq)
-                       return -EINVAL;
-
-               if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
-                       return -EINVAL;
-
-               ret = ehca_mmap_cq(vma, cq, rsrc_type);
-               if (unlikely(ret)) {
-                       ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_cq() failed rc=%i cq_num=%x",
-                                ret, cq->cq_number);
-                       return ret;
-               }
-               break;
-
-       case 1: /* QP */
-               read_lock(&ehca_qp_idr_lock);
-               qp = idr_find(&ehca_qp_idr, idr_handle);
-               read_unlock(&ehca_qp_idr_lock);
-
-               /* make sure this mmap really belongs to the authorized user */
-               if (!qp)
-                       return -EINVAL;
-
-               uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
-               if (!uobject || uobject->context != context)
-                       return -EINVAL;
-
-               ret = ehca_mmap_qp(vma, qp, rsrc_type);
-               if (unlikely(ret)) {
-                       ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_qp() failed rc=%i qp_num=%x",
-                                ret, qp->ib_qp.qp_num);
-                       return ret;
-               }
-               break;
-
-       default:
-               ehca_gen_err("bad queue type %x", q_type);
-               return -EINVAL;
-       }
-
-       return 0;
-}
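The removed ehca_mmap() above multiplexes every userspace mapping through the mmap offset: bits 0-24 carry the idr handle, bits 25-26 the resource type (firmware galpa or queue pages), and bit 27 selects CQ versus QP. A small standalone sketch of that decoding follows; decode_ehca_pgoff and struct ehca_pgoff are hypothetical names used only for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the bit layout used by the removed
 * ehca_mmap(): bits 0-24 idr handle, bits 25-26 resource type
 * (fw galpa / queue), bit 27 queue type (0 = CQ, 1 = QP). */
struct ehca_pgoff {
	uint32_t idr_handle;
	uint32_t rsrc_type;
	uint32_t q_type;
};

static struct ehca_pgoff decode_ehca_pgoff(uint64_t fileoffset)
{
	struct ehca_pgoff d = {
		.idr_handle = fileoffset & 0x1FFFFFF,
		.rsrc_type  = (fileoffset >> 25) & 0x3,
		.q_type     = (fileoffset >> 27) & 0x1,
	};
	return d;
}

int main(void)
{
	/* QP (q_type=1), send queue (rsrc_type=2), handle 42 */
	uint64_t off = (1ULL << 27) | (2ULL << 25) | 42;
	struct ehca_pgoff d = decode_ehca_pgoff(off);

	printf("q_type=%u rsrc_type=%u handle=%u\n",
	       d.q_type, d.rsrc_type, d.idr_handle);
	return 0;
}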
diff --git a/drivers/staging/rdma/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
deleted file mode 100644 (file)
index 89517ff..0000000
+++ /dev/null
@@ -1,949 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware Infiniband Interface code for POWER
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <asm/hvcall.h>
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hcp_phyp.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
-#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
-#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
-#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
-#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
-#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
-#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
-#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
-#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
-#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
-#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)
-
-#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
-#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
-#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)
-
-#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
-#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
-#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)
-
-#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
-#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
-#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)
-
-#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)
-
-#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
-#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
-#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)
-
-#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
-#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
-#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
-
-static DEFINE_SPINLOCK(hcall_lock);
-
-static long ehca_plpar_hcall_norets(unsigned long opcode,
-                                   unsigned long arg1,
-                                   unsigned long arg2,
-                                   unsigned long arg3,
-                                   unsigned long arg4,
-                                   unsigned long arg5,
-                                   unsigned long arg6,
-                                   unsigned long arg7)
-{
-       long ret;
-       int i, sleep_msecs;
-       unsigned long flags = 0;
-
-       if (unlikely(ehca_debug_level >= 2))
-               ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
-                            opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
-
-       for (i = 0; i < 5; i++) {
-               /* serialize hCalls to work around firmware issue */
-               if (ehca_lock_hcalls)
-                       spin_lock_irqsave(&hcall_lock, flags);
-
-               ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
-                                        arg5, arg6, arg7);
-
-               if (ehca_lock_hcalls)
-                       spin_unlock_irqrestore(&hcall_lock, flags);
-
-               if (H_IS_LONG_BUSY(ret)) {
-                       sleep_msecs = get_longbusy_msecs(ret);
-                       msleep_interruptible(sleep_msecs);
-                       continue;
-               }
-
-               if (ret < H_SUCCESS)
-                       ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
-                                    opcode, ret, arg1, arg2, arg3,
-                                    arg4, arg5, arg6, arg7);
-               else
-                       if (unlikely(ehca_debug_level >= 2))
-                               ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
-
-               return ret;
-       }
-
-       return H_BUSY;
-}
-
-static long ehca_plpar_hcall9(unsigned long opcode,
-                             unsigned long *outs, /* array of 9 outputs */
-                             unsigned long arg1,
-                             unsigned long arg2,
-                             unsigned long arg3,
-                             unsigned long arg4,
-                             unsigned long arg5,
-                             unsigned long arg6,
-                             unsigned long arg7,
-                             unsigned long arg8,
-                             unsigned long arg9)
-{
-       long ret;
-       int i, sleep_msecs;
-       unsigned long flags = 0;
-
-       if (unlikely(ehca_debug_level >= 2))
-               ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
-                            arg1, arg2, arg3, arg4, arg5,
-                            arg6, arg7, arg8, arg9);
-
-       for (i = 0; i < 5; i++) {
-               /* serialize hCalls to work around firmware issue */
-               if (ehca_lock_hcalls)
-                       spin_lock_irqsave(&hcall_lock, flags);
-
-               ret = plpar_hcall9(opcode, outs,
-                                  arg1, arg2, arg3, arg4, arg5,
-                                  arg6, arg7, arg8, arg9);
-
-               if (ehca_lock_hcalls)
-                       spin_unlock_irqrestore(&hcall_lock, flags);
-
-               if (H_IS_LONG_BUSY(ret)) {
-                       sleep_msecs = get_longbusy_msecs(ret);
-                       msleep_interruptible(sleep_msecs);
-                       continue;
-               }
-
-               if (ret < H_SUCCESS) {
-                       ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
-                                    opcode, arg1, arg2, arg3, arg4, arg5,
-                                    arg6, arg7, arg8, arg9);
-                       ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
-                                    ret, outs[0], outs[1], outs[2], outs[3],
-                                    outs[4], outs[5], outs[6], outs[7],
-                                    outs[8]);
-               } else if (unlikely(ehca_debug_level >= 2))
-                       ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
-                                    ret, outs[0], outs[1], outs[2], outs[3],
-                                    outs[4], outs[5], outs[6], outs[7],
-                                    outs[8]);
-               return ret;
-       }
-
-       return H_BUSY;
-}
-
-u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_pfeq *pfeq,
-                            const u32 neq_control,
-                            const u32 number_of_entries,
-                            struct ipz_eq_handle *eq_handle,
-                            u32 *act_nr_of_entries,
-                            u32 *act_pages,
-                            u32 *eq_ist)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-       u64 allocate_controls;
-
-       /* resource type */
-       allocate_controls = 3ULL;
-
-       /* ISN is associated */
-       if (neq_control != 1)
-               allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
-       else /* notification event queue */
-               allocate_controls = (1ULL << 63) | allocate_controls;
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,  /* r4 */
-                               allocate_controls,      /* r5 */
-                               number_of_entries,      /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       eq_handle->handle = outs[0];
-       *act_nr_of_entries = (u32)outs[3];
-       *act_pages = (u32)outs[4];
-       *eq_ist = (u32)outs[5];
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Not enough resource - ret=%lli ", ret);
-
-       return ret;
-}
-
-u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
-                      struct ipz_eq_handle eq_handle,
-                      const u64 event_mask)
-{
-       return ehca_plpar_hcall_norets(H_RESET_EVENTS,
-                                      adapter_handle.handle, /* r4 */
-                                      eq_handle.handle,      /* r5 */
-                                      event_mask,            /* r6 */
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_cq *cq,
-                            struct ehca_alloc_cq_parms *param)
-{
-       int rc;
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,   /* r4  */
-                               2,                       /* r5  */
-                               param->eq_handle.handle, /* r6  */
-                               cq->token,               /* r7  */
-                               param->nr_cqe,           /* r8  */
-                               0, 0, 0, 0);
-       cq->ipz_cq_handle.handle = outs[0];
-       param->act_nr_of_entries = (u32)outs[3];
-       param->act_pages = (u32)outs[4];
-
-       if (ret == H_SUCCESS) {
-               rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
-               if (rc) {
-                       ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
-                                    rc, outs[5]);
-
-                       ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                               adapter_handle.handle,     /* r4 */
-                                               cq->ipz_cq_handle.handle,  /* r5 */
-                                               0, 0, 0, 0, 0);
-                       ret = H_NO_MEM;
-               }
-       }
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_alloc_qp_parms *parms, int is_user)
-{
-       int rc;
-       u64 ret;
-       u64 allocate_controls, max_r10_reg, r11, r12;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       allocate_controls =
-               EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
-                                parms->squeue.page_size)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
-                                parms->rqueue.page_size)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
-                                !!(parms->ll_comp_flags & LLQP_RECV_COMP))
-               | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
-                                !!(parms->ll_comp_flags & LLQP_SEND_COMP))
-               | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
-                                parms->ud_av_l_key_ctl)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
-
-       max_r10_reg =
-               EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
-                              parms->squeue.max_wr + 1)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
-                                parms->rqueue.max_wr + 1)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
-                                parms->squeue.max_sge)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
-                                parms->rqueue.max_sge);
-
-       r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
-
-       if (parms->ext_type == EQPT_SRQ)
-               r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
-       else
-               r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,             /* r4  */
-                               allocate_controls,                 /* r5  */
-                               parms->send_cq_handle.handle,
-                               parms->recv_cq_handle.handle,
-                               parms->eq_handle.handle,
-                               ((u64)parms->token << 32) | parms->pd.value,
-                               max_r10_reg, r11, r12);
-
-       parms->qp_handle.handle = outs[0];
-       parms->real_qp_num = (u32)outs[1];
-       parms->squeue.act_nr_wqes =
-               (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
-       parms->rqueue.act_nr_wqes =
-               (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
-       parms->squeue.act_nr_sges =
-               (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
-       parms->rqueue.act_nr_sges =
-               (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
-       parms->squeue.queue_size =
-               (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
-       parms->rqueue.queue_size =
-               (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
-
-       if (ret == H_SUCCESS) {
-               rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
-               if (rc) {
-                       ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
-                                    rc, outs[6]);
-
-                       ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                               adapter_handle.handle,     /* r4 */
-                                               parms->qp_handle.handle,  /* r5 */
-                                               0, 0, 0, 0, 0);
-                       ret = H_NO_MEM;
-               }
-       }
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
-                     const u8 port_id,
-                     struct hipz_query_port *query_port_response_block)
-{
-       u64 ret;
-       u64 r_cb = __pa(query_port_response_block);
-
-       if (r_cb & (EHCA_PAGESIZE-1)) {
-               ehca_gen_err("response block not page aligned");
-               return H_PARAMETER;
-       }
-
-       ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
-                                     adapter_handle.handle, /* r4 */
-                                     port_id,               /* r5 */
-                                     r_cb,                  /* r6 */
-                                     0, 0, 0, 0);
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(query_port_response_block, 64, "response_block");
-
-       return ret;
-}
-
-u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
-                      const u8 port_id, const u32 port_cap,
-                      const u8 init_type, const int modify_mask)
-{
-       u64 port_attributes = port_cap;
-
-       if (modify_mask & IB_PORT_SHUTDOWN)
-               port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
-       if (modify_mask & IB_PORT_INIT_TYPE)
-               port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
-       if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
-               port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
-
-       return ehca_plpar_hcall_norets(H_MODIFY_PORT,
-                                      adapter_handle.handle, /* r4 */
-                                      port_id,               /* r5 */
-                                      port_attributes,       /* r6 */
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
-                    struct hipz_query_hca *query_hca_rblock)
-{
-       u64 r_cb = __pa(query_hca_rblock);
-
-       if (r_cb & (EHCA_PAGESIZE-1)) {
-               ehca_gen_err("response_block=%p not page aligned",
-                            query_hca_rblock);
-               return H_PARAMETER;
-       }
-
-       return ehca_plpar_hcall_norets(H_QUERY_HCA,
-                                      adapter_handle.handle, /* r4 */
-                                      r_cb,                  /* r5 */
-                                      0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
-                         const u8 pagesize,
-                         const u8 queue_type,
-                         const u64 resource_handle,
-                         const u64 logical_address_of_page,
-                         u64 count)
-{
-       return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
-                                      adapter_handle.handle,      /* r4  */
-                                      (u64)queue_type | ((u64)pagesize) << 8,
-                                      /* r5  */
-                                      resource_handle,            /* r6  */
-                                      logical_address_of_page,    /* r7  */
-                                      count,                      /* r8  */
-                                      0, 0);
-}
-
-u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_eq_handle eq_handle,
-                            struct ehca_pfeq *pfeq,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count)
-{
-       if (count != 1) {
-               ehca_gen_err("Ppage counter=%llx", count);
-               return H_PARAMETER;
-       }
-       return hipz_h_register_rpage(adapter_handle,
-                                    pagesize,
-                                    queue_type,
-                                    eq_handle.handle,
-                                    logical_address_of_page, count);
-}
-
-u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
-                          u32 ist)
-{
-       u64 ret;
-       ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
-                                     adapter_handle.handle, /* r4 */
-                                     ist,                   /* r5 */
-                                     0, 0, 0, 0, 0);
-
-       if (ret != H_SUCCESS && ret != H_BUSY)
-               ehca_gen_err("Could not query interrupt state.");
-
-       return ret;
-}
-
-u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_cq_handle cq_handle,
-                            struct ehca_pfcq *pfcq,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count,
-                            const struct h_galpa gal)
-{
-       if (count != 1) {
-               ehca_gen_err("Page counter=%llx", count);
-               return H_PARAMETER;
-       }
-
-       return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
-                                    cq_handle.handle, logical_address_of_page,
-                                    count);
-}
-
-u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_qp_handle qp_handle,
-                            struct ehca_pfqp *pfqp,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count,
-                            const struct h_galpa galpa)
-{
-       if (count > 1) {
-               ehca_gen_err("Page counter=%llx", count);
-               return H_PARAMETER;
-       }
-
-       return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
-                                    qp_handle.handle, logical_address_of_page,
-                                    count);
-}
-
-u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
-                              const struct ipz_qp_handle qp_handle,
-                              struct ehca_pfqp *pfqp,
-                              void **log_addr_next_sq_wqe2processed,
-                              void **log_addr_next_rq_wqe2processed,
-                              int dis_and_get_function_code)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
-                               adapter_handle.handle,     /* r4 */
-                               dis_and_get_function_code, /* r5 */
-                               qp_handle.handle,          /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       if (log_addr_next_sq_wqe2processed)
-               *log_addr_next_sq_wqe2processed = (void *)outs[0];
-       if (log_addr_next_rq_wqe2processed)
-               *log_addr_next_rq_wqe2processed = (void *)outs[1];
-
-       return ret;
-}
-
-u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
-                    const struct ipz_qp_handle qp_handle,
-                    struct ehca_pfqp *pfqp,
-                    const u64 update_mask,
-                    struct hcp_modify_qp_control_block *mqpcb,
-                    struct h_galpa gal)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-       ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
-                               adapter_handle.handle, /* r4 */
-                               qp_handle.handle,      /* r5 */
-                               update_mask,           /* r6 */
-                               __pa(mqpcb),           /* r7 */
-                               0, 0, 0, 0, 0);
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Insufficient resources ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
-                   const struct ipz_qp_handle qp_handle,
-                   struct ehca_pfqp *pfqp,
-                   struct hcp_modify_qp_control_block *qqpcb,
-                   struct h_galpa gal)
-{
-       return ehca_plpar_hcall_norets(H_QUERY_QP,
-                                      adapter_handle.handle, /* r4 */
-                                      qp_handle.handle,      /* r5 */
-                                      __pa(qqpcb),           /* r6 */
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_qp *qp)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = hcp_galpas_dtor(&qp->galpas);
-       if (ret) {
-               ehca_gen_err("Could not destruct qp->galpas");
-               return H_RESOURCE;
-       }
-       ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
-                               adapter_handle.handle,     /* r4 */
-                               /* function code */
-                               1,                         /* r5 */
-                               qp->ipz_qp_handle.handle,  /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       if (ret == H_HARDWARE)
-               ehca_gen_err("HCA not operational. ret=%lli", ret);
-
-       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                     adapter_handle.handle,     /* r4 */
-                                     qp->ipz_qp_handle.handle,  /* r5 */
-                                     0, 0, 0, 0, 0);
-
-       if (ret == H_RESOURCE)
-               ehca_gen_err("Resource still in use. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u32 port)
-{
-       return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
-                                      adapter_handle.handle, /* r4 */
-                                      qp_handle.handle,      /* r5 */
-                                      port,                  /* r6 */
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u32 port, u32 * pma_qp_nr,
-                      u32 * bma_qp_nr)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
-                               adapter_handle.handle, /* r4 */
-                               qp_handle.handle,      /* r5 */
-                               port,                  /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       *pma_qp_nr = (u32)outs[0];
-       *bma_qp_nr = (u32)outs[1];
-
-       if (ret == H_ALIAS_EXIST)
-               ehca_gen_err("AQP1 already exists. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u16 mcg_dlid,
-                      u64 subnet_prefix, u64 interface_id)
-{
-       u64 ret;
-
-       ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
-                                     adapter_handle.handle,  /* r4 */
-                                     qp_handle.handle,       /* r5 */
-                                     mcg_dlid,               /* r6 */
-                                     interface_id,           /* r7 */
-                                     subnet_prefix,          /* r8 */
-                                     0, 0);
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u16 mcg_dlid,
-                      u64 subnet_prefix, u64 interface_id)
-{
-       return ehca_plpar_hcall_norets(H_DETACH_MCQP,
-                                      adapter_handle.handle, /* r4 */
-                                      qp_handle.handle,      /* r5 */
-                                      mcg_dlid,              /* r6 */
-                                      interface_id,          /* r7 */
-                                      subnet_prefix,         /* r8 */
-                                      0, 0);
-}
-
-u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_cq *cq,
-                     u8 force_flag)
-{
-       u64 ret;
-
-       ret = hcp_galpas_dtor(&cq->galpas);
-       if (ret) {
-               ehca_gen_err("Could not destruct cp->galpas");
-               return H_RESOURCE;
-       }
-
-       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                     adapter_handle.handle,     /* r4 */
-                                     cq->ipz_cq_handle.handle,  /* r5 */
-                                     force_flag != 0 ? 1L : 0L, /* r6 */
-                                     0, 0, 0, 0);
-
-       if (ret == H_RESOURCE)
-               ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
-
-       return ret;
-}
-
-u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_eq *eq)
-{
-       u64 ret;
-
-       ret = hcp_galpas_dtor(&eq->galpas);
-       if (ret) {
-               ehca_gen_err("Could not destruct eq->galpas");
-               return H_RESOURCE;
-       }
-
-       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                     adapter_handle.handle,     /* r4 */
-                                     eq->ipz_eq_handle.handle,  /* r5 */
-                                     0, 0, 0, 0, 0);
-
-       if (ret == H_RESOURCE)
-               ehca_gen_err("Resource in use. ret=%lli ", ret);
-
-       return ret;
-}
-
-u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mr *mr,
-                            const u64 vaddr,
-                            const u64 length,
-                            const u32 access_ctrl,
-                            const struct ipz_pd pd,
-                            struct ehca_mr_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,            /* r4 */
-                               5,                                /* r5 */
-                               vaddr,                            /* r6 */
-                               length,                           /* r7 */
-                               (((u64)access_ctrl) << 32ULL),    /* r8 */
-                               pd.value,                         /* r9 */
-                               0, 0, 0);
-       outparms->handle.handle = outs[0];
-       outparms->lkey = (u32)outs[2];
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mr *mr,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count)
-{
-       u64 ret;
-
-       if (unlikely(ehca_debug_level >= 3)) {
-               if (count > 1) {
-                       u64 *kpage;
-                       int i;
-                       kpage = __va(logical_address_of_page);
-                       for (i = 0; i < count; i++)
-                               ehca_gen_dbg("kpage[%d]=%p",
-                                            i, (void *)kpage[i]);
-               } else
-                       ehca_gen_dbg("kpage=%p",
-                                    (void *)logical_address_of_page);
-       }
-
-       if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
-               ehca_gen_err("logical_address_of_page not on a 4k boundary "
-                            "adapter_handle=%llx mr=%p mr_handle=%llx "
-                            "pagesize=%x queue_type=%x "
-                            "logical_address_of_page=%llx count=%llx",
-                            adapter_handle.handle, mr,
-                            mr->ipz_mr_handle.handle, pagesize, queue_type,
-                            logical_address_of_page, count);
-               ret = H_PARAMETER;
-       } else
-               ret = hipz_h_register_rpage(adapter_handle, pagesize,
-                                           queue_type,
-                                           mr->ipz_mr_handle.handle,
-                                           logical_address_of_page, count);
-       return ret;
-}
-
-u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
-                   const struct ehca_mr *mr,
-                   struct ehca_mr_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
-                               adapter_handle.handle,     /* r4 */
-                               mr->ipz_mr_handle.handle,  /* r5 */
-                               0, 0, 0, 0, 0, 0, 0);
-       outparms->len = outs[0];
-       outparms->vaddr = outs[1];
-       outparms->acl  = outs[4] >> 32;
-       outparms->lkey = (u32)(outs[5] >> 32);
-       outparms->rkey = (u32)(outs[5] & (0xffffffff));
-
-       return ret;
-}
-
-u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
-                           const struct ehca_mr *mr)
-{
-       return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                      adapter_handle.handle,    /* r4 */
-                                      mr->ipz_mr_handle.handle, /* r5 */
-                                      0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
-                         const struct ehca_mr *mr,
-                         const u64 vaddr_in,
-                         const u64 length,
-                         const u32 access_ctrl,
-                         const struct ipz_pd pd,
-                         const u64 mr_addr_cb,
-                         struct ehca_mr_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
-                               adapter_handle.handle,    /* r4 */
-                               mr->ipz_mr_handle.handle, /* r5 */
-                               vaddr_in,                 /* r6 */
-                               length,                   /* r7 */
-                               /* r8 */
-                               ((((u64)access_ctrl) << 32ULL) | pd.value),
-                               mr_addr_cb,               /* r9 */
-                               0, 0, 0);
-       outparms->vaddr = outs[1];
-       outparms->lkey = (u32)outs[2];
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
-                       const struct ehca_mr *mr,
-                       const struct ehca_mr *orig_mr,
-                       const u64 vaddr_in,
-                       const u32 access_ctrl,
-                       const struct ipz_pd pd,
-                       struct ehca_mr_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
-                               adapter_handle.handle,            /* r4 */
-                               orig_mr->ipz_mr_handle.handle,    /* r5 */
-                               vaddr_in,                         /* r6 */
-                               (((u64)access_ctrl) << 32ULL),    /* r7 */
-                               pd.value,                         /* r8 */
-                               0, 0, 0, 0);
-       outparms->handle.handle = outs[0];
-       outparms->lkey = (u32)outs[2];
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mw *mw,
-                            const struct ipz_pd pd,
-                            struct ehca_mw_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,      /* r4 */
-                               6,                          /* r5 */
-                               pd.value,                   /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       outparms->handle.handle = outs[0];
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
-                   const struct ehca_mw *mw,
-                   struct ehca_mw_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
-                               adapter_handle.handle,    /* r4 */
-                               mw->ipz_mw_handle.handle, /* r5 */
-                               0, 0, 0, 0, 0, 0, 0);
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
-                           const struct ehca_mw *mw)
-{
-       return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                      adapter_handle.handle,    /* r4 */
-                                      mw->ipz_mw_handle.handle, /* r5 */
-                                      0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
-                     const u64 ressource_handle,
-                     void *rblock,
-                     unsigned long *byte_count)
-{
-       u64 r_cb = __pa(rblock);
-
-       if (r_cb & (EHCA_PAGESIZE-1)) {
-               ehca_gen_err("rblock not page aligned.");
-               return H_PARAMETER;
-       }
-
-       return ehca_plpar_hcall_norets(H_ERROR_DATA,
-                                      adapter_handle.handle,
-                                      ressource_handle,
-                                      r_cb,
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_eoi(int irq)
-{
-       unsigned long xirr;
-
-       iosync();
-       xirr = (0xffULL << 24) | irq;
-
-       return plpar_hcall_norets(H_EOI, xirr);
-}
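The hcall wrappers deleted above share one retry pattern: up to five attempts, optionally serialized by hcall_lock, sleeping for get_longbusy_msecs() whenever the firmware reports a long-busy status, and falling back to H_BUSY if it never settles. A minimal userspace sketch of that control flow follows; fake_hcall(), the H_LONG_BUSY value, and the sleep interval are stubbed assumptions, not the hypervisor interface.

#include <stdio.h>
#include <unistd.h>

/* Stand-ins for the hypervisor interface; in the removed driver these
 * were plpar_hcall_norets(), H_IS_LONG_BUSY() and get_longbusy_msecs().
 * The values below are assumptions for illustration only. */
#define H_SUCCESS        0
#define H_BUSY           1
#define H_LONG_BUSY      9901    /* made-up "retry later" status */

static long fake_hcall(int attempt)
{
	/* Pretend the firmware is long-busy twice, then succeeds. */
	return attempt < 2 ? H_LONG_BUSY : H_SUCCESS;
}

static long hcall_with_retry(void)
{
	int i;

	/* Same shape as the removed ehca_plpar_hcall_norets(): bounded
	 * retries, sleep while the firmware reports "long busy", and
	 * give up with H_BUSY if it never becomes ready. */
	for (i = 0; i < 5; i++) {
		long ret = fake_hcall(i);

		if (ret == H_LONG_BUSY) {
			usleep(10 * 1000);   /* stand-in for get_longbusy_msecs() */
			continue;
		}
		return ret;
	}
	return H_BUSY;
}

int main(void)
{
	printf("hcall result: %ld\n", hcall_with_retry());
	return 0;
}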
diff --git a/drivers/staging/rdma/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
deleted file mode 100644 (file)
index a46e514..0000000
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware Infiniband Interface code for POWER
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HCP_IF_H__
-#define __HCP_IF_H__
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "hipz_hw.h"
-
-/*
- * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes
- * the resources, and creates the empty EQPT (ring).
- */
-u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_pfeq *pfeq,
-                            const u32 neq_control,
-                            const u32 number_of_entries,
-                            struct ipz_eq_handle *eq_handle,
-                            u32 * act_nr_of_entries,
-                            u32 * act_pages,
-                            u32 * eq_ist);
-
-u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
-                      struct ipz_eq_handle eq_handle,
-                      const u64 event_mask);
-/*
- * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes
- * the resources, and creates the empty CQPT (ring).
- */
-u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_cq *cq,
-                            struct ehca_alloc_cq_parms *param);
-
-
-/*
- * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
- * initializes the resources, and creates the empty QPPTs (2 rings).
- */
-u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_alloc_qp_parms *parms, int is_user);
-
-u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
-                     const u8 port_id,
-                     struct hipz_query_port *query_port_response_block);
-
-u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
-                      const u8 port_id, const u32 port_cap,
-                      const u8 init_type, const int modify_mask);
-
-u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
-                    struct hipz_query_hca *query_hca_rblock);
-
-/*
- * hipz_h_register_rpage is the internal function used by all
- * H_REGISTER_RPAGES hcall wrappers.
- */
-u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
-                         const u8 pagesize,
-                         const u8 queue_type,
-                         const u64 resource_handle,
-                         const u64 logical_address_of_page,
-                         u64 count);
-
-u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_eq_handle eq_handle,
-                            struct ehca_pfeq *pfeq,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count);
-
-u64 hipz_h_query_int_state(const struct ipz_adapter_handle
-                          hcp_adapter_handle,
-                          u32 ist);
-
-u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_cq_handle cq_handle,
-                            struct ehca_pfcq *pfcq,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count,
-                            const struct h_galpa gal);
-
-u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_qp_handle qp_handle,
-                            struct ehca_pfqp *pfqp,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count,
-                            const struct h_galpa galpa);
-
-u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
-                              const struct ipz_qp_handle qp_handle,
-                              struct ehca_pfqp *pfqp,
-                              void **log_addr_next_sq_wqe_tb_processed,
-                              void **log_addr_next_rq_wqe_tb_processed,
-                              int dis_and_get_function_code);
-enum hcall_sigt {
-       HCALL_SIGT_NO_CQE = 0,
-       HCALL_SIGT_BY_WQE = 1,
-       HCALL_SIGT_EVERY = 2
-};
-
-u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
-                    const struct ipz_qp_handle qp_handle,
-                    struct ehca_pfqp *pfqp,
-                    const u64 update_mask,
-                    struct hcp_modify_qp_control_block *mqpcb,
-                    struct h_galpa gal);
-
-u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
-                   const struct ipz_qp_handle qp_handle,
-                   struct ehca_pfqp *pfqp,
-                   struct hcp_modify_qp_control_block *qqpcb,
-                   struct h_galpa gal);
-
-u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_qp *qp);
-
-u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u32 port);
-
-u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u32 port, u32 * pma_qp_nr,
-                      u32 * bma_qp_nr);
-
-u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u16 mcg_dlid,
-                      u64 subnet_prefix, u64 interface_id);
-
-u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u16 mcg_dlid,
-                      u64 subnet_prefix, u64 interface_id);
-
-u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_cq *cq,
-                     u8 force_flag);
-
-u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_eq *eq);
-
-/*
- * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and
- * initializes the resources.
- */
-u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mr *mr,
-                            const u64 vaddr,
-                            const u64 length,
-                            const u32 access_ctrl,
-                            const struct ipz_pd pd,
-                            struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
-u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mr *mr,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count);
-
-/* hipz_h_query_mr queries MR in HW and FW */
-u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
-                   const struct ehca_mr *mr,
-                   struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_free_resource_mr frees MR resources in HW and FW */
-u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
-                           const struct ehca_mr *mr);
-
-/* hipz_h_reregister_pmr reregisters MR in HW and FW */
-u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
-                         const struct ehca_mr *mr,
-                         const u64 vaddr_in,
-                         const u64 length,
-                         const u32 access_ctrl,
-                         const struct ipz_pd pd,
-                         const u64 mr_addr_cb,
-                         struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_register_smr registers a shared MR in HW and FW */
-u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
-                       const struct ehca_mr *mr,
-                       const struct ehca_mr *orig_mr,
-                       const u64 vaddr_in,
-                       const u32 access_ctrl,
-                       const struct ipz_pd pd,
-                       struct ehca_mr_hipzout_parms *outparms);
-
-/*
- * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and
- * initializes them.
- */
-u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mw *mw,
-                            const struct ipz_pd pd,
-                            struct ehca_mw_hipzout_parms *outparms);
-
-/* hipz_h_query_mw queries MW in HW and FW */
-u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
-                   const struct ehca_mw *mw,
-                   struct ehca_mw_hipzout_parms *outparms);
-
-/* hipz_h_free_resource_mw frees MW resources in HW and FW */
-u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
-                           const struct ehca_mw *mw);
-
-u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
-                     const u64 ressource_handle,
-                     void *rblock,
-                     unsigned long *byte_count);
-u64 hipz_h_eoi(int irq);
-
-#endif /* __HCP_IF_H__ */
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
deleted file mode 100644 (file)
index 077376f..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *   load store abstraction for ehca register access with tracing
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "hipz_hw.h"
-
-u64 hcall_map_page(u64 physaddr)
-{
-       return (u64)ioremap(physaddr, EHCA_PAGESIZE);
-}
-
-int hcall_unmap_page(u64 mapaddr)
-{
-       iounmap((volatile void __iomem *) mapaddr);
-       return 0;
-}
-
-int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
-                   u64 paddr_kernel, u64 paddr_user)
-{
-       if (!is_user) {
-               galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
-               if (!galpas->kernel.fw_handle)
-                       return -ENOMEM;
-       } else
-               galpas->kernel.fw_handle = 0;
-
-       galpas->user.fw_handle = paddr_user;
-
-       return 0;
-}
-
-int hcp_galpas_dtor(struct h_galpas *galpas)
-{
-       if (galpas->kernel.fw_handle) {
-               int ret = hcall_unmap_page(galpas->kernel.fw_handle);
-               if (ret)
-                       return ret;
-       }
-
-       galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
-
-       return 0;
-}
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
deleted file mode 100644 (file)
index d1b0299..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware calls
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HCP_PHYP_H__
-#define __HCP_PHYP_H__
-
-
-/*
- * eHCA page (mapped into memory)
- * resource to access eHCA register pages in CPU address space
-*/
-struct h_galpa {
-       u64 fw_handle;
-       /* for pSeries this is a 64bit memory address where
-          I/O memory is mapped into CPU address space (kv) */
-};
-
-/*
- * resource to access eHCA address space registers, all types
- */
-struct h_galpas {
-       u32 pid;                /*PID of userspace galpa checking */
-       struct h_galpa user;    /* user space accessible resource,
-                                  set to 0 if unused */
-       struct h_galpa kernel;  /* kernel space accessible resource,
-                                  set to 0 if unused */
-};
-
-static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
-{
-       u64 addr = galpa.fw_handle + offset;
-       return *(volatile u64 __force *)addr;
-}
-
-static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
-{
-       u64 addr = galpa.fw_handle + offset;
-       *(volatile u64 __force *)addr = value;
-}
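For context, a minimal userspace model of the two accessors above (not the kernel code; plain memory stands in for the ioremap()ed register page): a galpa is just a base address, and each register is reached by adding a byte offset to it.

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct toy_galpa {
        uintptr_t fw_handle;    /* base address of the mapped register page */
};

static uint64_t toy_galpa_load(struct toy_galpa g, uint32_t offset)
{
        uint64_t v;

        memcpy(&v, (const void *)(g.fw_handle + offset), sizeof(v));
        return v;
}

static void toy_galpa_store(struct toy_galpa g, uint32_t offset, uint64_t value)
{
        memcpy((void *)(g.fw_handle + offset), &value, sizeof(value));
}

int main(void)
{
        uint64_t page[512] = { 0 };     /* 4 KiB pretend register page */
        struct toy_galpa g = { .fw_handle = (uintptr_t)page };

        toy_galpa_store(g, 0x20, 0x3);  /* e.g. a doorbell slot 0x20 bytes in */
        assert(toy_galpa_load(g, 0x20) == 0x3);
        return 0;
}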
-
-int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
-                   u64 paddr_kernel, u64 paddr_user);
-
-int hcp_galpas_dtor(struct h_galpas *galpas);
-
-u64 hcall_map_page(u64 physaddr);
-
-int hcall_unmap_page(u64 mapaddr);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
deleted file mode 100644 (file)
index 9dac93d..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HW abstraction register functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_FNS_H__
-#define __HIPZ_FNS_H__
-
-#include "ehca_classes.h"
-#include "hipz_hw.h"
-
-#include "hipz_fns_core.h"
-
-#define hipz_galpa_store_eq(gal, offset, value) \
-       hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_eq(gal, offset) \
-       hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
-
-#define hipz_galpa_store_qped(gal, offset, value) \
-       hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_qped(gal, offset) \
-       hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
-
-#define hipz_galpa_store_mrmw(gal, offset, value) \
-       hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_mrmw(gal, offset) \
-       hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
-
-#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
deleted file mode 100644 (file)
index 868735f..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HW abstraction register functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_FNS_CORE_H__
-#define __HIPZ_FNS_CORE_H__
-
-#include "hcp_phyp.h"
-#include "hipz_hw.h"
-
-#define hipz_galpa_store_cq(gal, offset, value) \
-       hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_cq(gal, offset) \
-       hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
-
-#define hipz_galpa_store_qp(gal, offset, value) \
-       hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
-#define hipz_galpa_load_qp(gal, offset) \
-       hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
-
-static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
-{
-       /*  ringing doorbell :-) */
-       hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
-                           EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
-}
-
-static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
-{
-       /*  ringing doorbell :-) */
-       hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
-                           EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
-}
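The doorbell helpers above pack nr_wqes into QPX_SQADDER/QPX_RQADDER, which hipz_hw.h (later in this diff) defines as EHCA_BMASK_IBM(48, 63). A self-contained sketch of that packing, assuming IBM bit numbering where bit 0 is the most significant bit and bit 63 the least significant; the real EHCA_BMASK_* macros live in ehca_tools.h, which is not part of this hunk.

#include <assert.h>
#include <stdint.h>

/* Pack `value` into IBM-numbered bit range [from, to] of a 64-bit word. */
static uint64_t bmask_set_ibm(unsigned from, unsigned to, uint64_t value)
{
        unsigned width = to - from + 1;
        unsigned shift = 63 - to;       /* distance of the field from the LSB */
        uint64_t mask  = (width == 64) ? ~0ULL : ((1ULL << width) - 1);

        return (value & mask) << shift;
}

int main(void)
{
        /* QPX_SQADDER is EHCA_BMASK_IBM(48, 63): adding 3 WQEs writes 3
         * into the low 16 bits of the qpx_sqa doorbell word. */
        assert(bmask_set_ibm(48, 63, 3) == 0x3);
        /* CQX_N0_GENERATE_SOLICITED_COMP_EVENT is bits (0, 0): the MSB. */
        assert(bmask_set_ibm(0, 0, 1) == 0x8000000000000000ULL);
        return 0;
}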
-
-static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
-{
-       hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
-                           EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
-}
-
-static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
-{
-       u64 cqx_n0_reg;
-
-       hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
-                           EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
-                                          value));
-       cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
-}
-
-static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
-{
-       u64 cqx_n1_reg;
-
-       hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
-                           EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
-       cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
-}
-
-#endif /* __HIPZ_FNC_CORE_H__ */
diff --git a/drivers/staging/rdma/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
deleted file mode 100644 (file)
index bf996c7..0000000
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  eHCA register definitions
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_HW_H__
-#define __HIPZ_HW_H__
-
-#include "ehca_tools.h"
-
-#define EHCA_MAX_MTU 4
-
-/* QP Table Entry Memory Map */
-struct hipz_qptemm {
-       u64 qpx_hcr;
-       u64 qpx_c;
-       u64 qpx_herr;
-       u64 qpx_aer;
-/* 0x20*/
-       u64 qpx_sqa;
-       u64 qpx_sqc;
-       u64 qpx_rqa;
-       u64 qpx_rqc;
-/* 0x40*/
-       u64 qpx_st;
-       u64 qpx_pmstate;
-       u64 qpx_pmfa;
-       u64 qpx_pkey;
-/* 0x60*/
-       u64 qpx_pkeya;
-       u64 qpx_pkeyb;
-       u64 qpx_pkeyc;
-       u64 qpx_pkeyd;
-/* 0x80*/
-       u64 qpx_qkey;
-       u64 qpx_dqp;
-       u64 qpx_dlidp;
-       u64 qpx_portp;
-/* 0xa0*/
-       u64 qpx_slidp;
-       u64 qpx_slidpp;
-       u64 qpx_dlida;
-       u64 qpx_porta;
-/* 0xc0*/
-       u64 qpx_slida;
-       u64 qpx_slidpa;
-       u64 qpx_slvl;
-       u64 qpx_ipd;
-/* 0xe0*/
-       u64 qpx_mtu;
-       u64 qpx_lato;
-       u64 qpx_rlimit;
-       u64 qpx_rnrlimit;
-/* 0x100*/
-       u64 qpx_t;
-       u64 qpx_sqhp;
-       u64 qpx_sqptp;
-       u64 qpx_nspsn;
-/* 0x120*/
-       u64 qpx_nspsnhwm;
-       u64 reserved1;
-       u64 qpx_sdsi;
-       u64 qpx_sdsbc;
-/* 0x140*/
-       u64 qpx_sqwsize;
-       u64 qpx_sqwts;
-       u64 qpx_lsn;
-       u64 qpx_nssn;
-/* 0x160 */
-       u64 qpx_mor;
-       u64 qpx_cor;
-       u64 qpx_sqsize;
-       u64 qpx_erc;
-/* 0x180*/
-       u64 qpx_rnrrc;
-       u64 qpx_ernrwt;
-       u64 qpx_rnrresp;
-       u64 qpx_lmsna;
-/* 0x1a0 */
-       u64 qpx_sqhpc;
-       u64 qpx_sqcptp;
-       u64 qpx_sigt;
-       u64 qpx_wqecnt;
-/* 0x1c0*/
-       u64 qpx_rqhp;
-       u64 qpx_rqptp;
-       u64 qpx_rqsize;
-       u64 qpx_nrr;
-/* 0x1e0*/
-       u64 qpx_rdmac;
-       u64 qpx_nrpsn;
-       u64 qpx_lapsn;
-       u64 qpx_lcr;
-/* 0x200*/
-       u64 qpx_rwc;
-       u64 qpx_rwva;
-       u64 qpx_rdsi;
-       u64 qpx_rdsbc;
-/* 0x220*/
-       u64 qpx_rqwsize;
-       u64 qpx_crmsn;
-       u64 qpx_rdd;
-       u64 qpx_larpsn;
-/* 0x240*/
-       u64 qpx_pd;
-       u64 qpx_scqn;
-       u64 qpx_rcqn;
-       u64 qpx_aeqn;
-/* 0x260*/
-       u64 qpx_aaelog;
-       u64 qpx_ram;
-       u64 qpx_rdmaqe0;
-       u64 qpx_rdmaqe1;
-/* 0x280*/
-       u64 qpx_rdmaqe2;
-       u64 qpx_rdmaqe3;
-       u64 qpx_nrpsnhwm;
-/* 0x298*/
-       u64 reserved[(0x400 - 0x298) / 8];
-/* 0x400 extended data */
-       u64 reserved_ext[(0x500 - 0x400) / 8];
-/* 0x500 */
-       u64 reserved2[(0x1000 - 0x500) / 8];
-/* 0x1000      */
-};
-
-#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
-#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
-#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
-
-#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
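The /* 0x20 */ style markers in the structure above are byte offsets into the register page, and QPTEMM_OFFSET() recovers them with offsetof(). A standalone check against a trimmed copy of the first rows (illustration only, not the driver code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Trimmed copy of the first rows of struct hipz_qptemm. */
struct qptemm_head {
        uint64_t qpx_hcr;
        uint64_t qpx_c;
        uint64_t qpx_herr;
        uint64_t qpx_aer;
        /* 0x20 */
        uint64_t qpx_sqa;
        uint64_t qpx_sqc;
        uint64_t qpx_rqa;
        uint64_t qpx_rqc;
        /* 0x40 */
        uint64_t qpx_st;
};

#define TOY_QPTEMM_OFFSET(x) offsetof(struct qptemm_head, x)

int main(void)
{
        /* The send/receive doorbells sit 0x20 and 0x30 bytes into the page,
         * exactly as the block comments in the full structure claim. */
        assert(TOY_QPTEMM_OFFSET(qpx_sqa) == 0x20);
        assert(TOY_QPTEMM_OFFSET(qpx_rqa) == 0x30);
        assert(TOY_QPTEMM_OFFSET(qpx_st)  == 0x40);
        return 0;
}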
-
-/* MRMWPT Entry Memory Map */
-struct hipz_mrmwmm {
-       /* 0x00 */
-       u64 mrx_hcr;
-
-       u64 mrx_c;
-       u64 mrx_herr;
-       u64 mrx_aer;
-       /* 0x20 */
-       u64 mrx_pp;
-       u64 reserved1;
-       u64 reserved2;
-       u64 reserved3;
-       /* 0x40 */
-       u64 reserved4[(0x200 - 0x40) / 8];
-       /* 0x200 */
-       u64 mrx_ctl[64];
-
-};
-
-#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
-
-struct hipz_qpedmm {
-       /* 0x00 */
-       u64 reserved0[(0x400) / 8];
-       /* 0x400 */
-       u64 qpedx_phh;
-       u64 qpedx_ppsgp;
-       /* 0x410 */
-       u64 qpedx_ppsgu;
-       u64 qpedx_ppdgp;
-       /* 0x420 */
-       u64 qpedx_ppdgu;
-       u64 qpedx_aph;
-       /* 0x430 */
-       u64 qpedx_apsgp;
-       u64 qpedx_apsgu;
-       /* 0x440 */
-       u64 qpedx_apdgp;
-       u64 qpedx_apdgu;
-       /* 0x450 */
-       u64 qpedx_apav;
-       u64 qpedx_apsav;
-       /* 0x460  */
-       u64 qpedx_hcr;
-       u64 reserved1[4];
-       /* 0x488 */
-       u64 qpedx_rrl0;
-       /* 0x490 */
-       u64 qpedx_rrrkey0;
-       u64 qpedx_rrva0;
-       /* 0x4a0 */
-       u64 reserved2;
-       u64 qpedx_rrl1;
-       /* 0x4b0 */
-       u64 qpedx_rrrkey1;
-       u64 qpedx_rrva1;
-       /* 0x4c0 */
-       u64 reserved3;
-       u64 qpedx_rrl2;
-       /* 0x4d0 */
-       u64 qpedx_rrrkey2;
-       u64 qpedx_rrva2;
-       /* 0x4e0 */
-       u64 reserved4;
-       u64 qpedx_rrl3;
-       /* 0x4f0 */
-       u64 qpedx_rrrkey3;
-       u64 qpedx_rrva3;
-};
-
-#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
-
-/* CQ Table Entry Memory Map */
-struct hipz_cqtemm {
-       u64 cqx_hcr;
-       u64 cqx_c;
-       u64 cqx_herr;
-       u64 cqx_aer;
-/* 0x20  */
-       u64 cqx_ptp;
-       u64 cqx_tp;
-       u64 cqx_fec;
-       u64 cqx_feca;
-/* 0x40  */
-       u64 cqx_ep;
-       u64 cqx_eq;
-/* 0x50  */
-       u64 reserved1;
-       u64 cqx_n0;
-/* 0x60  */
-       u64 cqx_n1;
-       u64 reserved2[(0x1000 - 0x60) / 8];
-/* 0x1000 */
-};
-
-#define CQX_FEC_CQE_CNT           EHCA_BMASK_IBM(32, 63)
-#define CQX_FECADDER              EHCA_BMASK_IBM(32, 63)
-#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-
-#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
-
-/* EQ Table Entry Memory Map */
-struct hipz_eqtemm {
-       u64 eqx_hcr;
-       u64 eqx_c;
-
-       u64 eqx_herr;
-       u64 eqx_aer;
-/* 0x20 */
-       u64 eqx_ptp;
-       u64 eqx_tp;
-       u64 eqx_ssba;
-       u64 eqx_psba;
-
-/* 0x40 */
-       u64 eqx_cec;
-       u64 eqx_meql;
-       u64 eqx_xisbi;
-       u64 eqx_xisc;
-/* 0x60 */
-       u64 eqx_it;
-
-};
-
-#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
-
-/* access control defines for MR/MW */
-#define HIPZ_ACCESSCTRL_L_WRITE  0x00800000
-#define HIPZ_ACCESSCTRL_R_WRITE  0x00400000
-#define HIPZ_ACCESSCTRL_R_READ   0x00200000
-#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
-#define HIPZ_ACCESSCTRL_MW_BIND  0x00080000
-
-/* query hca response block */
-struct hipz_query_hca {
-       u32 cur_reliable_dg;
-       u32 cur_qp;
-       u32 cur_cq;
-       u32 cur_eq;
-       u32 cur_mr;
-       u32 cur_mw;
-       u32 cur_ee_context;
-       u32 cur_mcast_grp;
-       u32 cur_qp_attached_mcast_grp;
-       u32 reserved1;
-       u32 cur_ipv6_qp;
-       u32 cur_eth_qp;
-       u32 cur_hp_mr;
-       u32 reserved2[3];
-       u32 max_rd_domain;
-       u32 max_qp;
-       u32 max_cq;
-       u32 max_eq;
-       u32 max_mr;
-       u32 max_hp_mr;
-       u32 max_mw;
-       u32 max_mrwpte;
-       u32 max_special_mrwpte;
-       u32 max_rd_ee_context;
-       u32 max_mcast_grp;
-       u32 max_total_mcast_qp_attach;
-       u32 max_mcast_qp_attach;
-       u32 max_raw_ipv6_qp;
-       u32 max_raw_ethy_qp;
-       u32 internal_clock_frequency;
-       u32 max_pd;
-       u32 max_ah;
-       u32 max_cqe;
-       u32 max_wqes_wq;
-       u32 max_partitions;
-       u32 max_rr_ee_context;
-       u32 max_rr_qp;
-       u32 max_rr_hca;
-       u32 max_act_wqs_ee_context;
-       u32 max_act_wqs_qp;
-       u32 max_sge;
-       u32 max_sge_rd;
-       u32 memory_page_size_supported;
-       u64 max_mr_size;
-       u32 local_ca_ack_delay;
-       u32 num_ports;
-       u32 vendor_id;
-       u32 vendor_part_id;
-       u32 hw_ver;
-       u64 node_guid;
-       u64 hca_cap_indicators;
-       u32 data_counter_register_size;
-       u32 max_shared_rq;
-       u32 max_isns_eq;
-       u32 max_neq;
-} __attribute__ ((packed));
-
-#define HCA_CAP_AH_PORT_NR_CHECK      EHCA_BMASK_IBM( 0,  0)
-#define HCA_CAP_ATOMIC                EHCA_BMASK_IBM( 1,  1)
-#define HCA_CAP_AUTO_PATH_MIG         EHCA_BMASK_IBM( 2,  2)
-#define HCA_CAP_BAD_P_KEY_CTR         EHCA_BMASK_IBM( 3,  3)
-#define HCA_CAP_SQD_RTS_PORT_CHANGE   EHCA_BMASK_IBM( 4,  4)
-#define HCA_CAP_CUR_QP_STATE_MOD      EHCA_BMASK_IBM( 5,  5)
-#define HCA_CAP_INIT_TYPE             EHCA_BMASK_IBM( 6,  6)
-#define HCA_CAP_PORT_ACTIVE_EVENT     EHCA_BMASK_IBM( 7,  7)
-#define HCA_CAP_Q_KEY_VIOL_CTR        EHCA_BMASK_IBM( 8,  8)
-#define HCA_CAP_WQE_RESIZE            EHCA_BMASK_IBM( 9,  9)
-#define HCA_CAP_RAW_PACKET_MCAST      EHCA_BMASK_IBM(10, 10)
-#define HCA_CAP_SHUTDOWN_PORT         EHCA_BMASK_IBM(11, 11)
-#define HCA_CAP_RC_LL_QP              EHCA_BMASK_IBM(12, 12)
-#define HCA_CAP_SRQ                   EHCA_BMASK_IBM(13, 13)
-#define HCA_CAP_UD_LL_QP              EHCA_BMASK_IBM(16, 16)
-#define HCA_CAP_RESIZE_MR             EHCA_BMASK_IBM(17, 17)
-#define HCA_CAP_MINI_QP               EHCA_BMASK_IBM(18, 18)
-#define HCA_CAP_H_ALLOC_RES_SYNC      EHCA_BMASK_IBM(19, 19)
-
-/* query port response block */
-struct hipz_query_port {
-       u32 state;
-       u32 bad_pkey_cntr;
-       u32 lmc;
-       u32 lid;
-       u32 subnet_timeout;
-       u32 qkey_viol_cntr;
-       u32 sm_sl;
-       u32 sm_lid;
-       u32 capability_mask;
-       u32 init_type_reply;
-       u32 pkey_tbl_len;
-       u32 gid_tbl_len;
-       u64 gid_prefix;
-       u32 port_nr;
-       u16 pkey_entries[16];
-       u8  reserved1[32];
-       u32 trent_size;
-       u32 trbuf_size;
-       u64 max_msg_sz;
-       u32 max_mtu;
-       u32 vl_cap;
-       u32 phys_pstate;
-       u32 phys_state;
-       u32 phys_speed;
-       u32 phys_width;
-       u8  reserved2[1884];
-       u64 guid_entries[255];
-} __attribute__ ((packed));
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
deleted file mode 100644 (file)
index 7ffc748..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  internal queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ipz_pt_fn.h"
-#include "ehca_classes.h"
-
-#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
-
-struct kmem_cache *small_qp_cache;
-
-void *ipz_qpageit_get_inc(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       queue->current_q_offset += queue->pagesize;
-       if (queue->current_q_offset > queue->queue_length) {
-               queue->current_q_offset -= queue->pagesize;
-               ret = NULL;
-       }
-       if (((u64)ret) % queue->pagesize) {
-               ehca_gen_err("ERROR!! not at PAGE-Boundary");
-               return NULL;
-       }
-       return ret;
-}
-
-void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       u64 last_entry_in_q = queue->queue_length - queue->qe_size;
-
-       queue->current_q_offset += queue->qe_size;
-       if (queue->current_q_offset > last_entry_in_q) {
-               queue->current_q_offset = 0;
-               queue->toggle_state = (~queue->toggle_state) & 1;
-       }
-
-       return ret;
-}
-
-int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
-{
-       int i;
-       for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
-               u64 page = __pa(queue->queue_pages[i]);
-               if (addr >= page && addr < page + queue->pagesize) {
-                       *q_offset = addr - page + i * queue->pagesize;
-                       return 0;
-               }
-       }
-       return -EINVAL;
-}
-
-#if PAGE_SHIFT < EHCA_PAGESHIFT
-#error Kernel pages must be at least as large as eHCA pages (4K)!
-#endif
-
-/*
- * allocate pages for queue:
- * outer loop allocates whole kernel pages (page aligned) and
- * inner loop divides a kernel page into smaller hca queue pages
- */
-static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
-{
-       int k, f = 0;
-       u8 *kpage;
-
-       while (f < nr_of_pages) {
-               kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
-               if (!kpage)
-                       goto out;
-
-               for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
-                       queue->queue_pages[f] = (struct ipz_page *)kpage;
-                       kpage += EHCA_PAGESIZE;
-                       f++;
-               }
-       }
-       return 1;
-
-out:
-       for (f = 0; f < nr_of_pages && queue->queue_pages[f];
-            f += PAGES_PER_KPAGE)
-               free_page((unsigned long)(queue->queue_pages)[f]);
-       return 0;
-}
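A userspace sketch of the outer/inner loop described in the comment above, under the assumption of 64 KiB kernel pages as commonly used on POWER; with 4 KiB kernel pages the split is degenerate (PAGES_PER_KPAGE == 1).

#include <assert.h>

#define TOY_EHCA_PAGESIZE 4096u
#define TOY_KERNEL_PAGE_SIZE 65536u     /* assumption: 64 KiB POWER pages */
#define TOY_PAGES_PER_KPAGE (TOY_KERNEL_PAGE_SIZE / TOY_EHCA_PAGESIZE)

int main(void)
{
        unsigned nr_of_pages = 20, f = 0, kpages = 0;

        /* Outer loop: one kernel page per iteration;
         * inner loop: carve it into 4 KiB eHCA queue pages. */
        while (f < nr_of_pages) {
                kpages++;
                for (unsigned k = 0; k < TOY_PAGES_PER_KPAGE && f < nr_of_pages; k++)
                        f++;
        }
        assert(TOY_PAGES_PER_KPAGE == 16);
        assert(kpages == 2);    /* 20 queue pages fit in 2 kernel pages */
        return 0;
}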
-
-static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
-{
-       int order = ilog2(queue->pagesize) - 9;
-       struct ipz_small_queue_page *page;
-       unsigned long bit;
-
-       mutex_lock(&pd->lock);
-
-       if (!list_empty(&pd->free[order]))
-               page = list_entry(pd->free[order].next,
-                                 struct ipz_small_queue_page, list);
-       else {
-               page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
-               if (!page)
-                       goto out;
-
-               page->page = get_zeroed_page(GFP_KERNEL);
-               if (!page->page) {
-                       kmem_cache_free(small_qp_cache, page);
-                       goto out;
-               }
-
-               list_add(&page->list, &pd->free[order]);
-       }
-
-       bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
-       __set_bit(bit, page->bitmap);
-       page->fill++;
-
-       if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
-               list_move(&page->list, &pd->full[order]);
-
-       mutex_unlock(&pd->lock);
-
-       queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
-       queue->small_page = page;
-       queue->offset = bit << (order + 9);
-       return 1;
-
-out:
-       ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
-       mutex_unlock(&pd->lock);
-       return 0;
-}
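A sketch of the sub-page arithmetic used above, assuming 4 KiB kernel pages: a small queue page of 512 << order bytes occupies slot `bit` inside a zeroed kernel page, so its byte offset is bit << (order + 9).

#include <assert.h>

#define TOY_KPAGE_SIZE 4096u
#define TOY_SPAGE_PER_KPAGE (TOY_KPAGE_SIZE / 512u)

static unsigned small_page_offset(unsigned pagesize, unsigned bit)
{
        unsigned order = 0;

        while ((512u << order) < pagesize)      /* ilog2(pagesize) - 9 */
                order++;
        return bit << (order + 9);
}

int main(void)
{
        /* 512-byte queue pages: 8 slots per kernel page, 512 bytes apart. */
        assert(TOY_SPAGE_PER_KPAGE == 8);
        assert(small_page_offset(512, 0) == 0);
        assert(small_page_offset(512, 3) == 3 * 512);
        /* 2 KiB queue pages: order 2, two slots per kernel page. */
        assert(small_page_offset(2048, 1) == 2048);
        return 0;
}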
-
-static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
-{
-       int order = ilog2(queue->pagesize) - 9;
-       struct ipz_small_queue_page *page = queue->small_page;
-       unsigned long bit;
-       int free_page = 0;
-
-       bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
-               >> (order + 9);
-
-       mutex_lock(&pd->lock);
-
-       __clear_bit(bit, page->bitmap);
-       page->fill--;
-
-       if (page->fill == 0) {
-               list_del(&page->list);
-               free_page = 1;
-       }
-
-       if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
-               /* the page was full until we freed the chunk */
-               list_move_tail(&page->list, &pd->free[order]);
-
-       mutex_unlock(&pd->lock);
-
-       if (free_page) {
-               free_page(page->page);
-               kmem_cache_free(small_qp_cache, page);
-       }
-}
-
-int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
-                  const u32 nr_of_pages, const u32 pagesize,
-                  const u32 qe_size, const u32 nr_of_sg,
-                  int is_small)
-{
-       if (pagesize > PAGE_SIZE) {
-               ehca_gen_err("FATAL ERROR: pagesize=%x "
-                            "is greater than kernel page size", pagesize);
-               return 0;
-       }
-
-       /* init queue fields */
-       queue->queue_length = nr_of_pages * pagesize;
-       queue->pagesize = pagesize;
-       queue->qe_size = qe_size;
-       queue->act_nr_of_sg = nr_of_sg;
-       queue->current_q_offset = 0;
-       queue->toggle_state = 1;
-       queue->small_page = NULL;
-
-       /* allocate queue page pointers */
-       queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
-                                    GFP_KERNEL | __GFP_NOWARN);
-       if (!queue->queue_pages) {
-               queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
-               if (!queue->queue_pages) {
-                       ehca_gen_err("Couldn't allocate queue page list");
-                       return 0;
-               }
-       }
-
-       /* allocate actual queue pages */
-       if (is_small) {
-               if (!alloc_small_queue_page(queue, pd))
-                       goto ipz_queue_ctor_exit0;
-       } else
-               if (!alloc_queue_pages(queue, nr_of_pages))
-                       goto ipz_queue_ctor_exit0;
-
-       return 1;
-
-ipz_queue_ctor_exit0:
-       ehca_gen_err("Couldn't alloc pages queue=%p "
-                "nr_of_pages=%x",  queue, nr_of_pages);
-       kvfree(queue->queue_pages);
-
-       return 0;
-}
-
-int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
-{
-       int i, nr_pages;
-
-       if (!queue || !queue->queue_pages) {
-               ehca_gen_dbg("queue or queue_pages is NULL");
-               return 0;
-       }
-
-       if (queue->small_page)
-               free_small_queue_page(queue, pd);
-       else {
-               nr_pages = queue->queue_length / queue->pagesize;
-               for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
-                       free_page((unsigned long)queue->queue_pages[i]);
-       }
-
-       kvfree(queue->queue_pages);
-
-       return 1;
-}
-
-int ehca_init_small_qp_cache(void)
-{
-       small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
-                                          sizeof(struct ipz_small_queue_page),
-                                          0, SLAB_HWCACHE_ALIGN, NULL);
-       if (!small_qp_cache)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void ehca_cleanup_small_qp_cache(void)
-{
-       kmem_cache_destroy(small_qp_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
deleted file mode 100644 (file)
index a801274..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  internal queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __IPZ_PT_FN_H__
-#define __IPZ_PT_FN_H__
-
-#define EHCA_PAGESHIFT   12
-#define EHCA_PAGESIZE   4096UL
-#define EHCA_PAGEMASK   (~(EHCA_PAGESIZE-1))
-#define EHCA_PT_ENTRIES 512UL
-
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-
-struct ehca_pd;
-struct ipz_small_queue_page;
-
-extern struct kmem_cache *small_qp_cache;
-
-/* struct generic ehca page */
-struct ipz_page {
-       u8 entries[EHCA_PAGESIZE];
-};
-
-#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
-
-struct ipz_small_queue_page {
-       unsigned long page;
-       unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
-       int fill;
-       void *mapped_addr;
-       u32 mmap_count;
-       struct list_head list;
-};
-
-/* struct generic queue in linux kernel virtual memory (kv) */
-struct ipz_queue {
-       u64 current_q_offset;   /* current queue entry */
-
-       struct ipz_page **queue_pages;  /* array of pages belonging to queue */
-       u32 qe_size;            /* queue entry size */
-       u32 act_nr_of_sg;
-       u32 queue_length;       /* queue length allocated in bytes */
-       u32 pagesize;
-       u32 toggle_state;       /* toggle flag - per page */
-       u32 offset; /* save offset within page for small_qp */
-       struct ipz_small_queue_page *small_page;
-};
-
-/*
- * return current Queue Entry for a certain q_offset
- * returns address (kv) of Queue Entry
- */
-static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
-{
-       struct ipz_page *current_page;
-       if (q_offset >= queue->queue_length)
-               return NULL;
-       current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
-       return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
-}
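A userspace model of the offset decomposition done here: the high bits of q_offset select a 4 KiB queue page and the low 12 bits select the byte within it (illustrative buffers only).

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define TOY_PAGESHIFT 12
#define TOY_PAGESIZE  4096u

int main(void)
{
        /* Two fake queue pages, as queue->queue_pages would hold them. */
        uint8_t *pages[2] = { malloc(TOY_PAGESIZE), malloc(TOY_PAGESIZE) };
        uint64_t q_offset = TOY_PAGESIZE + 0x40;        /* second page, byte 0x40 */

        uint8_t *entry = &pages[q_offset >> TOY_PAGESHIFT]
                               [q_offset & (TOY_PAGESIZE - 1)];

        assert(entry == pages[1] + 0x40);
        free(pages[0]);
        free(pages[1]);
        return 0;
}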
-
-/*
- * return current Queue Entry
- * returns address (kv) of Queue Entry
- */
-static inline void *ipz_qeit_get(struct ipz_queue *queue)
-{
-       return ipz_qeit_calc(queue, queue->current_q_offset);
-}
-
-/*
- * return current Queue Page , increment Queue Page iterator from
- * page to page in struct ipz_queue, last increment will return 0! and
- * NOT wrap
- * returns address (kv) of Queue Page
- * warning don't use in parallel with ipz_QE_get_inc()
- */
-void *ipz_qpageit_get_inc(struct ipz_queue *queue);
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * warning don't use in parallel with ipz_qpageit_get_inc()
- */
-static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       queue->current_q_offset += queue->qe_size;
-       if (queue->current_q_offset >= queue->queue_length) {
-               queue->current_q_offset = 0;
-               /* toggle the valid flag */
-               queue->toggle_state = (~queue->toggle_state) & 1;
-       }
-
-       return ret;
-}
-
-/*
- * return a bool indicating whether current Queue Entry is valid
- */
-static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
-{
-       struct ehca_cqe *cqe = ipz_qeit_get(queue);
-       return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
-}
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * returns 0 and does not increment, if wrong valid state
- * warning don't use in parallel with ipz_qpageit_get_inc()
- */
-static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
-{
-       return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
-}
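A minimal userspace model of the toggle-bit scheme used by ipz_qeit_get_inc() and ipz_qeit_is_valid(): the consumer's toggle starts at 1 (see ipz_queue_ctor()) and flips on every wrap, and an entry counts as valid while its producer-written flag bit matches the current toggle. Entry layout and sizes here are illustrative only.

#include <assert.h>
#include <stdint.h>

#define TOY_N_ENTRIES 4

struct toy_queue {
        uint8_t flags[TOY_N_ENTRIES];   /* top bit written by the producer */
        unsigned head;                  /* consumer position */
        unsigned toggle;                /* flips on every wrap, starts at 1 */
};

static int entry_is_valid(const struct toy_queue *q)
{
        return (q->flags[q->head] >> 7) == (q->toggle & 1);
}

static int consume(struct toy_queue *q) /* returns entry index or -1 */
{
        int idx;

        if (!entry_is_valid(q))
                return -1;
        idx = q->head;
        if (++q->head == TOY_N_ENTRIES) {       /* wrap: flip the toggle */
                q->head = 0;
                q->toggle ^= 1;
        }
        return idx;
}

int main(void)
{
        struct toy_queue q = { .toggle = 1 };

        q.flags[0] = 0x80;              /* producer marks entry 0 valid */
        assert(consume(&q) == 0);
        assert(consume(&q) == -1);      /* entry 1 not produced yet */
        return 0;
}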
-
-/*
- * resets the Queue Entry iterator
- * returns address (kv) of first Queue Entry
- */
-static inline void *ipz_qeit_reset(struct ipz_queue *queue)
-{
-       queue->current_q_offset = 0;
-       return ipz_qeit_get(queue);
-}
-
-/*
- * return the q_offset corresponding to an absolute address
- */
-int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
-
-/*
- * return the next queue offset. don't modify the queue.
- */
-static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
-{
-       offset += queue->qe_size;
-       if (offset >= queue->queue_length) offset = 0;
-       return offset;
-}
-
-/* struct generic page table */
-struct ipz_pt {
-       u64 entries[EHCA_PT_ENTRIES];
-};
-
-/* struct page table for a queue, only to be used in pf */
-struct ipz_qpt {
-       /* queue page tables (kv), use u64 because we know the element length */
-       u64 *qpts;
-       u32 n_qpts;
-       u32 n_ptes;       /*  number of page table entries */
-       u64 *current_pte_addr;
-};
-
-/*
- * constructor for an ipz_queue_t, placement new for ipz_queue_t,
- * new for all dependent data structures
- * all QP Tables are the same
- * flow:
- *    allocate+pin queue
- * see ipz_qpt_ctor()
- * returns true if ok, false if out of memory
- */
-int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
-                  const u32 nr_of_pages, const u32 pagesize,
-                  const u32 qe_size, const u32 nr_of_sg,
-                  int is_small);
-
-/*
- * destructor for an ipz_queue_t
- *  -# free queue
- *  see ipz_queue_ctor()
- *  returns true if ok, false if queue was NULL-ptr or free failed
- */
-int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
-
-/*
- * constructor for an ipz_qpt_t,
- * placement new for struct ipz_queue, new for all dependent data structures
- * all QP Tables are the same,
- * flow:
- * -# allocate+pin queue
- * -# initialise ptcb
- * -# allocate+pin PTs
- * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
- * -# the ring must have room for exactly nr_of_PTEs
- * see ipz_qpt_ctor()
- */
-void ipz_qpt_ctor(struct ipz_qpt *qpt,
-                 const u32 nr_of_qes,
-                 const u32 pagesize,
-                 const u32 qe_size,
-                 const u8 lowbyte, const u8 toggle,
-                 u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * warning don't use in parallel with ipz_qpageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
- * fix EQ page problems
- */
-void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
-
-/*
- * return current Event Queue Entry, increment Queue Entry iterator
- * by one step in struct ipz_queue if valid, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * returns 0 and does not increment, if wrong valid state
- * warning don't use in parallel with ipz_queue_QPageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
- */
-static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       u32 qe = *(u8 *)ret;
-       if ((qe >> 7) != (queue->toggle_state & 1))
-               return NULL;
-       ipz_qeit_eq_get_inc(queue); /* this is a good one */
-       return ret;
-}
-
-static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       u32 qe = *(u8 *)ret;
-       if ((qe >> 7) != (queue->toggle_state & 1))
-               return NULL;
-       return ret;
-}
-
-/* returns address (GX) of first queue entry */
-static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
-{
-       return be64_to_cpu(qpt->qpts[0]);
-}
-
-/* returns address (kv) of first page of queue page table */
-static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
-{
-       return qpt->qpts;
-}
-
-#endif                         /* __IPZ_PT_FN_H__ */
diff --git a/drivers/staging/rdma/ipath/Kconfig b/drivers/staging/rdma/ipath/Kconfig
deleted file mode 100644 (file)
index 041ce06..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-config INFINIBAND_IPATH
-       tristate "QLogic HTX HCA support"
-       depends on 64BIT && NET && HT_IRQ
-       ---help---
-       This is a driver for the deprecated QLogic Hyper-Transport
-       IB host channel adapter (model QHT7140),
-       including InfiniBand verbs support.  This driver allows these
-       devices to be used with both kernel upper level protocols such
-       as IP-over-InfiniBand as well as with userspace applications
-       (in conjunction with InfiniBand userspace access).
-       For QLogic PCIe QLE based cards, use the QIB driver instead.
-
-       If you have this hardware you will need to boot with PAT disabled
-       on your x86-64 systems; use the nopat kernel parameter.
-
-       Note that this driver will soon be removed entirely from the kernel.
diff --git a/drivers/staging/rdma/ipath/Makefile b/drivers/staging/rdma/ipath/Makefile
deleted file mode 100644 (file)
index 4496f28..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
-       -DIPATH_KERN_TYPE=0
-
-obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
-
-ib_ipath-y := \
-       ipath_cq.o \
-       ipath_diag.o \
-       ipath_dma.o \
-       ipath_driver.o \
-       ipath_eeprom.o \
-       ipath_file_ops.o \
-       ipath_fs.o \
-       ipath_init_chip.o \
-       ipath_intr.o \
-       ipath_keys.o \
-       ipath_mad.o \
-       ipath_mmap.o \
-       ipath_mr.o \
-       ipath_qp.o \
-       ipath_rc.o \
-       ipath_ruc.o \
-       ipath_sdma.o \
-       ipath_srq.o \
-       ipath_stats.o \
-       ipath_sysfs.o \
-       ipath_uc.o \
-       ipath_ud.o \
-       ipath_user_pages.o \
-       ipath_user_sdma.o \
-       ipath_verbs_mcast.o \
-       ipath_verbs.o
-
-ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-
-ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
-ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/staging/rdma/ipath/TODO b/drivers/staging/rdma/ipath/TODO
deleted file mode 100644 (file)
index cb00158..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-The ipath driver has been moved to staging in preparation for its removal in a
-few releases. The driver will be deleted during the 4.6 merge window.
-
-Contact Dennis Dalessandro <dennis.dalessandro@intel.com> and
-Cc: linux-rdma@vger.kernel.org
diff --git a/drivers/staging/rdma/ipath/ipath_common.h b/drivers/staging/rdma/ipath/ipath_common.h
deleted file mode 100644 (file)
index 28cfe97..0000000
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_COMMON_H
-#define _IPATH_COMMON_H
-
-/*
- * This file contains defines, structures, etc. that are used
- * to communicate between kernel and user code.
- */
-
-
-/* This is the IEEE-assigned OUI for QLogic Inc. InfiniPath */
-#define IPATH_SRC_OUI_1 0x00
-#define IPATH_SRC_OUI_2 0x11
-#define IPATH_SRC_OUI_3 0x75
-
-/* version of protocol header (known to chip also). In the long run,
- * we should be able to generate and accept a range of version numbers;
- * for now we only accept one, and it's compiled in.
- */
-#define IPS_PROTO_VERSION 2
-
-/*
- * These are compile time constants that you may want to enable or disable
- * if you are trying to debug problems with code or performance.
- * IPATH_VERBOSE_TRACING define as 1 if you want additional tracing in
- * fastpath code
- * IPATH_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in fastpath code
- * _IPATH_TRACING define as 0 if you want to remove all tracing in a
- * compilation unit
- * _IPATH_DEBUGGING define as 0 if you want to remove debug prints
- */
-
-/*
- * The value in the BTH QP field that InfiniPath uses to differentiate
- * an infinipath protocol IB packet vs standard IB transport
- */
-#define IPATH_KD_QP 0x656b79
-
-/*
- * valid states passed to ipath_set_linkstate() user call
- */
-#define IPATH_IB_LINKDOWN              0
-#define IPATH_IB_LINKARM               1
-#define IPATH_IB_LINKACTIVE            2
-#define IPATH_IB_LINKDOWN_ONLY         3
-#define IPATH_IB_LINKDOWN_SLEEP                4
-#define IPATH_IB_LINKDOWN_DISABLE      5
-#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
-#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
-#define IPATH_IB_LINK_NO_HRTBT 8 /* disable Heartbeat, e.g. for loopback */
-#define IPATH_IB_LINK_HRTBT    9 /* enable heartbeat, normal, non-loopback */
-
-/*
- * These 3 values (SDR and DDR may be ORed for auto-speed
- * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
- * with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs.  They
- * are also the possible values for ipath_link_speed_enabled and active
- * The values were chosen to match values used within the IB spec.
- */
-#define IPATH_IB_SDR 1
-#define IPATH_IB_DDR 2
-
-/*
- * stats maintained by the driver.  For now, at least, this is global
- * to all minor devices.
- */
-struct infinipath_stats {
-       /* number of interrupts taken */
-       __u64 sps_ints;
-       /* number of interrupts for errors */
-       __u64 sps_errints;
-       /* number of errors from chip (not incl. packet errors or CRC) */
-       __u64 sps_errs;
-       /* number of packet errors from chip other than CRC */
-       __u64 sps_pkterrs;
-       /* number of packets with CRC errors (ICRC and VCRC) */
-       __u64 sps_crcerrs;
-       /* number of hardware errors reported (parity, etc.) */
-       __u64 sps_hwerrs;
-       /* number of times IB link changed state unexpectedly */
-       __u64 sps_iblink;
-       __u64 sps_unused; /* was fastrcvint, no longer implemented */
-       /* number of kernel (port0) packets received */
-       __u64 sps_port0pkts;
-       /* number of "ethernet" packets sent by driver */
-       __u64 sps_ether_spkts;
-       /* number of "ethernet" packets received by driver */
-       __u64 sps_ether_rpkts;
-       /* number of SMA packets sent by driver. Obsolete. */
-       __u64 sps_sma_spkts;
-       /* number of SMA packets received by driver. Obsolete. */
-       __u64 sps_sma_rpkts;
-       /* number of times all ports rcvhdrq was full and packet dropped */
-       __u64 sps_hdrqfull;
-       /* number of times all ports egrtid was full and packet dropped */
-       __u64 sps_etidfull;
-       /*
-        * number of times we tried to send from driver, but no pio buffers
-        * avail
-        */
-       __u64 sps_nopiobufs;
-       /* number of ports currently open */
-       __u64 sps_ports;
-       /* list of pkeys (other than default) accepted (0 means not set) */
-       __u16 sps_pkeys[4];
-       __u16 sps_unused16[4]; /* available; maintaining compatible layout */
-       /* number of user ports per chip (not IB ports) */
-       __u32 sps_nports;
-       /* not our interrupt, or already handled */
-       __u32 sps_nullintr;
-       /* max number of packets handled per receive call */
-       __u32 sps_maxpkts_call;
-       /* avg number of packets handled per receive call */
-       __u32 sps_avgpkts_call;
-       /* total number of pages locked */
-       __u64 sps_pagelocks;
-       /* total number of pages unlocked */
-       __u64 sps_pageunlocks;
-       /*
-        * Number of packets dropped in kernel other than errors (ether
-        * packets if ipath not configured, etc.)
-        */
-       __u64 sps_krdrops;
-       __u64 sps_txeparity; /* PIO buffer parity error, recovered */
-       /* pad for future growth */
-       __u64 __sps_pad[45];
-};
-
-/*
- * These are the status bits readable (in ascii form, 64bit value)
- * from the "status" sysfs file.
- */
-#define IPATH_STATUS_INITTED       0x1 /* basic initialization done */
-#define IPATH_STATUS_DISABLED      0x2 /* hardware disabled */
-/* Device has been disabled via admin request */
-#define IPATH_STATUS_ADMIN_DISABLED    0x4
-/* Chip has been found and initted */
-#define IPATH_STATUS_CHIP_PRESENT 0x20
-/* IB link is at ACTIVE, usable for data traffic */
-#define IPATH_STATUS_IB_READY     0x40
-/* link is configured, LID, MTU, etc. have been set */
-#define IPATH_STATUS_IB_CONF      0x80
-/* no link established, probably no cable */
-#define IPATH_STATUS_IB_NOCABLE  0x100
-/* A Fatal hardware error has occurred. */
-#define IPATH_STATUS_HWERROR     0x200
-
-/*
- * The list of usermode accessible registers.  Also see Reg_* later in file.
- */
-typedef enum _ipath_ureg {
-       /* (RO)  DMA RcvHdr to be used next. */
-       ur_rcvhdrtail = 0,
-       /* (RW)  RcvHdr entry to be processed next by host. */
-       ur_rcvhdrhead = 1,
-       /* (RO)  Index of next Eager index to use. */
-       ur_rcvegrindextail = 2,
-       /* (RW)  Eager TID to be processed next */
-       ur_rcvegrindexhead = 3,
-       /* For internal use only; max register number. */
-       _IPATH_UregMax
-} ipath_ureg;
-
-/* bit values for spi_runtime_flags */
-#define IPATH_RUNTIME_HT       0x1
-#define IPATH_RUNTIME_PCIE     0x2
-#define IPATH_RUNTIME_FORCE_WC_ORDER   0x4
-#define IPATH_RUNTIME_RCVHDR_COPY      0x8
-#define IPATH_RUNTIME_MASTER   0x10
-#define IPATH_RUNTIME_NODMA_RTAIL 0x80
-#define IPATH_RUNTIME_SDMA           0x200
-#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
-#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
-
-/*
- * This structure is returned by ipath_userinit() immediately after
- * open to get implementation-specific info, and info specific to this
- * instance.
- *
- * This struct must have explicit pad fields where type sizes
- * may result in different alignments between 32 and 64 bit
- * programs, since the 64 bit kernel requires the user code
- * to have matching offsets.
- */
-struct ipath_base_info {
-       /* version of hardware, for feature checking. */
-       __u32 spi_hw_version;
-       /* version of software, for feature checking. */
-       __u32 spi_sw_version;
-       /* InfiniPath port assigned, goes into sent packets */
-       __u16 spi_port;
-       __u16 spi_subport;
-       /*
-        * IB MTU, packets IB data must be less than this.
-        * The MTU is in bytes, and will be a multiple of 4 bytes.
-        */
-       __u32 spi_mtu;
-       /*
-        * Size of a PIO buffer.  Any given packet's total size must be less
-        * than this (in words).  Included is the starting control word, so
-        * if 513 is returned, then total pkt size is 512 words or less.
-        */
-       __u32 spi_piosize;
-       /* size of the TID cache in infinipath, in entries */
-       __u32 spi_tidcnt;
-       /* size of the TID Eager list in infinipath, in entries */
-       __u32 spi_tidegrcnt;
-       /* size of a single receive header queue entry in words. */
-       __u32 spi_rcvhdrent_size;
-       /*
-        * Count of receive header queue entries allocated.
-        * This may be less than the spu_rcvhdrcnt passed in!
-        */
-       __u32 spi_rcvhdr_cnt;
-
-       /* per-chip and other runtime features bitmap (IPATH_RUNTIME_*) */
-       __u32 spi_runtime_flags;
-
-       /* address where receive buffer queue is mapped into */
-       __u64 spi_rcvhdr_base;
-
-       /* user program. */
-
-       /* base address of eager TID receive buffers. */
-       __u64 spi_rcv_egrbufs;
-
-       /* Allocated by initialization code, not by protocol. */
-
-       /*
-        * Size of each TID buffer in host memory, starting at
-        * spi_rcv_egrbufs.  The buffers are virtually contiguous.
-        */
-       __u32 spi_rcv_egrbufsize;
-       /*
-        * The special QP (queue pair) value that identifies an infinipath
-        * protocol packet from standard IB packets.  More, probably much
-        * more, to be added.
-        */
-       __u32 spi_qpair;
-
-       /*
-        * User register base for init code, not to be used directly by
-        * protocol or applications.
-        */
-       __u64 __spi_uregbase;
-       /*
-        * Maximum buffer size in bytes that can be used in a single TID
-        * entry (assuming the buffer is aligned to this boundary).  This is
-        * the minimum of what the hardware and software support.
-        * Guaranteed to be a power of 2.
-        */
-       __u32 spi_tid_maxsize;
-       /*
-        * alignment of each pio send buffer (byte count
-        * to add to spi_piobufbase to get to second buffer)
-        */
-       __u32 spi_pioalign;
-       /*
-        * The index of the first pio buffer available to this process;
-        * needed to do lookup in spi_pioavailaddr; not added to
-        * spi_piobufbase.
-        */
-       __u32 spi_pioindex;
-        /* number of buffers mapped for this process */
-       __u32 spi_piocnt;
-
-       /*
-        * Base address of writeonly pio buffers for this process.
-        * Each buffer has spi_piosize words, and is aligned on spi_pioalign
-        * boundaries.  spi_piocnt buffers are mapped from this address
-        */
-       __u64 spi_piobufbase;
-
-       /*
-        * Base address of readonly memory copy of the pioavail registers.
-        * There are 2 bits for each buffer.
-        */
-       __u64 spi_pioavailaddr;
-
-       /*
-        * Address where driver updates a copy of the interface and driver
-        * status (IPATH_STATUS_*) as a 64 bit value.  It's followed by a
-        * string indicating hardware error, if there was one.
-        */
-       __u64 spi_status;
-
-       /* number of chip ports available to user processes */
-       __u32 spi_nports;
-       /* unit number of chip we are using */
-       __u32 spi_unit;
-       /* num bufs in each contiguous set */
-       __u32 spi_rcv_egrperchunk;
-       /* size in bytes of each contiguous set */
-       __u32 spi_rcv_egrchunksize;
-       /* total size of mmap to cover full rcvegrbuffers */
-       __u32 spi_rcv_egrbuftotlen;
-       __u32 spi_filler_for_align;
-       /* address of readonly memory copy of the rcvhdrq tail register. */
-       __u64 spi_rcvhdr_tailaddr;
-
-       /* shared memory pages for subports if port is shared */
-       __u64 spi_subport_uregbase;
-       __u64 spi_subport_rcvegrbuf;
-       __u64 spi_subport_rcvhdr_base;
-
-       /* shared memory page for hardware port if it is shared */
-       __u64 spi_port_uregbase;
-       __u64 spi_port_rcvegrbuf;
-       __u64 spi_port_rcvhdr_base;
-       __u64 spi_port_rcvhdr_tailaddr;
-
-} __attribute__ ((aligned(8)));
-
-
-/*
- * This version number is given to the driver by the user code during
- * initialization in the spu_userversion field of ipath_user_info, so
- * the driver can check for compatibility with user code.
- *
- * The major version changes when data structures
- * change in an incompatible way.  The driver must be the same or higher
- * for initialization to succeed.  In some cases, a higher version
- * driver will not interoperate with older software, and initialization
- * will return an error.
- */
-#define IPATH_USER_SWMAJOR 1
-
-/*
- * Minor version differences are always compatible
- * within a major version; however, if the user software is newer
- * than the driver software, some new features and/or structure fields
- * may not be implemented; the user code must deal with this if it
- * cares, or it must abort after initialization reports the difference.
- */
-#define IPATH_USER_SWMINOR 6
-
-#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
-
-#define IPATH_KERN_TYPE 0
-
-/*
- * Similarly, this is the kernel version going back to the user.  It's
- * slightly different, in that we want to tell if the driver was built as
- * part of a QLogic release, or whether the driver came from openfabrics.org,
- * kernel.org, or a standard distribution, for support reasons.
- * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
- *
- * It's returned by the driver to the user code during initialization in the
- * spi_sw_version field of ipath_base_info, so the user code can in turn
- * check for compatibility with the kernel.
-*/
-#define IPATH_KERN_SWVERSION ((IPATH_KERN_TYPE<<31) | IPATH_USER_SWVERSION)
-
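As a sketch of how these version words decompose and how user code might gate on them; the helper macros and the equality check are illustrative, not the driver's actual compatibility test.

/* illustrative accessors for the packed version words above */
#define IPATH_SW_MAJOR(v)   (((v) >> 16) & 0x7fff)
#define IPATH_SW_MINOR(v)   ((v) & 0xffff)
#define IPATH_SW_QLOGIC(v)  (((v) >> 31) & 0x1)

/* user-side sanity check: major versions must match; minor skew and the
 * QLogic-built bit are tolerated, per the comments above */
static int ipath_versions_compatible(unsigned int spi_sw_version)
{
	return IPATH_SW_MAJOR(spi_sw_version) == IPATH_USER_SWMAJOR;
}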
-/*
- * This structure is passed to ipath_userinit() to tell the driver where
- * user code buffers are, sizes, etc.   The offsets and sizes of the
- * fields must remain unchanged, for binary compatibility.  It can
- * be extended if needed, provided userversion is changed so user code can tell.
- */
-struct ipath_user_info {
-       /*
-        * version of user software, to detect compatibility issues.
-        * Should be set to IPATH_USER_SWVERSION.
-        */
-       __u32 spu_userversion;
-
-       /* desired number of receive header queue entries */
-       __u32 spu_rcvhdrcnt;
-
-       /* size of struct base_info to write to */
-       __u32 spu_base_info_size;
-
-       /*
-        * number of words in KD protocol header
-        * This tells InfiniPath how many words to copy to rcvhdrq.  If 0,
-        * kernel uses a default.  Once set, attempts to set any other value
-        * are an error (EAGAIN) until driver is reloaded.
-        */
-       __u32 spu_rcvhdrsize;
-
-       /*
-        * If two or more processes wish to share a port, each process
-        * must set the spu_subport_cnt and spu_subport_id to the same
-        * values.  The only restriction on the spu_subport_id is that
-        * it be unique for a given node.
-        */
-       __u16 spu_subport_cnt;
-       __u16 spu_subport_id;
-
-       __u32 spu_unused; /* kept for compatible layout */
-
-       /*
-        * address of struct base_info to write to
-        */
-       __u64 spu_base_info;
-
-} __attribute__ ((aligned(8)));
-
-/* User commands. */
-
-#define IPATH_CMD_MIN          16
-
-#define __IPATH_CMD_USER_INIT  16      /* old set up userspace (for old user code) */
-#define IPATH_CMD_PORT_INFO    17      /* find out what resources we got */
-#define IPATH_CMD_RECV_CTRL    18      /* control receipt of packets */
-#define IPATH_CMD_TID_UPDATE   19      /* update expected TID entries */
-#define IPATH_CMD_TID_FREE     20      /* free expected TID entries */
-#define IPATH_CMD_SET_PART_KEY 21      /* add partition key */
-#define __IPATH_CMD_SLAVE_INFO 22      /* return info on slave processes (for old user code) */
-#define IPATH_CMD_ASSIGN_PORT  23      /* allocate HCA and port */
-#define IPATH_CMD_USER_INIT    24      /* set up userspace */
-#define IPATH_CMD_UNUSED_1     25
-#define IPATH_CMD_UNUSED_2     26
-#define IPATH_CMD_PIOAVAILUPD  27      /* force an update of PIOAvail reg */
-#define IPATH_CMD_POLL_TYPE    28      /* set the kind of polling we want */
-#define IPATH_CMD_ARMLAUNCH_CTRL       29 /* armlaunch detection control */
-/* 30 is unused */
-#define IPATH_CMD_SDMA_INFLIGHT 31     /* sdma inflight counter request */
-#define IPATH_CMD_SDMA_COMPLETE 32     /* sdma completion counter request */
-
-/*
- * Poll types
- */
-#define IPATH_POLL_TYPE_URGENT  0x01
-#define IPATH_POLL_TYPE_OVERFLOW 0x02
-
-struct ipath_port_info {
-       __u32 num_active;       /* number of active units */
-       __u32 unit;             /* unit (chip) assigned to caller */
-       __u16 port;             /* port on unit assigned to caller */
-       __u16 subport;          /* subport on unit assigned to caller */
-       __u16 num_ports;        /* number of ports available on unit */
-       __u16 num_subports;     /* number of subports opened on port */
-};
-
-struct ipath_tid_info {
-       __u32 tidcnt;
-       /* make structure same size in 32 and 64 bit */
-       __u32 tid__unused;
-       /* virtual address of first page in transfer */
-       __u64 tidvaddr;
-       /* pointer (same size 32/64 bit) to __u16 tid array */
-       __u64 tidlist;
-
-       /*
-        * pointer (same size 32/64 bit) to bitmap of TIDs used
-        * for this call; checked for being large enough at open
-        */
-       __u64 tidmap;
-};
-
-struct ipath_cmd {
-       __u32 type;                     /* command type */
-       union {
-               struct ipath_tid_info tid_info;
-               struct ipath_user_info user_info;
-
-               /*
-                * address in userspace where we should put the sdma
-                * inflight counter
-                */
-               __u64 sdma_inflight;
-               /*
-                * address in userspace where we should put the sdma
-                * completion counter
-                */
-               __u64 sdma_complete;
-               /* address in userspace of struct ipath_port_info to
-                  write result to */
-               __u64 port_info;
-               /* enable/disable receipt of packets */
-               __u32 recv_ctrl;
-               /* enable/disable armlaunch errors (non-zero to enable) */
-               __u32 armlaunch_ctrl;
-               /* partition key to set */
-               __u16 part_key;
-               /* user address of __u32 bitmask of active slaves */
-               __u64 slave_mask_addr;
-               /* type of polling we want */
-               __u16 poll_type;
-       } cmd;
-};
-
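A hedged sketch of issuing one of these commands; it assumes (not shown in this diff) that a command is delivered by a plain write() of struct ipath_cmd on the open device file descriptor, and uses the port-info command purely as an example.

#include <string.h>
#include <unistd.h>

/* illustrative: ask the driver to fill in a struct ipath_port_info */
static int ipath_query_port(int dev_fd, struct ipath_port_info *info)
{
	struct ipath_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = IPATH_CMD_PORT_INFO;
	/* the driver copies the result to this user address */
	cmd.cmd.port_info = (__u64) (unsigned long) info;

	return write(dev_fd, &cmd, sizeof(cmd)) == sizeof(cmd) ? 0 : -1;
}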
-struct ipath_iovec {
-       /* Pointer to data, but same size 32 and 64 bit */
-       __u64 iov_base;
-
-       /*
-        * Length of data; don't need 64 bits, but want
-        * ipath_sendpkt to remain same size as before 32 bit changes, so...
-        */
-       __u64 iov_len;
-};
-
-/*
- * Describes a single packet for send.  Each packet can have one or more
- * buffers, but the total length (exclusive of IB headers) must be less
- * than the MTU, and if using the PIO method, entire packet length,
- * including IB headers, must be less than the ipath_piosize value (words).
- * Use of this necessitates including sys/uio.h
- */
-struct __ipath_sendpkt {
-       __u32 sps_flags;        /* flags for packet (TBD) */
-       __u32 sps_cnt;          /* number of entries to use in sps_iov */
-       /* array of iov's describing packet. TEMPORARY */
-       struct ipath_iovec sps_iov[4];
-};
-
-/*
- * diagnostics can send a packet by "writing" one of the following
- * two structs to diag data special file
- * The first is the legacy version for backward compatibility
- */
-struct ipath_diag_pkt {
-       __u32 unit;
-       __u64 data;
-       __u32 len;
-};
-
-/* The second diag_pkt struct is the expanded version that allows
- * more control over the packet, specifically, by allowing a custom
- * pbc (+ static rate) qword, so that special modes and deliberate
- * changes to CRCs can be used. The elements were also re-ordered
- * for better alignment and to avoid padding issues.
- */
-struct ipath_diag_xpkt {
-       __u64 data;
-       __u64 pbc_wd;
-       __u32 unit;
-       __u32 len;
-};
-
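A sketch of the "write a struct to the diag data special file" usage described above, using the expanded ipath_diag_xpkt form; the file-descriptor handling and error convention are illustrative only.

#include <unistd.h>

/* illustrative: push one raw packet through the diag interface */
static int ipath_diag_send(int diag_fd, const void *pkt, __u32 len_bytes,
			   __u32 unit)
{
	struct ipath_diag_xpkt dp = {
		.data   = (__u64) (unsigned long) pkt,
		.pbc_wd = 0,            /* 0 lets the driver choose the PBC word */
		.unit   = unit,
		.len    = len_bytes,    /* must be a whole number of dwords */
	};

	return write(diag_fd, &dp, sizeof(dp)) == sizeof(dp) ? 0 : -1;
}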
-/*
- * Data layout in I2C flash (for GUID, etc.)
- * All fields are little-endian binary unless otherwise stated
- */
-#define IPATH_FLASH_VERSION 2
-struct ipath_flash {
-       /* flash layout version (IPATH_FLASH_VERSION) */
-       __u8 if_fversion;
-       /* checksum protecting if_length bytes */
-       __u8 if_csum;
-       /*
-        * valid length (in use, protected by if_csum), including
-        * if_fversion and if_csum themselves
-        */
-       __u8 if_length;
-       /* the GUID, in network order */
-       __u8 if_guid[8];
-       /* number of GUIDs to use, starting from if_guid */
-       __u8 if_numguid;
-       /* the (last 10 characters of) board serial number, in ASCII */
-       char if_serial[12];
-       /* board mfg date (YYYYMMDD ASCII) */
-       char if_mfgdate[8];
-       /* last board rework/test date (YYYYMMDD ASCII) */
-       char if_testdate[8];
-       /* logging of error counts, TBD */
-       __u8 if_errcntp[4];
-       /* powered on hours, updated at driver unload */
-       __u8 if_powerhour[2];
-       /* ASCII free-form comment field */
-       char if_comment[32];
-       /* Backwards compatible prefix for longer QLogic Serial Numbers */
-       char if_sprefix[4];
-       /* 82 bytes used, min flash size is 128 bytes */
-       __u8 if_future[46];
-};
-
-/*
- * These are the counters implemented in the chip, and are listed in order.
- * The InterCaps naming is taken straight from the chip spec.
- */
-struct infinipath_counters {
-       __u64 LBIntCnt;
-       __u64 LBFlowStallCnt;
-       __u64 TxSDmaDescCnt;    /* was Reserved1 */
-       __u64 TxUnsupVLErrCnt;
-       __u64 TxDataPktCnt;
-       __u64 TxFlowPktCnt;
-       __u64 TxDwordCnt;
-       __u64 TxLenErrCnt;
-       __u64 TxMaxMinLenErrCnt;
-       __u64 TxUnderrunCnt;
-       __u64 TxFlowStallCnt;
-       __u64 TxDroppedPktCnt;
-       __u64 RxDroppedPktCnt;
-       __u64 RxDataPktCnt;
-       __u64 RxFlowPktCnt;
-       __u64 RxDwordCnt;
-       __u64 RxLenErrCnt;
-       __u64 RxMaxMinLenErrCnt;
-       __u64 RxICRCErrCnt;
-       __u64 RxVCRCErrCnt;
-       __u64 RxFlowCtrlErrCnt;
-       __u64 RxBadFormatCnt;
-       __u64 RxLinkProblemCnt;
-       __u64 RxEBPCnt;
-       __u64 RxLPCRCErrCnt;
-       __u64 RxBufOvflCnt;
-       __u64 RxTIDFullErrCnt;
-       __u64 RxTIDValidErrCnt;
-       __u64 RxPKeyMismatchCnt;
-       __u64 RxP0HdrEgrOvflCnt;
-       __u64 RxP1HdrEgrOvflCnt;
-       __u64 RxP2HdrEgrOvflCnt;
-       __u64 RxP3HdrEgrOvflCnt;
-       __u64 RxP4HdrEgrOvflCnt;
-       __u64 RxP5HdrEgrOvflCnt;
-       __u64 RxP6HdrEgrOvflCnt;
-       __u64 RxP7HdrEgrOvflCnt;
-       __u64 RxP8HdrEgrOvflCnt;
-       __u64 RxP9HdrEgrOvflCnt;        /* was Reserved6 */
-       __u64 RxP10HdrEgrOvflCnt;       /* was Reserved7 */
-       __u64 RxP11HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP12HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP13HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP14HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP15HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP16HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 IBStatusChangeCnt;
-       __u64 IBLinkErrRecoveryCnt;
-       __u64 IBLinkDownedCnt;
-       __u64 IBSymbolErrCnt;
-       /* The following are new for IBA7220 */
-       __u64 RxVL15DroppedPktCnt;
-       __u64 RxOtherLocalPhyErrCnt;
-       __u64 PcieRetryBufDiagQwordCnt;
-       __u64 ExcessBufferOvflCnt;
-       __u64 LocalLinkIntegrityErrCnt;
-       __u64 RxVlErrCnt;
-       __u64 RxDlidFltrCnt;
-};
-
-/*
- * The next set of defines are for packet headers, and chip register
- * and memory bits that are visible to and/or used by user-mode software
- * The other bits that are used only by the driver or diags are in
- * ipath_registers.h
- */
-
-/* RcvHdrFlags bits */
-#define INFINIPATH_RHF_LENGTH_MASK 0x7FF
-#define INFINIPATH_RHF_LENGTH_SHIFT 0
-#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
-#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
-#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
-#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
-#define INFINIPATH_RHF_SEQ_MASK 0xF
-#define INFINIPATH_RHF_SEQ_SHIFT 0
-#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
-#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
-#define INFINIPATH_RHF_H_ICRCERR   0x80000000
-#define INFINIPATH_RHF_H_VCRCERR   0x40000000
-#define INFINIPATH_RHF_H_PARITYERR 0x20000000
-#define INFINIPATH_RHF_H_LENERR    0x10000000
-#define INFINIPATH_RHF_H_MTUERR    0x08000000
-#define INFINIPATH_RHF_H_IHDRERR   0x04000000
-#define INFINIPATH_RHF_H_TIDERR    0x02000000
-#define INFINIPATH_RHF_H_MKERR     0x01000000
-#define INFINIPATH_RHF_H_IBERR     0x00800000
-#define INFINIPATH_RHF_H_ERR_MASK  0xFF800000
-#define INFINIPATH_RHF_L_USE_EGR   0x80000000
-#define INFINIPATH_RHF_L_SWA       0x00008000
-#define INFINIPATH_RHF_L_SWB       0x00004000
-
-/* infinipath header fields */
-#define INFINIPATH_I_VERS_MASK 0xF
-#define INFINIPATH_I_VERS_SHIFT 28
-#define INFINIPATH_I_PORT_MASK 0xF
-#define INFINIPATH_I_PORT_SHIFT 24
-#define INFINIPATH_I_TID_MASK 0x7FF
-#define INFINIPATH_I_TID_SHIFT 13
-#define INFINIPATH_I_OFFSET_MASK 0x1FFF
-#define INFINIPATH_I_OFFSET_SHIFT 0
-
-/* K_PktFlags bits */
-#define INFINIPATH_KPF_INTR 0x1
-#define INFINIPATH_KPF_SUBPORT_MASK 0x3
-#define INFINIPATH_KPF_SUBPORT_SHIFT 1
-
-#define INFINIPATH_MAX_SUBPORT 4
-
-/* SendPIO per-buffer control */
-#define INFINIPATH_SP_TEST    0x40
-#define INFINIPATH_SP_TESTEBP 0x20
-#define INFINIPATH_SP_TRIGGER_SHIFT  15
-
-/* SendPIOAvail bits */
-#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
-#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
-
-/* infinipath header format */
-struct ipath_header {
-       /*
-        * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
-        * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
-        * Port 4, TID 11, offset 13.
-        */
-       __le32 ver_port_tid_offset;
-       __le16 chksum;
-       __le16 pkt_flags;
-};
-
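For reference, the INFINIPATH_I_* masks and shifts above pack into ver_port_tid_offset as 4 + 4 + 11 + 13 bits; an illustrative packing helper follows (not taken from the driver).

/* illustrative: build the first header word from its fields */
static inline __le32 ipath_pack_vpto(__u32 vers, __u32 port,
				     __u32 tid, __u32 offset)
{
	__u32 w = ((vers & INFINIPATH_I_VERS_MASK) << INFINIPATH_I_VERS_SHIFT) |
		  ((port & INFINIPATH_I_PORT_MASK) << INFINIPATH_I_PORT_SHIFT) |
		  ((tid & INFINIPATH_I_TID_MASK) << INFINIPATH_I_TID_SHIFT) |
		  ((offset & INFINIPATH_I_OFFSET_MASK) << INFINIPATH_I_OFFSET_SHIFT);

	return __cpu_to_le32(w);
}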
-/* infinipath user message header format.
- * This structure contains the first 4 fields common to all protocols
- * that employ infinipath.
- */
-struct ipath_message_header {
-       __be16 lrh[4];
-       __be32 bth[3];
-       /* fields below this point are in host byte order */
-       struct ipath_header iph;
-       __u8 sub_opcode;
-};
-
-/* infinipath ethernet header format */
-struct ether_header {
-       __be16 lrh[4];
-       __be32 bth[3];
-       struct ipath_header iph;
-       __u8 sub_opcode;
-       __u8 cmd;
-       __be16 lid;
-       __u16 mac[3];
-       __u8 frag_num;
-       __u8 seq_num;
-       __le32 len;
-       /* MUST be of word size due to PIO write requirements */
-       __le32 csum;
-       __le16 csum_offset;
-       __le16 flags;
-       __u16 first_2_bytes;
-       __u8 unused[2];         /* currently unused */
-};
-
-
-/* IB - LRH header consts */
-#define IPATH_LRH_GRH 0x0003   /* 1. word of IB LRH - next header: GRH */
-#define IPATH_LRH_BTH 0x0002   /* 1. word of IB LRH - next header: BTH */
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define IPATH_DEFAULT_P_KEY 0xFFFF
-#define IPATH_PERMISSIVE_LID 0xFFFF
-#define IPATH_AETH_CREDIT_SHIFT 24
-#define IPATH_AETH_CREDIT_MASK 0x1F
-#define IPATH_AETH_CREDIT_INVAL 0x1F
-#define IPATH_PSN_MASK 0xFFFFFF
-#define IPATH_MSN_MASK 0xFFFFFF
-#define IPATH_QPN_MASK 0xFFFFFF
-#define IPATH_MULTICAST_LID_BASE 0xC000
-#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
-#define IPATH_MULTICAST_QPN 0xFFFFFF
-
-/* Receive Header Queue: receive type (from infinipath) */
-#define RCVHQ_RCV_TYPE_EXPECTED  0
-#define RCVHQ_RCV_TYPE_EAGER     1
-#define RCVHQ_RCV_TYPE_NON_KD    2
-#define RCVHQ_RCV_TYPE_ERROR     3
-
-
-/* sub OpCodes - ith4x  */
-#define IPATH_ITH4X_OPCODE_ENCAP 0x81
-#define IPATH_ITH4X_OPCODE_LID_ARP 0x82
-
-#define IPATH_HEADER_QUEUE_WORDS 9
-
-/* functions for extracting fields from rcvhdrq entries for the driver */
-static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
-{
-       return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
-}
-
-static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
-{
-       return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
-           & INFINIPATH_RHF_RCVTYPE_MASK;
-}
-
-static inline __u32 ipath_hdrget_length_in_bytes(const __le32 * rbuf)
-{
-       return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
-               & INFINIPATH_RHF_LENGTH_MASK) << 2;
-}
-
-static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
-{
-       return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
-           & INFINIPATH_RHF_EGRINDEX_MASK;
-}
-
-static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
-{
-       return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
-               & INFINIPATH_RHF_SEQ_MASK;
-}
-
-static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
-{
-       return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
-               & INFINIPATH_RHF_HDRQ_OFFSET_MASK;
-}
-
-static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
-{
-       return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
-}
-
-static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
-{
-       return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
-           & INFINIPATH_I_VERS_MASK;
-}
-
-#endif                         /* _IPATH_COMMON_H */
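As a small driver-side usage sketch (hypothetical, assuming the usual kernel headers) of the rcvhdrq accessors above, where rbuf points at the two receive-header-flags words of one entry:

/* hypothetical: summarize one rcvhdrq entry using the accessors above */
static void ipath_dump_rhf(const __le32 *rbuf)
{
	__u32 errs  = ipath_hdrget_err_flags(rbuf);
	__u32 rtype = ipath_hdrget_rcv_type(rbuf);
	__u32 len   = ipath_hdrget_length_in_bytes(rbuf);

	if (errs)
		pr_debug("rhf error 0x%x, type %u, len %u\n", errs, rtype, len);
	else if (rtype == RCVHQ_RCV_TYPE_EAGER)
		pr_debug("eager pkt, egr index %u, len %u\n",
			 ipath_hdrget_index(rbuf), len);
}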
diff --git a/drivers/staging/rdma/ipath/ipath_cq.c b/drivers/staging/rdma/ipath/ipath_cq.c
deleted file mode 100644 (file)
index e9dd911..0000000
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_cq_enter - add a new entry to the completion queue
- * @cq: completion queue
- * @entry: work completion entry to add
- * @solicited: true if @entry is a solicited entry
- *
- * This may be called with qp->s_lock held.
- */
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
-{
-       struct ipath_cq_wc *wc;
-       unsigned long flags;
-       u32 head;
-       u32 next;
-
-       spin_lock_irqsave(&cq->lock, flags);
-
-       /*
-        * Note that the head pointer might be writable by user processes.
-        * Take care to verify it is a sane value.
-        */
-       wc = cq->queue;
-       head = wc->head;
-       if (head >= (unsigned) cq->ibcq.cqe) {
-               head = cq->ibcq.cqe;
-               next = 0;
-       } else
-               next = head + 1;
-       if (unlikely(next == wc->tail)) {
-               spin_unlock_irqrestore(&cq->lock, flags);
-               if (cq->ibcq.event_handler) {
-                       struct ib_event ev;
-
-                       ev.device = cq->ibcq.device;
-                       ev.element.cq = &cq->ibcq;
-                       ev.event = IB_EVENT_CQ_ERR;
-                       cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
-               }
-               return;
-       }
-       if (cq->ip) {
-               wc->uqueue[head].wr_id = entry->wr_id;
-               wc->uqueue[head].status = entry->status;
-               wc->uqueue[head].opcode = entry->opcode;
-               wc->uqueue[head].vendor_err = entry->vendor_err;
-               wc->uqueue[head].byte_len = entry->byte_len;
-               wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
-               wc->uqueue[head].qp_num = entry->qp->qp_num;
-               wc->uqueue[head].src_qp = entry->src_qp;
-               wc->uqueue[head].wc_flags = entry->wc_flags;
-               wc->uqueue[head].pkey_index = entry->pkey_index;
-               wc->uqueue[head].slid = entry->slid;
-               wc->uqueue[head].sl = entry->sl;
-               wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
-               wc->uqueue[head].port_num = entry->port_num;
-               /* Make sure entry is written before the head index. */
-               smp_wmb();
-       } else
-               wc->kqueue[head] = *entry;
-       wc->head = next;
-
-       if (cq->notify == IB_CQ_NEXT_COMP ||
-           (cq->notify == IB_CQ_SOLICITED && solicited)) {
-               cq->notify = IB_CQ_NONE;
-               cq->triggered++;
-               /*
-                * This will cause send_complete() to be called in
-                * another thread.
-                */
-               tasklet_hi_schedule(&cq->comptask);
-       }
-
-       spin_unlock_irqrestore(&cq->lock, flags);
-
-       if (entry->status != IB_WC_SUCCESS)
-               to_idev(cq->ibcq.device)->n_wqe_errs++;
-}
-
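The head/next handling above relies on the ring holding ibcq.cqe + 1 slots with indices running 0..cqe inclusive, and treats next == tail as full; the same wrap rule in isolation (illustrative only, not part of the driver):

/* advance a CQ ring index under the 0..cqe inclusive convention */
static inline u32 ipath_cq_next_index(u32 idx, u32 cqe)
{
	return (idx >= cqe) ? 0 : idx + 1;
}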
-/**
- * ipath_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context.  Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
-       struct ipath_cq *cq = to_icq(ibcq);
-       struct ipath_cq_wc *wc;
-       unsigned long flags;
-       int npolled;
-       u32 tail;
-
-       /* The kernel can only poll a kernel completion queue */
-       if (cq->ip) {
-               npolled = -EINVAL;
-               goto bail;
-       }
-
-       spin_lock_irqsave(&cq->lock, flags);
-
-       wc = cq->queue;
-       tail = wc->tail;
-       if (tail > (u32) cq->ibcq.cqe)
-               tail = (u32) cq->ibcq.cqe;
-       for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
-               if (tail == wc->head)
-                       break;
-               /* The kernel doesn't need a RMB since it has the lock. */
-               *entry = wc->kqueue[tail];
-               if (tail >= cq->ibcq.cqe)
-                       tail = 0;
-               else
-                       tail++;
-       }
-       wc->tail = tail;
-
-       spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
-       return npolled;
-}
-
-static void send_complete(unsigned long data)
-{
-       struct ipath_cq *cq = (struct ipath_cq *)data;
-
-       /*
-        * The completion handler will most likely rearm the notification
-        * and poll for all pending entries.  If a new completion entry
-        * is added while we are in this routine, tasklet_hi_schedule()
-        * won't call us again until we return so we check triggered to
-        * see if we need to call the handler again.
-        */
-       for (;;) {
-               u8 triggered = cq->triggered;
-
-               cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
-
-               if (cq->triggered == triggered)
-                       return;
-       }
-}
-
-/**
- * ipath_create_cq - create a completion queue
- * @ibdev: the device this completion queue is attached to
- * @attr: creation attributes
- * @context: unused by the InfiniPath driver
- * @udata: unused by the InfiniPath driver
- *
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
- * Called by ib_create_cq() in the generic verbs code.
- */
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
-                             const struct ib_cq_init_attr *attr,
-                             struct ib_ucontext *context,
-                             struct ib_udata *udata)
-{
-       int entries = attr->cqe;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cq *cq;
-       struct ipath_cq_wc *wc;
-       struct ib_cq *ret;
-       u32 sz;
-
-       if (attr->flags)
-               return ERR_PTR(-EINVAL);
-
-       if (entries < 1 || entries > ib_ipath_max_cqes) {
-               ret = ERR_PTR(-EINVAL);
-               goto done;
-       }
-
-       /* Allocate the completion queue structure. */
-       cq = kmalloc(sizeof(*cq), GFP_KERNEL);
-       if (!cq) {
-               ret = ERR_PTR(-ENOMEM);
-               goto done;
-       }
-
-       /*
-        * Allocate the completion queue entries and head/tail pointers.
-        * This is allocated separately so that it can be resized and
-        * also mapped into user space.
-        * We need to use vmalloc() in order to support mmap and large
-        * numbers of entries.
-        */
-       sz = sizeof(*wc);
-       if (udata && udata->outlen >= sizeof(__u64))
-               sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
-       else
-               sz += sizeof(struct ib_wc) * (entries + 1);
-       wc = vmalloc_user(sz);
-       if (!wc) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_cq;
-       }
-
-       /*
-        * Return the address of the WC as the offset to mmap.
-        * See ipath_mmap() for details.
-        */
-       if (udata && udata->outlen >= sizeof(__u64)) {
-               int err;
-
-               cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
-               if (!cq->ip) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail_wc;
-               }
-
-               err = ib_copy_to_udata(udata, &cq->ip->offset,
-                                      sizeof(cq->ip->offset));
-               if (err) {
-                       ret = ERR_PTR(err);
-                       goto bail_ip;
-               }
-       } else
-               cq->ip = NULL;
-
-       spin_lock(&dev->n_cqs_lock);
-       if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
-               spin_unlock(&dev->n_cqs_lock);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_ip;
-       }
-
-       dev->n_cqs_allocated++;
-       spin_unlock(&dev->n_cqs_lock);
-
-       if (cq->ip) {
-               spin_lock_irq(&dev->pending_lock);
-               list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-       }
-
-       /*
-        * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
-        * The number of entries must be >= the number requested,
-        * otherwise an error is returned.
-        */
-       cq->ibcq.cqe = entries;
-       cq->notify = IB_CQ_NONE;
-       cq->triggered = 0;
-       spin_lock_init(&cq->lock);
-       tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
-       wc->head = 0;
-       wc->tail = 0;
-       cq->queue = wc;
-
-       ret = &cq->ibcq;
-
-       goto done;
-
-bail_ip:
-       kfree(cq->ip);
-bail_wc:
-       vfree(wc);
-bail_cq:
-       kfree(cq);
-done:
-       return ret;
-}
-
-/**
- * ipath_destroy_cq - destroy a completion queue
- * @ibcq: the completion queue to destroy.
- *
- * Returns 0 for success.
- *
- * Called by ib_destroy_cq() in the generic verbs code.
- */
-int ipath_destroy_cq(struct ib_cq *ibcq)
-{
-       struct ipath_ibdev *dev = to_idev(ibcq->device);
-       struct ipath_cq *cq = to_icq(ibcq);
-
-       tasklet_kill(&cq->comptask);
-       spin_lock(&dev->n_cqs_lock);
-       dev->n_cqs_allocated--;
-       spin_unlock(&dev->n_cqs_lock);
-       if (cq->ip)
-               kref_put(&cq->ip->ref, ipath_release_mmap_info);
-       else
-               vfree(cq->queue);
-       kfree(cq);
-
-       return 0;
-}
-
-/**
- * ipath_req_notify_cq - change the notification type for a completion queue
- * @ibcq: the completion queue
- * @notify_flags: the type of notification to request
- *
- * Returns 0 for success.
- *
- * This may be called from interrupt context.  Also called by
- * ib_req_notify_cq() in the generic verbs code.
- */
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
-       struct ipath_cq *cq = to_icq(ibcq);
-       unsigned long flags;
-       int ret = 0;
-
-       spin_lock_irqsave(&cq->lock, flags);
-       /*
-        * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
-        * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
-        */
-       if (cq->notify != IB_CQ_NEXT_COMP)
-               cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
-
-       if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
-           cq->queue->head != cq->queue->tail)
-               ret = 1;
-
-       spin_unlock_irqrestore(&cq->lock, flags);
-
-       return ret;
-}
-
-/**
- * ipath_resize_cq - change the size of the CQ
- * @ibcq: the completion queue
- *
- * Returns 0 for success.
- */
-int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
-{
-       struct ipath_cq *cq = to_icq(ibcq);
-       struct ipath_cq_wc *old_wc;
-       struct ipath_cq_wc *wc;
-       u32 head, tail, n;
-       int ret;
-       u32 sz;
-
-       if (cqe < 1 || cqe > ib_ipath_max_cqes) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       /*
-        * Need to use vmalloc() if we want to support large #s of entries.
-        */
-       sz = sizeof(*wc);
-       if (udata && udata->outlen >= sizeof(__u64))
-               sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
-       else
-               sz += sizeof(struct ib_wc) * (cqe + 1);
-       wc = vmalloc_user(sz);
-       if (!wc) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       /* Check that we can write the offset to mmap. */
-       if (udata && udata->outlen >= sizeof(__u64)) {
-               __u64 offset = 0;
-
-               ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
-               if (ret)
-                       goto bail_free;
-       }
-
-       spin_lock_irq(&cq->lock);
-       /*
-        * Make sure head and tail are sane since they
-        * might be user writable.
-        */
-       old_wc = cq->queue;
-       head = old_wc->head;
-       if (head > (u32) cq->ibcq.cqe)
-               head = (u32) cq->ibcq.cqe;
-       tail = old_wc->tail;
-       if (tail > (u32) cq->ibcq.cqe)
-               tail = (u32) cq->ibcq.cqe;
-       if (head < tail)
-               n = cq->ibcq.cqe + 1 + head - tail;
-       else
-               n = head - tail;
-       if (unlikely((u32)cqe < n)) {
-               ret = -EINVAL;
-               goto bail_unlock;
-       }
-       for (n = 0; tail != head; n++) {
-               if (cq->ip)
-                       wc->uqueue[n] = old_wc->uqueue[tail];
-               else
-                       wc->kqueue[n] = old_wc->kqueue[tail];
-               if (tail == (u32) cq->ibcq.cqe)
-                       tail = 0;
-               else
-                       tail++;
-       }
-       cq->ibcq.cqe = cqe;
-       wc->head = n;
-       wc->tail = 0;
-       cq->queue = wc;
-       spin_unlock_irq(&cq->lock);
-
-       vfree(old_wc);
-
-       if (cq->ip) {
-               struct ipath_ibdev *dev = to_idev(ibcq->device);
-               struct ipath_mmap_info *ip = cq->ip;
-
-               ipath_update_mmap_info(dev, ip, sz, wc);
-
-               /*
-                * Return the offset to mmap.
-                * See ipath_mmap() for details.
-                */
-               if (udata && udata->outlen >= sizeof(__u64)) {
-                       ret = ib_copy_to_udata(udata, &ip->offset,
-                                              sizeof(ip->offset));
-                       if (ret)
-                               goto bail;
-               }
-
-               spin_lock_irq(&dev->pending_lock);
-               if (list_empty(&ip->pending_mmaps))
-                       list_add(&ip->pending_mmaps, &dev->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-       }
-
-       ret = 0;
-       goto bail;
-
-bail_unlock:
-       spin_unlock_irq(&cq->lock);
-bail_free:
-       vfree(wc);
-bail:
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_debug.h b/drivers/staging/rdma/ipath/ipath_debug.h
deleted file mode 100644 (file)
index 65926cd..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_DEBUG_H
-#define _IPATH_DEBUG_H
-
-#ifndef _IPATH_DEBUGGING       /* debugging enabled or not */
-#define _IPATH_DEBUGGING 1
-#endif
-
-#if _IPATH_DEBUGGING
-
-/*
- * Mask values for debugging.  The scheme allows us to compile out any
- * of the debug tracing stuff, and if compiled in, to enable or disable
- * dynamically.  This can be set at modprobe time also:
- *      modprobe infinipath.ko infinipath_debug=7
- */
-
-#define __IPATH_INFO        0x1        /* generic low verbosity stuff */
-#define __IPATH_DBG         0x2        /* generic debug */
-#define __IPATH_TRSAMPLE    0x8        /* generate trace buffer sample entries */
-/* leave some low verbosity spots open */
-#define __IPATH_VERBDBG     0x40       /* very verbose debug */
-#define __IPATH_PKTDBG      0x80       /* print packet data */
-/* print process startup (init)/exit messages */
-#define __IPATH_PROCDBG     0x100
-/* print mmap/fault stuff, not using VDBG any more */
-#define __IPATH_MMDBG       0x200
-#define __IPATH_ERRPKTDBG   0x400
-#define __IPATH_USER_SEND   0x1000     /* use user mode send */
-#define __IPATH_KERNEL_SEND 0x2000     /* use kernel mode send */
-#define __IPATH_EPKTDBG     0x4000     /* print ethernet packet data */
-#define __IPATH_IPATHDBG    0x10000    /* Ethernet (IPATH) gen debug */
-#define __IPATH_IPATHWARN   0x20000    /* Ethernet (IPATH) warnings */
-#define __IPATH_IPATHERR    0x40000    /* Ethernet (IPATH) errors */
-#define __IPATH_IPATHPD     0x80000    /* Ethernet (IPATH) packet dump */
-#define __IPATH_IPATHTABLE  0x100000   /* Ethernet (IPATH) table dump */
-#define __IPATH_LINKVERBDBG 0x200000   /* very verbose linkchange debug */
-
-#else                          /* _IPATH_DEBUGGING */
-
-/*
- * define all of these even with debugging off, for the few places that do
- * if(infinipath_debug & _IPATH_xyzzy), but in a way that will make the
- * compiler eliminate the code
- */
-
-#define __IPATH_INFO      0x0  /* generic low verbosity stuff */
-#define __IPATH_DBG       0x0  /* generic debug */
-#define __IPATH_TRSAMPLE  0x0  /* generate trace buffer sample entries */
-#define __IPATH_VERBDBG   0x0  /* very verbose debug */
-#define __IPATH_PKTDBG    0x0  /* print packet data */
-#define __IPATH_PROCDBG   0x0  /* process startup (init)/exit messages */
-/* print mmap/fault stuff, not using VDBG any more */
-#define __IPATH_MMDBG     0x0
-#define __IPATH_EPKTDBG   0x0  /* print ethernet packet data */
-#define __IPATH_IPATHDBG  0x0  /* Ethernet (IPATH) table dump on */
-#define __IPATH_IPATHWARN 0x0  /* Ethernet (IPATH) warnings on   */
-#define __IPATH_IPATHERR  0x0  /* Ethernet (IPATH) errors on   */
-#define __IPATH_IPATHPD   0x0  /* Ethernet (IPATH) packet dump on   */
-#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) packet dump on   */
-#define __IPATH_LINKVERBDBG 0x0        /* very verbose linkchange debug */
-
-#endif                         /* _IPATH_DEBUGGING */
-
-#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
-
-#endif                         /* _IPATH_DEBUG_H */
diff --git a/drivers/staging/rdma/ipath/ipath_diag.c b/drivers/staging/rdma/ipath/ipath_diag.c
deleted file mode 100644 (file)
index 45802e9..0000000
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains support for diagnostic functions.  It is accessed by
- * opening the ipath_diag device, normally minor number 129.  Diagnostic use
- * of the InfiniPath chip may render the chip or board unusable until the
- * driver is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/export.h>
-#include <asm/uaccess.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-
-int ipath_diag_inuse;
-static int diag_set_link;
-
-static int ipath_diag_open(struct inode *in, struct file *fp);
-static int ipath_diag_release(struct inode *in, struct file *fp);
-static ssize_t ipath_diag_read(struct file *fp, char __user *data,
-                              size_t count, loff_t *off);
-static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
-                               size_t count, loff_t *off);
-
-static const struct file_operations diag_file_ops = {
-       .owner = THIS_MODULE,
-       .write = ipath_diag_write,
-       .read = ipath_diag_read,
-       .open = ipath_diag_open,
-       .release = ipath_diag_release,
-       .llseek = default_llseek,
-};
-
-static ssize_t ipath_diagpkt_write(struct file *fp,
-                                  const char __user *data,
-                                  size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
-       .owner = THIS_MODULE,
-       .write = ipath_diagpkt_write,
-       .llseek = noop_llseek,
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev *diagpkt_cdev;
-static struct device *diagpkt_dev;
-
-int ipath_diag_add(struct ipath_devdata *dd)
-{
-       char name[16];
-       int ret = 0;
-
-       if (atomic_inc_return(&diagpkt_count) == 1) {
-               ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR,
-                                     "ipath_diagpkt", &diagpkt_file_ops,
-                                     &diagpkt_cdev, &diagpkt_dev);
-
-               if (ret) {
-                       ipath_dev_err(dd, "Couldn't create ipath_diagpkt "
-                                     "device: %d", ret);
-                       goto done;
-               }
-       }
-
-       snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
-
-       ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
-                             &diag_file_ops, &dd->diag_cdev,
-                             &dd->diag_dev);
-       if (ret)
-               ipath_dev_err(dd, "Couldn't create %s device: %d",
-                             name, ret);
-
-done:
-       return ret;
-}
-
-void ipath_diag_remove(struct ipath_devdata *dd)
-{
-       if (atomic_dec_and_test(&diagpkt_count))
-               ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_dev);
-
-       ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_dev);
-}
-
-/**
- * ipath_read_umem64 - read a 64-bit quantity from the chip into user space
- * @dd: the infinipath device
- * @uaddr: the location to store the data in user memory
- * @caddr: the source chip address (full pointer, not offset)
- * @count: number of bytes to copy (multiple of 32 bits)
- *
- * This function also localizes all chip memory accesses.
- * The copy should be written such that we read full cacheline packets
- * from the chip.  This is usually used for a single qword
- *
- * NOTE:  This assumes the chip address is 64-bit aligned.
- */
-static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
-                            const void __iomem *caddr, size_t count)
-{
-       const u64 __iomem *reg_addr = caddr;
-       const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
-       int ret;
-
-       /* not very efficient, but it works for now */
-       if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       while (reg_addr < reg_end) {
-               u64 data = readq(reg_addr);
-               if (copy_to_user(uaddr, &data, sizeof(u64))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-               reg_addr++;
-               uaddr += sizeof(u64);
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_write_umem64 - write a 64-bit quantity to the chip from user space
- * @dd: the infinipath device
- * @caddr: the destination chip address (full pointer, not offset)
- * @uaddr: the source of the data in user memory
- * @count: the number of bytes to copy (multiple of 32 bits)
- *
- * This is usually used for a single qword
- * NOTE:  This assumes the chip address is 64-bit aligned.
- */
-
-static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
-                             const void __user *uaddr, size_t count)
-{
-       u64 __iomem *reg_addr = caddr;
-       const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
-       int ret;
-
-       /* not very efficient, but it works for now */
-       if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       while (reg_addr < reg_end) {
-               u64 data;
-               if (copy_from_user(&data, uaddr, sizeof(data))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-               writeq(data, reg_addr);
-
-               reg_addr++;
-               uaddr += sizeof(u64);
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_read_umem32 - read a 32-bit quantity from the chip into user space
- * @dd: the infinipath device
- * @uaddr: the location to store the data in user memory
- * @caddr: the source chip address (full pointer, not offset)
- * @count: number of bytes to copy
- *
- * read 32 bit values, not 64 bit; for memories that only
- * support 32 bit reads; usually a single dword.
- */
-static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
-                            const void __iomem *caddr, size_t count)
-{
-       const u32 __iomem *reg_addr = caddr;
-       const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
-       int ret;
-
-       if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
-           reg_end > (u32 __iomem *) dd->ipath_kregend) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       /* not very efficient, but it works for now */
-       while (reg_addr < reg_end) {
-               u32 data = readl(reg_addr);
-               if (copy_to_user(uaddr, &data, sizeof(data))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-
-               reg_addr++;
-               uaddr += sizeof(u32);
-
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_write_umem32 - write a 32-bit quantity to the chip from user space
- * @dd: the infinipath device
- * @caddr: the destination chip address (full pointer, not offset)
- * @uaddr: the source of the data in user memory
- * @count: number of bytes to copy
- *
- * write 32 bit values, not 64 bit; for memories that only
- * support 32 bit write; usually a single dword.
- */
-
-static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
-                             const void __user *uaddr, size_t count)
-{
-       u32 __iomem *reg_addr = caddr;
-       const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
-       int ret;
-
-       if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
-           reg_end > (u32 __iomem *) dd->ipath_kregend) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       while (reg_addr < reg_end) {
-               u32 data;
-               if (copy_from_user(&data, uaddr, sizeof(data))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-               writel(data, reg_addr);
-
-               reg_addr++;
-               uaddr += sizeof(u32);
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-static int ipath_diag_open(struct inode *in, struct file *fp)
-{
-       int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
-       struct ipath_devdata *dd;
-       int ret;
-
-       mutex_lock(&ipath_mutex);
-
-       if (ipath_diag_inuse) {
-               ret = -EBUSY;
-               goto bail;
-       }
-
-       dd = ipath_lookup(unit);
-
-       if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
-           !dd->ipath_kregbase) {
-               ret = -ENODEV;
-               goto bail;
-       }
-
-       fp->private_data = dd;
-       ipath_diag_inuse = -2;
-       diag_set_link = 0;
-       ret = 0;
-
-       /* Only expose a way to reset the device if we
-          make it into diag mode. */
-       ipath_expose_reset(&dd->pcidev->dev);
-
-bail:
-       mutex_unlock(&ipath_mutex);
-
-       return ret;
-}
-
-/**
- * ipath_diagpkt_write - write an IB packet
- * @fp: the diag data device file pointer
- * @data: ipath_diag_pkt structure saying where to get the packet
- * @count: size of data to write
- * @off: unused by this code
- */
-static ssize_t ipath_diagpkt_write(struct file *fp,
-                                  const char __user *data,
-                                  size_t count, loff_t *off)
-{
-       u32 __iomem *piobuf;
-       u32 plen, pbufn, maxlen_reserve;
-       struct ipath_diag_pkt odp;
-       struct ipath_diag_xpkt dp;
-       u32 *tmpbuf = NULL;
-       struct ipath_devdata *dd;
-       ssize_t ret = 0;
-       u64 val;
-       u32 l_state, lt_state; /* LinkState, LinkTrainingState */
-
-
-       if (count == sizeof(dp)) {
-               if (copy_from_user(&dp, data, sizeof(dp))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-       } else if (count == sizeof(odp)) {
-               if (copy_from_user(&odp, data, sizeof(odp))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-               dp.len = odp.len;
-               dp.unit = odp.unit;
-               dp.data = odp.data;
-               dp.pbc_wd = 0;
-       } else {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       /* send count must be an exact number of dwords */
-       if (dp.len & 3) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       plen = dp.len >> 2;
-
-       dd = ipath_lookup(dp.unit);
-       if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
-           !dd->ipath_kregbase) {
-               ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
-                          dp.unit);
-               ret = -ENODEV;
-               goto bail;
-       }
-
-       if (ipath_diag_inuse && !diag_set_link &&
-           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-               diag_set_link = 1;
-               ipath_cdbg(VERBOSE, "Trying to set to set link active for "
-                          "diag pkt\n");
-               ipath_set_linkstate(dd, IPATH_IB_LINKARM);
-               ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
-       }
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               /* no hardware, freeze, etc. */
-               ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
-               ret = -ENODEV;
-               goto bail;
-       }
-       /*
-        * Want to skip check for l_state if using custom PBC,
-        * because we might be trying to force an SM packet out.
-        * As a first cut, skip _all_ state checking in that case.
-        */
-       val = ipath_ib_state(dd, dd->ipath_lastibcstat);
-       lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
-       l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
-       if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
-           (val != dd->ib_init && val != dd->ib_arm &&
-           val != dd->ib_active))) {
-               ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
-                          dd->ipath_unit, (unsigned long long) val);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       /*
-        * need total length before first word written, plus 2 Dwords. One Dword
-        * is for padding so we get the full user data when not aligned on
-        * a word boundary. The other Dword is to make sure we have room for the
-        * ICRC which gets tacked on later.
-        */
-       maxlen_reserve = 2 * sizeof(u32);
-       if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
-               ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
-                         dp.len, dd->ipath_ibmaxlen);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       plen = sizeof(u32) + dp.len;
-
-       tmpbuf = vmalloc(plen);
-       if (!tmpbuf) {
-               dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
-                        "failing\n");
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       if (copy_from_user(tmpbuf,
-                          (const void __user *) (unsigned long) dp.data,
-                          dp.len)) {
-               ret = -EFAULT;
-               goto bail;
-       }
-
-       plen >>= 2;             /* in dwords */
-
-       piobuf = ipath_getpiobuf(dd, plen, &pbufn);
-       if (!piobuf) {
-               ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
-                          dd->ipath_unit);
-               ret = -EBUSY;
-               goto bail;
-       }
-       /* disarm it just to be extra sure */
-       ipath_disarm_piobufs(dd, pbufn, 1);
-
-       if (ipath_debug & __IPATH_PKTDBG)
-               ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
-                          dd->ipath_unit, plen - 1, pbufn);
-
-       if (dp.pbc_wd == 0)
-               dp.pbc_wd = plen;
-       writeq(dp.pbc_wd, piobuf);
-       /*
-        * Copy all but the trigger word, then flush, so it's written
-        * to chip before trigger word, then write trigger word, then
-        * flush again, so packet is sent.
-        */
-       if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
-               ipath_flush_wc();
-               __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
-               ipath_flush_wc();
-               __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
-       } else
-               __iowrite32_copy(piobuf + 2, tmpbuf, plen);
-
-       ipath_flush_wc();
-
-       ret = sizeof(dp);
-
-bail:
-       vfree(tmpbuf);
-       return ret;
-}
-
-static int ipath_diag_release(struct inode *in, struct file *fp)
-{
-       mutex_lock(&ipath_mutex);
-       ipath_diag_inuse = 0;
-       fp->private_data = NULL;
-       mutex_unlock(&ipath_mutex);
-       return 0;
-}
-
-static ssize_t ipath_diag_read(struct file *fp, char __user *data,
-                              size_t count, loff_t *off)
-{
-       struct ipath_devdata *dd = fp->private_data;
-       void __iomem *kreg_base;
-       ssize_t ret;
-
-       kreg_base = dd->ipath_kregbase;
-
-       if (count == 0)
-               ret = 0;
-       else if ((count % 4) || (*off % 4))
-               /* address or length is not 32-bit aligned, hence invalid */
-               ret = -EINVAL;
-       else if (ipath_diag_inuse < 1 && (*off || count != 8))
-               ret = -EINVAL;  /* prevent cat /dev/ipath_diag* */
-       else if ((count % 8) || (*off % 8))
-               /* address or length not 64-bit aligned; do 32-bit reads */
-               ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
-       else
-               ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
-
-       if (ret >= 0) {
-               *off += count;
-               ret = count;
-               if (ipath_diag_inuse == -2)
-                       ipath_diag_inuse++;
-       }
-
-       return ret;
-}
-
-static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
-                               size_t count, loff_t *off)
-{
-       struct ipath_devdata *dd = fp->private_data;
-       void __iomem *kreg_base;
-       ssize_t ret;
-
-       kreg_base = dd->ipath_kregbase;
-
-       if (count == 0)
-               ret = 0;
-       else if ((count % 4) || (*off % 4))
-               /* address or length is not 32-bit aligned, hence invalid */
-               ret = -EINVAL;
-       else if ((ipath_diag_inuse == -1 && (*off || count != 8)) ||
-                ipath_diag_inuse == -2)  /* read qw off 0, write qw off 0 */
-               ret = -EINVAL;  /* before any other write allowed */
-       else if ((count % 8) || (*off % 8))
-               /* address or length not 64-bit aligned; do 32-bit writes */
-               ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
-       else
-               ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
-
-       if (ret >= 0) {
-               *off += count;
-               ret = count;
-               if (ipath_diag_inuse == -1)
-                       ipath_diag_inuse = 1; /* all read/write OK now */
-       }
-
-       return ret;
-}
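
The deleted ipath_diag_read() and ipath_diag_write() handlers above gate register access behind a small state machine in ipath_diag_inuse: ipath_diag_open() sets it to -2, the first successful 8-byte read at offset 0 advances it to -1, and the first successful 8-byte write at offset 0 advances it to 1, after which any 32-bit or 64-bit aligned access is accepted. A minimal userspace sketch of that unlock sequence follows; the /dev/ipath_diag0 node name and the choice to write back the value just read are assumptions for illustration, not part of this patch.

/* Hypothetical client of the diag interface removed above: perform the
 * quadword read then quadword write at register offset 0 that the deleted
 * handlers require before any other access is allowed. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ipath_diag0", O_RDWR);	/* node name assumed */
	uint64_t qw;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* first access: 8-byte read at offset 0 (ipath_diag_inuse -2 -> -1) */
	if (pread(fd, &qw, sizeof(qw), 0) != (ssize_t) sizeof(qw))
		perror("initial quadword read");
	/* second access: 8-byte write at offset 0 (ipath_diag_inuse -1 -> 1) */
	else if (pwrite(fd, &qw, sizeof(qw), 0) != (ssize_t) sizeof(qw))
		perror("initial quadword write");
	/* from here on, any 4- or 8-byte aligned read/write is accepted */
	close(fd);
	return 0;
}
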
diff --git a/drivers/staging/rdma/ipath/ipath_dma.c b/drivers/staging/rdma/ipath/ipath_dma.c
deleted file mode 100644 (file)
index 123a8c0..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/scatterlist.h>
-#include <linux/gfp.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_verbs.h"
-
-#define BAD_DMA_ADDRESS ((u64) 0)
-
-/*
- * The following functions implement driver specific replacements
- * for the ib_dma_*() functions.
- *
- * These functions return kernel virtual addresses instead of
- * device bus addresses since the driver uses the CPU to copy
- * data instead of using hardware DMA.
- */
-
-static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
-       return dma_addr == BAD_DMA_ADDRESS;
-}
-
-static u64 ipath_dma_map_single(struct ib_device *dev,
-                               void *cpu_addr, size_t size,
-                               enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-       return (u64) cpu_addr;
-}
-
-static void ipath_dma_unmap_single(struct ib_device *dev,
-                                  u64 addr, size_t size,
-                                  enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-}
-
-static u64 ipath_dma_map_page(struct ib_device *dev,
-                             struct page *page,
-                             unsigned long offset,
-                             size_t size,
-                             enum dma_data_direction direction)
-{
-       u64 addr;
-
-       BUG_ON(!valid_dma_direction(direction));
-
-       if (offset + size > PAGE_SIZE) {
-               addr = BAD_DMA_ADDRESS;
-               goto done;
-       }
-
-       addr = (u64) page_address(page);
-       if (addr)
-               addr += offset;
-       /* TODO: handle highmem pages */
-
-done:
-       return addr;
-}
-
-static void ipath_dma_unmap_page(struct ib_device *dev,
-                                u64 addr, size_t size,
-                                enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-}
-
-static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
-                       int nents, enum dma_data_direction direction)
-{
-       struct scatterlist *sg;
-       u64 addr;
-       int i;
-       int ret = nents;
-
-       BUG_ON(!valid_dma_direction(direction));
-
-       for_each_sg(sgl, sg, nents, i) {
-               addr = (u64) page_address(sg_page(sg));
-               /* TODO: handle highmem pages */
-               if (!addr) {
-                       ret = 0;
-                       break;
-               }
-               sg->dma_address = addr + sg->offset;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-               sg->dma_length = sg->length;
-#endif
-       }
-       return ret;
-}
-
-static void ipath_unmap_sg(struct ib_device *dev,
-                          struct scatterlist *sg, int nents,
-                          enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-}
-
-static void ipath_sync_single_for_cpu(struct ib_device *dev,
-                                     u64 addr,
-                                     size_t size,
-                                     enum dma_data_direction dir)
-{
-}
-
-static void ipath_sync_single_for_device(struct ib_device *dev,
-                                        u64 addr,
-                                        size_t size,
-                                        enum dma_data_direction dir)
-{
-}
-
-static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
-                                     u64 *dma_handle, gfp_t flag)
-{
-       struct page *p;
-       void *addr = NULL;
-
-       p = alloc_pages(flag, get_order(size));
-       if (p)
-               addr = page_address(p);
-       if (dma_handle)
-               *dma_handle = (u64) addr;
-       return addr;
-}
-
-static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
-                                   void *cpu_addr, u64 dma_handle)
-{
-       free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
-       .mapping_error = ipath_mapping_error,
-       .map_single = ipath_dma_map_single,
-       .unmap_single = ipath_dma_unmap_single,
-       .map_page = ipath_dma_map_page,
-       .unmap_page = ipath_dma_unmap_page,
-       .map_sg = ipath_map_sg,
-       .unmap_sg = ipath_unmap_sg,
-       .sync_single_for_cpu = ipath_sync_single_for_cpu,
-       .sync_single_for_device = ipath_sync_single_for_device,
-       .alloc_coherent = ipath_dma_alloc_coherent,
-       .free_coherent = ipath_dma_free_coherent
-};
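
ipath_dma.c, removed above, supplied an ib_dma_mapping_ops table whose operations are essentially identity mappings: because the driver copies data with the CPU rather than performing real DMA, it hands back kernel virtual addresses in place of bus addresses. This works because the ib_dma_*() wrappers of this kernel generation dispatch through a per-device ops pointer before falling back to the regular DMA API. The sketch below shows that dispatch shape; the dma_ops and dma_device field names are recalled from include/rdma/ib_verbs.h of the period and should be treated as assumptions rather than a quotation of that header.

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

/* Sketch of the dispatch that made ipath_dma_mapping_ops usable: a verbs
 * consumer calls the generic wrapper, which prefers the driver's ops table
 * (e.g. &ipath_dma_mapping_ops) over the real DMA mapping API. */
static inline u64 sketch_ib_dma_map_single(struct ib_device *dev,
					   void *cpu_addr, size_t size,
					   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
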
diff --git a/drivers/staging/rdma/ipath/ipath_driver.c b/drivers/staging/rdma/ipath/ipath_driver.c
deleted file mode 100644 (file)
index 2ab22f9..0000000
+++ /dev/null
@@ -1,2784 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/bitmap.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#ifdef CONFIG_X86_64
-#include <asm/pat.h>
-#endif
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-
-static void ipath_update_pio_bufs(struct ipath_devdata *);
-
-const char *ipath_get_unit_name(int unit)
-{
-       static char iname[16];
-       snprintf(iname, sizeof iname, "infinipath%u", unit);
-       return iname;
-}
-
-#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
-#define PFX IPATH_DRV_NAME ": "
-
-/*
- * The size has to be longer than this string, so we can append
- * board/chip information to it in the init code.
- */
-const char ib_ipath_version[] = IPATH_IDSTR "\n";
-
-static struct idr unit_table;
-DEFINE_SPINLOCK(ipath_devs_lock);
-LIST_HEAD(ipath_dev_list);
-
-wait_queue_head_t ipath_state_wait;
-
-unsigned ipath_debug = __IPATH_INFO;
-
-module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "mask for debug prints");
-EXPORT_SYMBOL_GPL(ipath_debug);
-
-unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
-module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
-MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
-
-static unsigned ipath_hol_timeout_ms = 13000;
-module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
-MODULE_PARM_DESC(hol_timeout_ms,
-       "duration of user app suspension after link failure");
-
-unsigned ipath_linkrecovery = 1;
-module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@qlogic.com>");
-MODULE_DESCRIPTION("QLogic InfiniPath driver");
-
-/*
- * Table to translate the LINKTRAININGSTATE portion of
- * IBCStatus to a human-readable form.
- */
-const char *ipath_ibcstatus_str[] = {
-       "Disabled",
-       "LinkUp",
-       "PollActive",
-       "PollQuiet",
-       "SleepDelay",
-       "SleepQuiet",
-       "LState6",              /* unused */
-       "LState7",              /* unused */
-       "CfgDebounce",
-       "CfgRcvfCfg",
-       "CfgWaitRmt",
-       "CfgIdle",
-       "RecovRetrain",
-       "CfgTxRevLane",         /* unused before IBA7220 */
-       "RecovWaitRmt",
-       "RecovIdle",
-       /* below were added for IBA7220 */
-       "CfgEnhanced",
-       "CfgTest",
-       "CfgWaitRmtTest",
-       "CfgWaitCfgEnhanced",
-       "SendTS_T",
-       "SendTstIdles",
-       "RcvTS_T",
-       "SendTst_TS1s",
-       "LTState18", "LTState19", "LTState1A", "LTState1B",
-       "LTState1C", "LTState1D", "LTState1E", "LTState1F"
-};
-
-static void ipath_remove_one(struct pci_dev *);
-static int ipath_init_one(struct pci_dev *, const struct pci_device_id *);
-
-/* Only needed for registration, nothing else needs this info */
-#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-
-/* Number of seconds before our card status check...  */
-#define STATUS_TIMEOUT 60
-
-static const struct pci_device_id ipath_pci_tbl[] = {
-       { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
-       { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
-
-static struct pci_driver ipath_driver = {
-       .name = IPATH_DRV_NAME,
-       .probe = ipath_init_one,
-       .remove = ipath_remove_one,
-       .id_table = ipath_pci_tbl,
-       .driver = {
-               .groups = ipath_driver_attr_groups,
-       },
-};
-
-static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
-                            u32 *bar0, u32 *bar1)
-{
-       int ret;
-
-       ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
-       if (ret)
-               ipath_dev_err(dd, "failed to read bar0 before enable: "
-                             "error %d\n", -ret);
-
-       ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
-       if (ret)
-               ipath_dev_err(dd, "failed to read bar1 before enable: "
-                             "error %d\n", -ret);
-
-       ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
-}
-
-static void ipath_free_devdata(struct pci_dev *pdev,
-                              struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       pci_set_drvdata(pdev, NULL);
-
-       if (dd->ipath_unit != -1) {
-               spin_lock_irqsave(&ipath_devs_lock, flags);
-               idr_remove(&unit_table, dd->ipath_unit);
-               list_del(&dd->ipath_list);
-               spin_unlock_irqrestore(&ipath_devs_lock, flags);
-       }
-       vfree(dd);
-}
-
-static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
-{
-       unsigned long flags;
-       struct ipath_devdata *dd;
-       int ret;
-
-       dd = vzalloc(sizeof(*dd));
-       if (!dd) {
-               dd = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-       dd->ipath_unit = -1;
-
-       idr_preload(GFP_KERNEL);
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-
-       ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not allocate unit ID: error %d\n", -ret);
-               ipath_free_devdata(pdev, dd);
-               dd = ERR_PTR(ret);
-               goto bail_unlock;
-       }
-       dd->ipath_unit = ret;
-
-       dd->pcidev = pdev;
-       pci_set_drvdata(pdev, dd);
-
-       list_add(&dd->ipath_list, &ipath_dev_list);
-
-bail_unlock:
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-       idr_preload_end();
-bail:
-       return dd;
-}
-
-static inline struct ipath_devdata *__ipath_lookup(int unit)
-{
-       return idr_find(&unit_table, unit);
-}
-
-struct ipath_devdata *ipath_lookup(int unit)
-{
-       struct ipath_devdata *dd;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-       dd = __ipath_lookup(unit);
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-       return dd;
-}
-
-int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
-{
-       int nunits, npresent, nup;
-       struct ipath_devdata *dd;
-       unsigned long flags;
-       int maxports;
-
-       nunits = npresent = nup = maxports = 0;
-
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-
-       list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
-               nunits++;
-               if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
-                       npresent++;
-               if (dd->ipath_lid &&
-                   !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
-                                        | IPATH_LINKUNK)))
-                       nup++;
-               if (dd->ipath_cfgports > maxports)
-                       maxports = dd->ipath_cfgports;
-       }
-
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-       if (npresentp)
-               *npresentp = npresent;
-       if (nupp)
-               *nupp = nup;
-       if (maxportsp)
-               *maxportsp = maxports;
-
-       return nunits;
-}
-
-/*
- * These next two routines are placeholders in case we don't have per-arch
- * code for controlling write combining.  If explicit control of write
- * combining is not available, performance will probably be awful.
- */
-
-int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
-{
-       return -EOPNOTSUPP;
-}
-
-void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
-{
-}
-
-/*
- * Perform a PIO buffer bandwidth write test, to verify proper system
- * configuration.  Even when all the setup calls work, occasionally
- * BIOS or other issues can prevent write combining from working, or
- * can cause other bandwidth problems to the chip.
- *
- * This test simply writes the same buffer over and over again, and
- * measures close to the peak bandwidth to the chip (not testing
- * data bandwidth to the wire).   On chips that use an address-based
- * trigger to send packets to the wire, this is easy.  On chips that
- * use a count to trigger, we want to make sure that the packet doesn't
- * go out on the wire, or trigger flow control checks.
- */
-static void ipath_verify_pioperf(struct ipath_devdata *dd)
-{
-       u32 pbnum, cnt, lcnt;
-       u32 __iomem *piobuf;
-       u32 *addr;
-       u64 msecs, emsecs;
-
-       piobuf = ipath_getpiobuf(dd, 0, &pbnum);
-       if (!piobuf) {
-               dev_info(&dd->pcidev->dev,
-                       "No PIObufs for checking perf, skipping\n");
-               return;
-       }
-
-       /*
-        * Enough to give us a reasonable test, less than piobuf size, and
-        * likely a multiple of the store buffer length.
-        */
-       cnt = 1024;
-
-       addr = vmalloc(cnt);
-       if (!addr) {
-               dev_info(&dd->pcidev->dev,
-                       "Couldn't get memory for checking PIO perf,"
-                       " skipping\n");
-               goto done;
-       }
-
-       preempt_disable();  /* we want reasonably accurate elapsed time */
-       msecs = 1 + jiffies_to_msecs(jiffies);
-       for (lcnt = 0; lcnt < 10000U; lcnt++) {
-               /* wait until we cross msec boundary */
-               if (jiffies_to_msecs(jiffies) >= msecs)
-                       break;
-               udelay(1);
-       }
-
-       ipath_disable_armlaunch(dd);
-
-       /*
-        * length 0, no dwords actually sent, and mark as VL15
-        * on chips where that may matter (due to IB flowcontrol)
-        */
-       if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
-               writeq(1UL << 63, piobuf);
-       else
-               writeq(0, piobuf);
-       ipath_flush_wc();
-
-       /*
-        * this is only roughly accurate, since even with preempt we
-        * still take interrupts that could take a while.   Running for
-        * >= 5 msec seems to get us "close enough" to accurate values
-        */
-       msecs = jiffies_to_msecs(jiffies);
-       for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
-               __iowrite32_copy(piobuf + 64, addr, cnt >> 2);
-               emsecs = jiffies_to_msecs(jiffies) - msecs;
-       }
-
-       /* 1 GiB/sec, slightly over IB SDR line rate */
-       if (lcnt < (emsecs * 1024U))
-               ipath_dev_err(dd,
-                       "Performance problem: bandwidth to PIO buffers is "
-                       "only %u MiB/sec\n",
-                       lcnt / (u32) emsecs);
-       else
-               ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
-                       lcnt / (u32) emsecs);
-
-       preempt_enable();
-
-       vfree(addr);
-
-done:
-       /* disarm piobuf, so it's available again */
-       ipath_disarm_piobufs(dd, pbnum, 1);
-       ipath_enable_armlaunch(dd);
-}
-
-static void cleanup_device(struct ipath_devdata *dd);
-
-static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       int ret, len, j;
-       struct ipath_devdata *dd;
-       unsigned long long addr;
-       u32 bar0 = 0, bar1 = 0;
-
-#ifdef CONFIG_X86_64
-       if (pat_enabled()) {
-               pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
-               ret = -ENODEV;
-               goto bail;
-       }
-#endif
-
-       dd = ipath_alloc_devdata(pdev);
-       if (IS_ERR(dd)) {
-               ret = PTR_ERR(dd);
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not allocate devdata: error %d\n", -ret);
-               goto bail;
-       }
-
-       ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
-
-       ret = pci_enable_device(pdev);
-       if (ret) {
-               /* This can happen iff:
-                *
-                * We did a chip reset, and then failed to reprogram the
-                * BAR, or the chip reset due to an internal error.  We then
-                * unloaded the driver and reloaded it.
-                *
-                * Both reset cases set the BAR back to initial state.  For
-                * the latter case, the AER sticky error bit at offset 0x718
-                * should be set, but the Linux kernel doesn't yet know
-                * about that, it appears.  If the original BAR was retained
-                * in the kernel data structures, this may be OK.
-                */
-               ipath_dev_err(dd, "enable unit %d failed: error %d\n",
-                             dd->ipath_unit, -ret);
-               goto bail_devdata;
-       }
-       addr = pci_resource_start(pdev, 0);
-       len = pci_resource_len(pdev, 0);
-       ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
-                  "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
-                  ent->device, ent->driver_data);
-
-       read_bars(dd, pdev, &bar0, &bar1);
-
-       if (!bar1 && !(bar0 & ~0xf)) {
-               if (addr) {
-                       dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
-                                "rewriting as %llx\n", addr);
-                       ret = pci_write_config_dword(
-                               pdev, PCI_BASE_ADDRESS_0, addr);
-                       if (ret) {
-                               ipath_dev_err(dd, "rewrite of BAR0 "
-                                             "failed: err %d\n", -ret);
-                               goto bail_disable;
-                       }
-                       ret = pci_write_config_dword(
-                               pdev, PCI_BASE_ADDRESS_1, addr >> 32);
-                       if (ret) {
-                               ipath_dev_err(dd, "rewrite of BAR1 "
-                                             "failed: err %d\n", -ret);
-                               goto bail_disable;
-                       }
-               } else {
-                       ipath_dev_err(dd, "BAR is 0 (probable RESET), "
-                                     "not usable until reboot\n");
-                       ret = -ENODEV;
-                       goto bail_disable;
-               }
-       }
-
-       ret = pci_request_regions(pdev, IPATH_DRV_NAME);
-       if (ret) {
-               dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
-                        "err %d\n", dd->ipath_unit, -ret);
-               goto bail_disable;
-       }
-
-       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (ret) {
-               /*
-                * if the 64 bit setup fails, try 32 bit.  Some systems
-                * do not set up 64 bit maps on systems with 2GB or less
-                * memory installed.
-                */
-               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (ret) {
-                       dev_info(&pdev->dev,
-                               "Unable to set DMA mask for unit %u: %d\n",
-                               dd->ipath_unit, ret);
-                       goto bail_regions;
-               } else {
-                       ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
-                       ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-                       if (ret)
-                               dev_info(&pdev->dev,
-                                       "Unable to set DMA consistent mask "
-                                       "for unit %u: %d\n",
-                                       dd->ipath_unit, ret);
-
-               }
-       } else {
-               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-               if (ret)
-                       dev_info(&pdev->dev,
-                               "Unable to set DMA consistent mask "
-                               "for unit %u: %d\n",
-                               dd->ipath_unit, ret);
-       }
-
-       pci_set_master(pdev);
-
-       /*
-        * Save BARs to rewrite after device reset.  Save all 64 bits of
-        * BAR, just in case.
-        */
-       dd->ipath_pcibar0 = addr;
-       dd->ipath_pcibar1 = addr >> 32;
-       dd->ipath_deviceid = ent->device;       /* save for later use */
-       dd->ipath_vendorid = ent->vendor;
-
-       /* setup the chip-specific functions, as early as possible. */
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INFINIPATH_HT:
-               ipath_init_iba6110_funcs(dd);
-               break;
-
-       default:
-               ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
-                             "failing\n", ent->device);
-               return -ENODEV;
-       }
-
-       for (j = 0; j < 6; j++) {
-               if (!pdev->resource[j].start)
-                       continue;
-               ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
-                          j, &pdev->resource[j],
-                          (unsigned long long)pci_resource_len(pdev, j));
-       }
-
-       if (!addr) {
-               ipath_dev_err(dd, "No valid address in BAR 0!\n");
-               ret = -ENODEV;
-               goto bail_regions;
-       }
-
-       dd->ipath_pcirev = pdev->revision;
-
-#if defined(__powerpc__)
-       /* There isn't a generic way to specify writethrough mappings */
-       dd->ipath_kregbase = __ioremap(addr, len,
-               (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
-#else
-       /* XXX: split this properly to enable on PAT */
-       dd->ipath_kregbase = ioremap_nocache(addr, len);
-#endif
-
-       if (!dd->ipath_kregbase) {
-               ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
-                         addr);
-               ret = -ENOMEM;
-               goto bail_iounmap;
-       }
-       dd->ipath_kregend = (u64 __iomem *)
-               ((void __iomem *)dd->ipath_kregbase + len);
-       dd->ipath_physaddr = addr;      /* used for io_remap, etc. */
-       /* for user mmap */
-       ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
-                  addr, dd->ipath_kregbase);
-
-       if (dd->ipath_f_bus(dd, pdev))
-               ipath_dev_err(dd, "Failed to setup config space; "
-                             "continuing anyway\n");
-
-       /*
-        * Set up our interrupt handler; IRQF_SHARED is probably not needed,
-        * since MSI interrupts shouldn't be shared, but it won't hurt for now.
-        * Check for a zero irq after we return from chip-specific bus setup,
-        * since that setup can affect this value.
-        */
-       if (!dd->ipath_irq)
-               ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
-                             "work\n");
-       else {
-               ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
-                                 IPATH_DRV_NAME, dd);
-               if (ret) {
-                       ipath_dev_err(dd, "Couldn't setup irq handler, "
-                                     "irq=%d: %d\n", dd->ipath_irq, ret);
-                       goto bail_iounmap;
-               }
-       }
-
-       ret = ipath_init_chip(dd, 0);   /* do the chip-specific init */
-       if (ret)
-               goto bail_irqsetup;
-
-       ret = ipath_enable_wc(dd);
-
-       if (ret)
-               ret = 0;
-
-       ipath_verify_pioperf(dd);
-
-       ipath_device_create_group(&pdev->dev, dd);
-       ipathfs_add_device(dd);
-       ipath_user_add(dd);
-       ipath_diag_add(dd);
-       ipath_register_ib_device(dd);
-
-       goto bail;
-
-bail_irqsetup:
-       cleanup_device(dd);
-
-       if (dd->ipath_irq)
-               dd->ipath_f_free_irq(dd);
-
-       if (dd->ipath_f_cleanup)
-               dd->ipath_f_cleanup(dd);
-
-bail_iounmap:
-       iounmap((volatile void __iomem *) dd->ipath_kregbase);
-
-bail_regions:
-       pci_release_regions(pdev);
-
-bail_disable:
-       pci_disable_device(pdev);
-
-bail_devdata:
-       ipath_free_devdata(pdev, dd);
-
-bail:
-       return ret;
-}
-
-static void cleanup_device(struct ipath_devdata *dd)
-{
-       int port;
-       struct ipath_portdata **tmp;
-       unsigned long flags;
-
-       if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
-               /* can't do anything more with chip; needs re-init */
-               *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
-               if (dd->ipath_kregbase) {
-                       /*
-                        * If we haven't already cleaned up, clear these so
-                        * that any register reads/writes "fail" until
-                        * re-init.
-                        */
-                       dd->ipath_kregbase = NULL;
-                       dd->ipath_uregbase = 0;
-                       dd->ipath_sregbase = 0;
-                       dd->ipath_cregbase = 0;
-                       dd->ipath_kregsize = 0;
-               }
-               ipath_disable_wc(dd);
-       }
-
-       if (dd->ipath_spectriggerhit)
-               dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
-                        dd->ipath_spectriggerhit);
-
-       if (dd->ipath_pioavailregs_dma) {
-               dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-                                 (void *) dd->ipath_pioavailregs_dma,
-                                 dd->ipath_pioavailregs_phys);
-               dd->ipath_pioavailregs_dma = NULL;
-       }
-       if (dd->ipath_dummy_hdrq) {
-               dma_free_coherent(&dd->pcidev->dev,
-                       dd->ipath_pd[0]->port_rcvhdrq_size,
-                       dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
-               dd->ipath_dummy_hdrq = NULL;
-       }
-
-       if (dd->ipath_pageshadow) {
-               struct page **tmpp = dd->ipath_pageshadow;
-               dma_addr_t *tmpd = dd->ipath_physshadow;
-               int i, cnt = 0;
-
-               ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
-                          "locked\n");
-               for (port = 0; port < dd->ipath_cfgports; port++) {
-                       int port_tidbase = port * dd->ipath_rcvtidcnt;
-                       int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
-                       for (i = port_tidbase; i < maxtid; i++) {
-                               if (!tmpp[i])
-                                       continue;
-                               pci_unmap_page(dd->pcidev, tmpd[i],
-                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                               ipath_release_user_pages(&tmpp[i], 1);
-                               tmpp[i] = NULL;
-                               cnt++;
-                       }
-               }
-               if (cnt) {
-                       ipath_stats.sps_pageunlocks += cnt;
-                       ipath_cdbg(VERBOSE, "There were still %u expTID "
-                                  "entries locked\n", cnt);
-               }
-               if (ipath_stats.sps_pagelocks ||
-                   ipath_stats.sps_pageunlocks)
-                       ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
-                                  "unlocked via ipath_m{un}lock\n",
-                                  (unsigned long long)
-                                  ipath_stats.sps_pagelocks,
-                                  (unsigned long long)
-                                  ipath_stats.sps_pageunlocks);
-
-               ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
-                          dd->ipath_pageshadow);
-               tmpp = dd->ipath_pageshadow;
-               dd->ipath_pageshadow = NULL;
-               vfree(tmpp);
-
-               dd->ipath_egrtidbase = NULL;
-       }
-
-       /*
-        * at unload; we iterate over portcnt, because that's what we allocate.
-        * We acquire the lock to be really paranoid that ipath_pd isn't being
-        * We acquire lock to be really paranoid that ipath_pd isn't being
-        * accessed from some interrupt-related code (that should not happen,
-        * but best to be sure).
-        */
-       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-       tmp = dd->ipath_pd;
-       dd->ipath_pd = NULL;
-       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-       for (port = 0; port < dd->ipath_portcnt; port++) {
-               struct ipath_portdata *pd = tmp[port];
-               tmp[port] = NULL; /* debugging paranoia */
-               ipath_free_pddata(dd, pd);
-       }
-       kfree(tmp);
-}
-
-static void ipath_remove_one(struct pci_dev *pdev)
-{
-       struct ipath_devdata *dd = pci_get_drvdata(pdev);
-
-       ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
-
-       /*
-        * disable the IB link early, to be sure no new packets arrive, which
-        * complicates the shutdown process
-        */
-       ipath_shutdown_device(dd);
-
-       flush_workqueue(ib_wq);
-
-       if (dd->verbs_dev)
-               ipath_unregister_ib_device(dd->verbs_dev);
-
-       ipath_diag_remove(dd);
-       ipath_user_remove(dd);
-       ipathfs_remove_device(dd);
-       ipath_device_remove_group(&pdev->dev, dd);
-
-       ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
-                  "unit %u\n", dd, (u32) dd->ipath_unit);
-
-       cleanup_device(dd);
-
-       /*
-        * Turn off rcv, send, and interrupts for all ports.  Should all
-        * drivers also hard reset the chip here?
-        * Free up port 0 (kernel) rcvhdr and egr bufs, and eventually tid
-        * bufs, for all versions of the driver, if they were allocated.
-        */
-       if (dd->ipath_irq) {
-               ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
-                          dd->ipath_unit, dd->ipath_irq);
-               dd->ipath_f_free_irq(dd);
-       } else
-               ipath_dbg("irq is 0, not doing free_irq "
-                         "for unit %u\n", dd->ipath_unit);
-       /*
-        * we check for NULL here, because it's outside
-        * the kregbase check, and we need to call it
-        * after the free_irq.  Thus it's possible that
-        * the function pointers were never initialized.
-        */
-       if (dd->ipath_f_cleanup)
-               /* clean up chip-specific stuff */
-               dd->ipath_f_cleanup(dd);
-
-       ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
-       iounmap((volatile void __iomem *) dd->ipath_kregbase);
-       pci_release_regions(pdev);
-       ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
-       pci_disable_device(pdev);
-
-       ipath_free_devdata(pdev, dd);
-}
-
-/* general driver use */
-DEFINE_MUTEX(ipath_mutex);
-
-static DEFINE_SPINLOCK(ipath_pioavail_lock);
-
-/**
- * ipath_disarm_piobufs - cancel a range of PIO buffers
- * @dd: the infinipath device
- * @first: the first PIO buffer to cancel
- * @cnt: the number of PIO buffers to cancel
- *
- * Cancel a range of PIO buffers, used when they might be armed, but
- * not triggered.  Used at init to ensure buffer state, and also at user
- * process close, in case it died while writing to a PIO buffer.
- * Also used after errors.
- */
-void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
-                         unsigned cnt)
-{
-       unsigned i, last = first + cnt;
-       unsigned long flags;
-
-       ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
-       for (i = first; i < last; i++) {
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               /*
-                * The disarm-related bits are write-only, so it
-                * is ok to OR them in with our copy of sendctrl
-                * while we hold the lock.
-                */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl | INFINIPATH_S_DISARM |
-                       (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
-               /* can't disarm bufs back-to-back per iba7220 spec */
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-       }
-       /* on some older chips, update may not happen after cancel */
-       ipath_force_pio_avail_update(dd);
-}
-
-/**
- * ipath_wait_linkstate - wait for an IB link state change to occur
- * @dd: the infinipath device
- * @state: the state to wait for
- * @msecs: the number of milliseconds to wait
- *
- * Wait up to msecs milliseconds for an IB link state change to occur.
- * For now, take the easy polling route.  Currently used only by
- * ipath_set_linkstate.  Returns 0 if the state is reached, otherwise
- * -ETIMEDOUT.  state can have multiple state bits set, for any of
- * several transitions.
- */
-int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
-{
-       dd->ipath_state_wanted = state;
-       wait_event_interruptible_timeout(ipath_state_wait,
-                                        (dd->ipath_flags & state),
-                                        msecs_to_jiffies(msecs));
-       dd->ipath_state_wanted = 0;
-
-       if (!(dd->ipath_flags & state)) {
-               u64 val;
-               ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
-                          " ms\n",
-                          /* test INIT ahead of DOWN, both can be set */
-                          (state & IPATH_LINKINIT) ? "INIT" :
-                          ((state & IPATH_LINKDOWN) ? "DOWN" :
-                           ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
-                          msecs);
-               val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-               ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
-                          (unsigned long long) ipath_read_kreg64(
-                                  dd, dd->ipath_kregs->kr_ibcctrl),
-                          (unsigned long long) val,
-                          ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
-       }
-       return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
-}
-
-static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
-       char *buf, size_t blen)
-{
-       static const struct {
-               ipath_err_t err;
-               const char *msg;
-       } errs[] = {
-               { INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
-               { INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
-               { INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
-               { INFINIPATH_E_SDMABASE, "SDmaBase" },
-               { INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
-               { INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
-               { INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
-               { INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
-               { INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
-               { INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
-               { INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
-               { INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
-       };
-       int i;
-       int expected;
-       size_t bidx = 0;
-
-       for (i = 0; i < ARRAY_SIZE(errs); i++) {
-               expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
-                       test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-               if ((err & errs[i].err) && !expected)
-                       bidx += snprintf(buf + bidx, blen - bidx,
-                                        "%s ", errs[i].msg);
-       }
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else.  Return 1 if there are "real" errors, otherwise 0 if only packet
- * errors, so the caller can decide what to print with the string.
- */
-int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
-       ipath_err_t err)
-{
-       int iserr = 1;
-       *buf = '\0';
-       if (err & INFINIPATH_E_PKTERRS) {
-               if (!(err & ~INFINIPATH_E_PKTERRS))
-                       iserr = 0; // if only packet errors.
-               if (ipath_debug & __IPATH_ERRPKTDBG) {
-                       if (err & INFINIPATH_E_REBP)
-                               strlcat(buf, "EBP ", blen);
-                       if (err & INFINIPATH_E_RVCRC)
-                               strlcat(buf, "VCRC ", blen);
-                       if (err & INFINIPATH_E_RICRC) {
-                               strlcat(buf, "CRC ", blen);
-                               // clear for check below, so only once
-                               err &= INFINIPATH_E_RICRC;
-                       }
-                       if (err & INFINIPATH_E_RSHORTPKTLEN)
-                               strlcat(buf, "rshortpktlen ", blen);
-                       if (err & INFINIPATH_E_SDROPPEDDATAPKT)
-                               strlcat(buf, "sdroppeddatapkt ", blen);
-                       if (err & INFINIPATH_E_SPKTLEN)
-                               strlcat(buf, "spktlen ", blen);
-               }
-               if ((err & INFINIPATH_E_RICRC) &&
-                       !(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
-                       strlcat(buf, "CRC ", blen);
-               if (!iserr)
-                       goto done;
-       }
-       if (err & INFINIPATH_E_RHDRLEN)
-               strlcat(buf, "rhdrlen ", blen);
-       if (err & INFINIPATH_E_RBADTID)
-               strlcat(buf, "rbadtid ", blen);
-       if (err & INFINIPATH_E_RBADVERSION)
-               strlcat(buf, "rbadversion ", blen);
-       if (err & INFINIPATH_E_RHDR)
-               strlcat(buf, "rhdr ", blen);
-       if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
-               strlcat(buf, "sendspecialtrigger ", blen);
-       if (err & INFINIPATH_E_RLONGPKTLEN)
-               strlcat(buf, "rlongpktlen ", blen);
-       if (err & INFINIPATH_E_RMAXPKTLEN)
-               strlcat(buf, "rmaxpktlen ", blen);
-       if (err & INFINIPATH_E_RMINPKTLEN)
-               strlcat(buf, "rminpktlen ", blen);
-       if (err & INFINIPATH_E_SMINPKTLEN)
-               strlcat(buf, "sminpktlen ", blen);
-       if (err & INFINIPATH_E_RFORMATERR)
-               strlcat(buf, "rformaterr ", blen);
-       if (err & INFINIPATH_E_RUNSUPVL)
-               strlcat(buf, "runsupvl ", blen);
-       if (err & INFINIPATH_E_RUNEXPCHAR)
-               strlcat(buf, "runexpchar ", blen);
-       if (err & INFINIPATH_E_RIBFLOW)
-               strlcat(buf, "ribflow ", blen);
-       if (err & INFINIPATH_E_SUNDERRUN)
-               strlcat(buf, "sunderrun ", blen);
-       if (err & INFINIPATH_E_SPIOARMLAUNCH)
-               strlcat(buf, "spioarmlaunch ", blen);
-       if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
-               strlcat(buf, "sunexperrpktnum ", blen);
-       if (err & INFINIPATH_E_SDROPPEDSMPPKT)
-               strlcat(buf, "sdroppedsmppkt ", blen);
-       if (err & INFINIPATH_E_SMAXPKTLEN)
-               strlcat(buf, "smaxpktlen ", blen);
-       if (err & INFINIPATH_E_SUNSUPVL)
-               strlcat(buf, "sunsupVL ", blen);
-       if (err & INFINIPATH_E_INVALIDADDR)
-               strlcat(buf, "invalidaddr ", blen);
-       if (err & INFINIPATH_E_RRCVEGRFULL)
-               strlcat(buf, "rcvegrfull ", blen);
-       if (err & INFINIPATH_E_RRCVHDRFULL)
-               strlcat(buf, "rcvhdrfull ", blen);
-       if (err & INFINIPATH_E_IBSTATUSCHANGED)
-               strlcat(buf, "ibcstatuschg ", blen);
-       if (err & INFINIPATH_E_RIBLOSTLINK)
-               strlcat(buf, "riblostlink ", blen);
-       if (err & INFINIPATH_E_HARDWARE)
-               strlcat(buf, "hardware ", blen);
-       if (err & INFINIPATH_E_RESET)
-               strlcat(buf, "reset ", blen);
-       if (err & INFINIPATH_E_SDMAERRS)
-               decode_sdma_errs(dd, err, buf, blen);
-       if (err & INFINIPATH_E_INVALIDEEPCMD)
-               strlcat(buf, "invalideepromcmd ", blen);
-done:
-       return iserr;
-}
-
-/**
- * get_rhf_errstring - decode RHF errors
- * @err: the err number
- * @msg: the output buffer
- * @len: the length of the output buffer
- *
- * only used one place now, may want more later
- */
-static void get_rhf_errstring(u32 err, char *msg, size_t len)
-{
-       /* if no errors, we don't need to check what's first */
-       *msg = '\0';
-
-       if (err & INFINIPATH_RHF_H_ICRCERR)
-               strlcat(msg, "icrcerr ", len);
-       if (err & INFINIPATH_RHF_H_VCRCERR)
-               strlcat(msg, "vcrcerr ", len);
-       if (err & INFINIPATH_RHF_H_PARITYERR)
-               strlcat(msg, "parityerr ", len);
-       if (err & INFINIPATH_RHF_H_LENERR)
-               strlcat(msg, "lenerr ", len);
-       if (err & INFINIPATH_RHF_H_MTUERR)
-               strlcat(msg, "mtuerr ", len);
-       if (err & INFINIPATH_RHF_H_IHDRERR)
-               /* infinipath hdr checksum error */
-               strlcat(msg, "ipathhdrerr ", len);
-       if (err & INFINIPATH_RHF_H_TIDERR)
-               strlcat(msg, "tiderr ", len);
-       if (err & INFINIPATH_RHF_H_MKERR)
-               /* bad port, offset, etc. */
-               strlcat(msg, "invalid ipathhdr ", len);
-       if (err & INFINIPATH_RHF_H_IBERR)
-               strlcat(msg, "iberr ", len);
-       if (err & INFINIPATH_RHF_L_SWA)
-               strlcat(msg, "swA ", len);
-       if (err & INFINIPATH_RHF_L_SWB)
-               strlcat(msg, "swB ", len);
-}
-
-/**
- * ipath_get_egrbuf - get an eager buffer
- * @dd: the infinipath device
- * @bufnum: the eager buffer to get
- *
- * must only be called if ipath_pd[port] is known to be allocated
- */
-static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
-{
-       return dd->ipath_port0_skbinfo ?
-               (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
-}
-
-/**
- * ipath_alloc_skb - allocate an skb and buffer with possible constraints
- * @dd: the infinipath device
- * @gfp_mask: the sk_buff SFP mask
- */
-struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
-                               gfp_t gfp_mask)
-{
-       struct sk_buff *skb;
-       u32 len;
-
-       /*
-        * The only fully supported way to handle this is to allocate lots of
-        * extra, align as needed, and then do skb_reserve().  That wastes
-        * a lot of memory...  I'll have to hack this into infinipath_copy
-        * also.
-        */
-
-       /*
-        * We need 2 extra bytes for ipath_ether data sent in the
-        * key header.  In order to keep everything dword aligned,
-        * we'll reserve 4 bytes.
-        */
-       len = dd->ipath_ibmaxlen + 4;
-
-       if (dd->ipath_flags & IPATH_4BYTE_TID) {
-               /* We need a 2KB multiple alignment, and there is no way
-                * to do it except to allocate extra and then skb_reserve
-                * enough to bring it up to the right alignment.
-                */
-               len += 2047;
-       }
-
-       skb = __dev_alloc_skb(len, gfp_mask);
-       if (!skb) {
-               ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
-                             len);
-               goto bail;
-       }
-
-       skb_reserve(skb, 4);
-
-       if (dd->ipath_flags & IPATH_4BYTE_TID) {
-               u32 una = (unsigned long)skb->data & 2047;
-               if (una)
-                       skb_reserve(skb, 2048 - una);
-       }
-
-bail:
-       return skb;
-}
-
-static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
-                            u32 eflags,
-                            u32 l,
-                            u32 etail,
-                            __le32 *rhf_addr,
-                            struct ipath_message_header *hdr)
-{
-       char emsg[128];
-
-       get_rhf_errstring(eflags, emsg, sizeof emsg);
-       ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
-                  "tlen=%x opcode=%x egridx=%x: %s\n",
-                  eflags, l,
-                  ipath_hdrget_rcv_type(rhf_addr),
-                  ipath_hdrget_length_in_bytes(rhf_addr),
-                  be32_to_cpu(hdr->bth[0]) >> 24,
-                  etail, emsg);
-
-       /* Count local link integrity errors. */
-       if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
-               u8 n = (dd->ipath_ibcctrl >>
-                       INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-                       INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-
-               if (++dd->ipath_lli_counter > n) {
-                       dd->ipath_lli_counter = 0;
-                       dd->ipath_lli_errors++;
-               }
-       }
-}
-
-/*
- * ipath_kreceive - receive a packet
- * @pd: the infinipath port
- *
- * called from interrupt handler for errors or receive interrupt
- */
-void ipath_kreceive(struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       __le32 *rhf_addr;
-       void *ebuf;
-       const u32 rsize = dd->ipath_rcvhdrentsize;      /* words */
-       const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
-       u32 etail = -1, l, hdrqtail;
-       struct ipath_message_header *hdr;
-       u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
-       static u64 totcalls;    /* stats, may eventually remove */
-       int last;
-
-       l = pd->port_head;
-       rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
-       if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-               u32 seq = ipath_hdrget_seq(rhf_addr);
-
-               if (seq != pd->port_seq_cnt)
-                       goto bail;
-               hdrqtail = 0;
-       } else {
-               hdrqtail = ipath_get_rcvhdrtail(pd);
-               if (l == hdrqtail)
-                       goto bail;
-               smp_rmb();
-       }
-
-reloop:
-       for (last = 0, i = 1; !last; i += !last) {
-               hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
-               eflags = ipath_hdrget_err_flags(rhf_addr);
-               etype = ipath_hdrget_rcv_type(rhf_addr);
-               /* total length */
-               tlen = ipath_hdrget_length_in_bytes(rhf_addr);
-               ebuf = NULL;
-               if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
-                   ipath_hdrget_use_egr_buf(rhf_addr) :
-                   (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
-                       /*
-                        * It turns out that the chip uses an eager buffer
-                        * for all non-expected packets, whether it "needs"
-                        * one or not.  So always get the index, but don't
-                        * set ebuf (so we try to copy data) unless the
-                        * length requires it.
-                        */
-                       etail = ipath_hdrget_index(rhf_addr);
-                       updegr = 1;
-                       if (tlen > sizeof(*hdr) ||
-                           etype == RCVHQ_RCV_TYPE_NON_KD)
-                               ebuf = ipath_get_egrbuf(dd, etail);
-               }
-
-               /*
-                * both tiderr and ipathhdrerr are set for all plain IB
-                * packets; only ipathhdrerr should be set.
-                */
-
-               if (etype != RCVHQ_RCV_TYPE_NON_KD &&
-                   etype != RCVHQ_RCV_TYPE_ERROR &&
-                   ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
-                   IPS_PROTO_VERSION)
-                       ipath_cdbg(PKT, "Bad InfiniPath protocol version "
-                                  "%x\n", etype);
-
-               if (unlikely(eflags))
-                       ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
-               else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
-                       ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
-                       if (dd->ipath_lli_counter)
-                               dd->ipath_lli_counter--;
-               } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
-                       u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
-                       u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
-                       ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
-                                  "qp=%x), len %x; ignored\n",
-                                  etype, opcode, qp, tlen);
-               } else if (etype == RCVHQ_RCV_TYPE_EXPECTED) {
-                       ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
-                                 be32_to_cpu(hdr->bth[0]) >> 24);
-               } else {
-                       /*
-                        * error packet, type of error unknown.
-                        * Probably type 3, but we don't know, so don't
-                        * even try to print the opcode, etc.
-                        * Usually caused by a "bad packet" that has no
-                        * BTH even though the LRH says it should.
-                        */
-                       ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
-                                 " %x, len %x hdrq+%x rhf: %Lx\n",
-                                 etail, tlen, l, (unsigned long long)
-                                 le64_to_cpu(*(__le64 *) rhf_addr));
-                       if (ipath_debug & __IPATH_ERRPKTDBG) {
-                               u32 j, *d, dw = rsize-2;
-                               if (rsize > (tlen>>2))
-                                       dw = tlen>>2;
-                               d = (u32 *)hdr;
-                               printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
-                                       dw);
-                               for (j = 0; j < dw; j++)
-                                       printk(KERN_DEBUG "%8x%s", d[j],
-                                               (j%8) == 7 ? "\n" : " ");
-                               printk(KERN_DEBUG ".\n");
-                       }
-               }
-               l += rsize;
-               if (l >= maxcnt)
-                       l = 0;
-               rhf_addr = (__le32 *) pd->port_rcvhdrq +
-                       l + dd->ipath_rhf_offset;
-               if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-                       u32 seq = ipath_hdrget_seq(rhf_addr);
-
-                       if (++pd->port_seq_cnt > 13)
-                               pd->port_seq_cnt = 1;
-                       if (seq != pd->port_seq_cnt)
-                               last = 1;
-               } else if (l == hdrqtail) {
-                       last = 1;
-               }
-               /*
-                * Update the head regs on the last packet, and every 16
-                * packets.  This reduces bus traffic while still trying to
-                * prevent rcvhdrq overflows when the queue is nearly full.
-                */
-               if (last || !(i & 0xf)) {
-                       u64 lval = l;
-
-                       /* request IBA6120 and 7220 interrupt only on last */
-                       if (last)
-                               lval |= dd->ipath_rhdrhead_intr_off;
-                       ipath_write_ureg(dd, ur_rcvhdrhead, lval,
-                               pd->port_port);
-                       if (updegr) {
-                               ipath_write_ureg(dd, ur_rcvegrindexhead,
-                                                etail, pd->port_port);
-                               updegr = 0;
-                       }
-               }
-       }
-
-       if (!dd->ipath_rhdrhead_intr_off && !reloop &&
-           !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-               /* IBA6110 workaround: on the GPIO interrupt workaround path
-                * we can race while clearing the chip interrupt, clearing it
-                * just before another interrupt is delivered.  By checking
-                * here whether the in-memory tail register was updated while
-                * we were processing the earlier packets, we "almost"
-                * guarantee that case is covered.
-                */
-               u32 hqtail = ipath_get_rcvhdrtail(pd);
-               if (hqtail != hdrqtail) {
-                       hdrqtail = hqtail;
-                       reloop = 1; /* loop 1 extra time at most */
-                       goto reloop;
-               }
-       }
-
-       pkttot += i;
-
-       pd->port_head = l;
-
-       if (pkttot > ipath_stats.sps_maxpkts_call)
-               ipath_stats.sps_maxpkts_call = pkttot;
-       ipath_stats.sps_port0pkts += pkttot;
-       ipath_stats.sps_avgpkts_call =
-               ipath_stats.sps_port0pkts / ++totcalls;
-
-bail:;
-}
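
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It models the rcvhdrq walk above: entries are consumed from a ring and the
 * hardware "head" register is written back only on the last packet and every
 * 16 packets (the i & 0xf test), trading bus traffic against overflow risk.
 * All names here (ring_consume, hw_write_head, RING_ENTRIES) are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 64u

static void hw_write_head(uint32_t head)
{
        /* stand-in for ipath_write_ureg(dd, ur_rcvhdrhead, ...) */
        printf("head register <- %u\n", (unsigned)head);
}

static uint32_t ring_consume(uint32_t head, uint32_t tail)
{
        uint32_t i, last = 0;

        for (i = 1; !last; i++) {
                /* ... process the entry at 'head' here ... */
                head = (head + 1) % RING_ENTRIES;
                if (head == tail)
                        last = 1;
                /* write back on the last entry and every 16 entries */
                if (last || !(i & 0xf))
                        hw_write_head(head);
        }
        return head;
}

int main(void)
{
        uint32_t head = ring_consume(0, 40);

        printf("final head %u\n", (unsigned)head);
        return 0;
}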
-
-/**
- * ipath_update_pio_bufs - update shadow copy of the PIO availability map
- * @dd: the infinipath device
- *
- * called whenever our local copy indicates we have run out of send buffers
- * NOTE: This can be called from interrupt context by some code
- * and from non-interrupt context by ipath_getpiobuf().
- */
-
-static void ipath_update_pio_bufs(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-       int i;
-       const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
-
-       /* If the generation (check) bits have changed, then we update the
-        * busy bit for the corresponding PIO buffer.  This algorithm will
-        * modify positions to the value they already have in some cases
-        * (i.e., no change), but it's faster than changing only the bits
-        * that have changed.
-        *
-        * We would like to do this atomically, to avoid spinlocks in the
-        * critical send path, but that's not really possible, given the
-        * type of changes, and that this routine could be called on
-        * multiple cpu's simultaneously, so we lock in this routine only,
-        * to avoid conflicting updates; all we change is the shadow, and
-        * it's a single 64 bit memory location, so by definition the update
-        * is atomic in terms of what other cpu's can see in testing the
-        * bits.  The spin_lock overhead isn't too bad, since it only
-        * happens when all buffers are in use, so only cpu overhead, not
-        * latency or bandwidth is affected.
-        */
-       if (!dd->ipath_pioavailregs_dma) {
-               ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
-               return;
-       }
-       if (ipath_debug & __IPATH_VERBDBG) {
-               /* only if packet debug and verbose */
-               volatile __le64 *dma = dd->ipath_pioavailregs_dma;
-               unsigned long *shadow = dd->ipath_pioavailshadow;
-
-               ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
-                          "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
-                          "s3=%lx\n",
-                          (unsigned long long) le64_to_cpu(dma[0]),
-                          shadow[0],
-                          (unsigned long long) le64_to_cpu(dma[1]),
-                          shadow[1],
-                          (unsigned long long) le64_to_cpu(dma[2]),
-                          shadow[2],
-                          (unsigned long long) le64_to_cpu(dma[3]),
-                          shadow[3]);
-               if (piobregs > 4)
-                       ipath_cdbg(
-                               PKT, "2nd group, dma4=%llx shad4=%lx, "
-                               "d5=%llx s5=%lx, d6=%llx s6=%lx, "
-                               "d7=%llx s7=%lx\n",
-                               (unsigned long long) le64_to_cpu(dma[4]),
-                               shadow[4],
-                               (unsigned long long) le64_to_cpu(dma[5]),
-                               shadow[5],
-                               (unsigned long long) le64_to_cpu(dma[6]),
-                               shadow[6],
-                               (unsigned long long) le64_to_cpu(dma[7]),
-                               shadow[7]);
-       }
-       spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       for (i = 0; i < piobregs; i++) {
-               u64 pchbusy, pchg, piov, pnew;
-               /*
-                * Chip Errata: bug 6641; even and odd qwords>3 are swapped
-                */
-               if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-                       piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
-               else
-                       piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
-               pchg = dd->ipath_pioavailkernel[i] &
-                       ~(dd->ipath_pioavailshadow[i] ^ piov);
-               pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
-               if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
-                       pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
-                       pnew |= piov & pchbusy;
-                       dd->ipath_pioavailshadow[i] = pnew;
-               }
-       }
-       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-}
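
/*
 * Editor's note: illustrative sketch, not the driver's exact algorithm.
 * It shows the general shape of the shadow refresh above: each send buffer
 * owns two bits in a 64-bit word (generation in the even bit, BUSY in the
 * odd bit), and the BUSY bits of kernel-owned buffers are copied from the
 * DMA'd hardware copy into the software shadow in one masked update.
 * All names (BUSY_SHIFT, refresh_shadow, ...) are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define BUSY_SHIFT 1    /* BUSY is the odd bit of each 2-bit pair */

/* copy the BUSY bits of kernel-owned buffers from 'dma' into 'shadow' */
static uint64_t refresh_shadow(uint64_t shadow, uint64_t dma, uint64_t kern)
{
        /* even bits of 'kern' mark kernel-owned buffers; widen to BUSY bits */
        uint64_t busy_mask = (kern & 0x5555555555555555ULL) << BUSY_SHIFT;

        return (shadow & ~busy_mask) | (dma & busy_mask);
}

int main(void)
{
        uint64_t shadow = 0x2;  /* buffer 0 marked busy in the shadow */
        uint64_t dma    = 0x0;  /* hardware says buffer 0 is free     */
        uint64_t kern   = 0x1;  /* buffer 0 is kernel-owned           */

        printf("new shadow %#llx\n",
               (unsigned long long)refresh_shadow(shadow, dma, kern));
        return 0;
}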
-
-/*
- * Used to force an update of pioavailshadow if we can't get a pio buffer.
- * Needed primarily due to exiting freeze mode after recovering
- * from errors.  Done lazily, because it's safer (known to not
- * be writing pio buffers).
- */
-static void ipath_reset_availshadow(struct ipath_devdata *dd)
-{
-       int i, im;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       for (i = 0; i < dd->ipath_pioavregs; i++) {
-               u64 val, oldval;
-               /* deal with 6110 chip bug on high register #s */
-               im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
-                       i ^ 1 : i;
-               val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
-               /*
-                * busy out the buffers not in the kernel avail list,
-                * without changing the generation bits.
-                */
-               oldval = dd->ipath_pioavailshadow[i];
-               dd->ipath_pioavailshadow[i] = val |
-                       ((~dd->ipath_pioavailkernel[i] <<
-                       INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
-                       0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
-               if (oldval != dd->ipath_pioavailshadow[i])
-                       ipath_dbg("shadow[%d] was %Lx, now %lx\n",
-                               i, (unsigned long long) oldval,
-                               dd->ipath_pioavailshadow[i]);
-       }
-       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-}
-
-/**
- * ipath_setrcvhdrsize - set the receive header size
- * @dd: the infinipath device
- * @rhdrsize: the receive header size
- *
- * called from user init code, and also layered driver init
- */
-int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
-{
-       int ret = 0;
-
-       if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
-               if (dd->ipath_rcvhdrsize != rhdrsize) {
-                       dev_info(&dd->pcidev->dev,
-                                "Error: can't set protocol header "
-                                "size %u, already %u\n",
-                                rhdrsize, dd->ipath_rcvhdrsize);
-                       ret = -EAGAIN;
-               } else
-                       ipath_cdbg(VERBOSE, "Reuse same protocol header "
-                                  "size %u\n", dd->ipath_rcvhdrsize);
-       } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
-                              (sizeof(u64) / sizeof(u32)))) {
-               ipath_dbg("Error: can't set protocol header size %u "
-                         "(> max %u)\n", rhdrsize,
-                         dd->ipath_rcvhdrentsize -
-                         (u32) (sizeof(u64) / sizeof(u32)));
-               ret = -EOVERFLOW;
-       } else {
-               dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
-               dd->ipath_rcvhdrsize = rhdrsize;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
-                                dd->ipath_rcvhdrsize);
-               ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
-                          dd->ipath_rcvhdrsize);
-       }
-       return ret;
-}
-
-/*
- * debugging code and stats updates if no pio buffers available.
- */
-static noinline void no_pio_bufs(struct ipath_devdata *dd)
-{
-       unsigned long *shadow = dd->ipath_pioavailshadow;
-       __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
-
-       dd->ipath_upd_pio_shadow = 1;
-
-       /*
-        * not atomic, but if we lose a stat count in a while, that's OK
-        */
-       ipath_stats.sps_nopiobufs++;
-       if (!(++dd->ipath_consec_nopiobuf % 100000)) {
-               ipath_force_pio_avail_update(dd); /* at start */
-               ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
-                       "%llx %llx %llx %llx\n"
-                       "ipath  shadow:  %lx %lx %lx %lx\n",
-                       dd->ipath_consec_nopiobuf,
-                       (unsigned long)get_cycles(),
-                       (unsigned long long) le64_to_cpu(dma[0]),
-                       (unsigned long long) le64_to_cpu(dma[1]),
-                       (unsigned long long) le64_to_cpu(dma[2]),
-                       (unsigned long long) le64_to_cpu(dma[3]),
-                       shadow[0], shadow[1], shadow[2], shadow[3]);
-               /*
-                * 4 buffers per byte, 4 registers above, cover rest
-                * below
-                */
-               if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
-                   (sizeof(shadow[0]) * 4 * 4))
-                       ipath_dbg("2nd group: dmacopy: "
-                                 "%llx %llx %llx %llx\n"
-                                 "ipath  shadow:  %lx %lx %lx %lx\n",
-                                 (unsigned long long)le64_to_cpu(dma[4]),
-                                 (unsigned long long)le64_to_cpu(dma[5]),
-                                 (unsigned long long)le64_to_cpu(dma[6]),
-                                 (unsigned long long)le64_to_cpu(dma[7]),
-                                 shadow[4], shadow[5], shadow[6], shadow[7]);
-
-               /* at end, so update likely happened */
-               ipath_reset_availshadow(dd);
-       }
-}
-
-/*
- * common code for normal driver pio buffer allocation, and reserved
- * allocation.
- *
- * do appropriate marking as busy, etc.
- * Returns a pointer to the buffer (and sets *pbufnum) if one is found,
- * otherwise returns NULL.
- */
-static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
-       u32 *pbufnum, u32 first, u32 last, u32 firsti)
-{
-       int i, j, updated = 0;
-       unsigned piobcnt;
-       unsigned long flags;
-       unsigned long *shadow = dd->ipath_pioavailshadow;
-       u32 __iomem *buf;
-
-       piobcnt = last - first;
-       if (dd->ipath_upd_pio_shadow) {
-               /*
-                * Minor optimization.  If we had no buffers on the last call,
-                * start out by doing the update; continue and do the scan
-                * even if no buffers were updated, to be paranoid.
-                */
-               ipath_update_pio_bufs(dd);
-               updated++;
-               i = first;
-       } else
-               i = firsti;
-rescan:
-       /*
-        * While test_and_set_bit() is atomic, doing it and then the
-        * change_bit() as a pair is not.  See if this is the cause
-        * of the remaining armlaunch errors.
-        */
-       spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       for (j = 0; j < piobcnt; j++, i++) {
-               if (i >= last)
-                       i = first;
-               if (__test_and_set_bit((2 * i) + 1, shadow))
-                       continue;
-               /* flip generation bit */
-               __change_bit(2 * i, shadow);
-               break;
-       }
-       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-
-       if (j == piobcnt) {
-               if (!updated) {
-                       /*
-                        * first time through; shadow exhausted, but may be
-                        * buffers available, try an update and then rescan.
-                        */
-                       ipath_update_pio_bufs(dd);
-                       updated++;
-                       i = first;
-                       goto rescan;
-               } else if (updated == 1 && piobcnt <=
-                       ((dd->ipath_sendctrl
-                       >> INFINIPATH_S_UPDTHRESH_SHIFT) &
-                       INFINIPATH_S_UPDTHRESH_MASK)) {
-                       /*
-                        * For chips supporting and using the update
-                        * threshold, we need to force an update of the
-                        * in-memory copy if the count is less than the
-                        * threshold, then check one more time.
-                        */
-                       ipath_force_pio_avail_update(dd);
-                       ipath_update_pio_bufs(dd);
-                       updated++;
-                       i = first;
-                       goto rescan;
-               }
-
-               no_pio_bufs(dd);
-               buf = NULL;
-       } else {
-               if (i < dd->ipath_piobcnt2k)
-                       buf = (u32 __iomem *) (dd->ipath_pio2kbase +
-                                              i * dd->ipath_palign);
-               else
-                       buf = (u32 __iomem *)
-                               (dd->ipath_pio4kbase +
-                                (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
-               if (pbufnum)
-                       *pbufnum = i;
-       }
-
-       return buf;
-}
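
/*
 * Editor's note: illustrative sketch of the allocation scan above, not the
 * driver code itself.  A buffer index is claimed by setting its BUSY bit
 * (odd bit of the pair) and flipping its generation bit (even bit), scanning
 * circularly from a "last used" hint.  The names are hypothetical, and the
 * sketch uses a plain loop where the driver holds a spinlock.
 */
#include <stdint.h>
#include <stdio.h>

#define NBUFS 32

/* two bits per buffer, packed into one 64-bit word */
static uint64_t shadow;

static int claim_buf(unsigned first, unsigned last, unsigned hint)
{
        unsigned i = hint, j;

        for (j = 0; j < last - first; j++, i++) {
                if (i >= last)
                        i = first;
                if (shadow & (1ULL << (2 * i + 1)))
                        continue;                 /* already busy */
                shadow |= 1ULL << (2 * i + 1);    /* mark busy */
                shadow ^= 1ULL << (2 * i);        /* flip generation */
                return (int)i;
        }
        return -1;                                /* none free */
}

int main(void)
{
        int a = claim_buf(0, NBUFS, 5);
        int b = claim_buf(0, NBUFS, 5);

        printf("claimed %d then %d\n", a, b);
        return 0;
}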
-
-/**
- * ipath_getpiobuf - find an available pio buffer
- * @dd: the infinipath device
- * @plen: the size of the PIO buffer needed in 32-bit words
- * @pbufnum: the buffer number is placed here
- */
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
-{
-       u32 __iomem *buf;
-       u32 pnum, nbufs;
-       u32 first, lasti;
-
-       if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
-               first = dd->ipath_piobcnt2k;
-               lasti = dd->ipath_lastpioindexl;
-       } else {
-               first = 0;
-               lasti = dd->ipath_lastpioindex;
-       }
-       nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-       buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
-
-       if (buf) {
-               /*
-                * Set the next starting place.  It's just an optimization;
-                * it doesn't matter who wins on this, so no locking is needed.
-                */
-               if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
-                       dd->ipath_lastpioindexl = pnum + 1;
-               else
-                       dd->ipath_lastpioindex = pnum + 1;
-               if (dd->ipath_upd_pio_shadow)
-                       dd->ipath_upd_pio_shadow = 0;
-               if (dd->ipath_consec_nopiobuf)
-                       dd->ipath_consec_nopiobuf = 0;
-               ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
-                          pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
-               if (pbufnum)
-                       *pbufnum = pnum;
-
-       }
-       return buf;
-}
-
-/**
- * ipath_chg_pioavailkernel - change which send buffers are available for kernel
- * @dd: the infinipath device
- * @start: the starting send buffer number
- * @len: the number of send buffers
- * @avail: true if the buffers are available for kernel use, false otherwise
- */
-void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
-                             unsigned len, int avail)
-{
-       unsigned long flags;
-       unsigned end, cnt = 0;
-
-       /* There are two bits per send buffer (busy and generation) */
-       start *= 2;
-       end = start + len * 2;
-
-       spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       /* Set or clear the busy bit in the shadow. */
-       while (start < end) {
-               if (avail) {
-                       unsigned long dma;
-                       int i, im;
-                       /*
-                        * the BUSY bit will never be set, because we disarm
-                        * the user buffers before we hand them back to the
-                        * kernel.  We do have to make sure the generation
-                        * bit is set correctly in shadow, since it could
-                        * have changed many times while allocated to user.
-                        * We can't use the bitmap functions on the full
-                        * dma array because it is always little-endian, so
-                        * we have to flip to host-order first.
-                        * BITS_PER_LONG is slightly wrong, since it's
-                        * always 64 bits per register in the chip...
-                        * We only work on 64-bit kernels, so that's OK.
-                        */
-                       /* deal with 6110 chip bug on high register #s */
-                       i = start / BITS_PER_LONG;
-                       im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
-                               i ^ 1 : i;
-                       __clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
-                               + start, dd->ipath_pioavailshadow);
-                       dma = (unsigned long) le64_to_cpu(
-                               dd->ipath_pioavailregs_dma[im]);
-                       if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-                               + start) % BITS_PER_LONG, &dma))
-                               __set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-                                       + start, dd->ipath_pioavailshadow);
-                       else
-                               __clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-                                       + start, dd->ipath_pioavailshadow);
-                       __set_bit(start, dd->ipath_pioavailkernel);
-               } else {
-                       __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
-                               dd->ipath_pioavailshadow);
-                       __clear_bit(start, dd->ipath_pioavailkernel);
-               }
-               start += 2;
-       }
-
-       if (dd->ipath_pioupd_thresh) {
-               end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
-               cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
-       }
-       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-
-       /*
-        * When moving buffers from kernel to user, if number assigned to
-        * the user is less than the pio update threshold, and threshold
-        * is supported (cnt was computed > 0), drop the update threshold
-        * so we update at least once per allocated number of buffers.
-        * In any case, if the kernel buffers are less than the threshold,
-        * drop the threshold.  We don't bother increasing it, having once
-        * decreased it, since it would typically just cycle back and forth.
-        * If we don't decrease below buffers in use, we can wait a long
-        * time for an update, until some other context uses PIO buffers.
-        */
-       if (!avail && len < cnt)
-               cnt = len;
-       if (cnt < dd->ipath_pioupd_thresh) {
-               dd->ipath_pioupd_thresh = cnt;
-               ipath_dbg("Decreased pio update threshold to %u\n",
-                       dd->ipath_pioupd_thresh);
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
-                       << INFINIPATH_S_UPDTHRESH_SHIFT);
-               dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
-                       << INFINIPATH_S_UPDTHRESH_SHIFT;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-       }
-}
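
/*
 * Editor's note: illustrative sketch only.  It mirrors the threshold logic
 * at the end of ipath_chg_pioavailkernel() above: when buffers move between
 * the kernel and user pools, the PIO-avail update threshold is only ever
 * lowered, so an update is guaranteed at least once per allocated group of
 * buffers.  The names (pio_thresh, adjust_threshold) are hypothetical.
 */
#include <stdio.h>

static unsigned pio_thresh = 16;

static void adjust_threshold(unsigned kernel_bufs, unsigned moved_to_user,
                             int to_user)
{
        unsigned cnt = kernel_bufs;

        /* if handing buffers to user, never exceed the number handed over */
        if (to_user && moved_to_user < cnt)
                cnt = moved_to_user;
        /* only decrease; increasing would just cycle back and forth */
        if (cnt < pio_thresh) {
                pio_thresh = cnt;
                printf("threshold lowered to %u\n", pio_thresh);
        }
}

int main(void)
{
        adjust_threshold(64, 8, 1);     /* user gets 8 -> threshold 8 */
        adjust_threshold(4, 0, 0);      /* kernel pool shrank -> threshold 4 */
        return 0;
}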
-
-/**
- * ipath_create_rcvhdrq - create a receive header queue
- * @dd: the infinipath device
- * @pd: the port data
- *
- * This must be contiguous memory (from an I/O perspective), and must be
- * DMA'able (which means for some systems, it will go through an IOMMU,
- * or be forced into a low address range).
- */
-int ipath_create_rcvhdrq(struct ipath_devdata *dd,
-                        struct ipath_portdata *pd)
-{
-       int ret = 0;
-
-       if (!pd->port_rcvhdrq) {
-               dma_addr_t phys_hdrqtail;
-               gfp_t gfp_flags = GFP_USER | __GFP_COMP;
-               int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-                               sizeof(u32), PAGE_SIZE);
-
-               pd->port_rcvhdrq = dma_alloc_coherent(
-                       &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
-                       gfp_flags);
-
-               if (!pd->port_rcvhdrq) {
-                       ipath_dev_err(dd, "attempt to allocate %d bytes "
-                                     "for port %u rcvhdrq failed\n",
-                                     amt, pd->port_port);
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-
-               if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-                       pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
-                               &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
-                               GFP_KERNEL);
-                       if (!pd->port_rcvhdrtail_kvaddr) {
-                               ipath_dev_err(dd, "attempt to allocate 1 page "
-                                       "for port %u rcvhdrqtailaddr "
-                                       "failed\n", pd->port_port);
-                               ret = -ENOMEM;
-                               dma_free_coherent(&dd->pcidev->dev, amt,
-                                       pd->port_rcvhdrq,
-                                       pd->port_rcvhdrq_phys);
-                               pd->port_rcvhdrq = NULL;
-                               goto bail;
-                       }
-                       pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
-                       ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
-                                  "physical\n", pd->port_port,
-                                  (unsigned long long) phys_hdrqtail);
-               }
-
-               pd->port_rcvhdrq_size = amt;
-
-               ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
-                          "for port %u rcvhdr Q\n",
-                          amt >> PAGE_SHIFT, pd->port_rcvhdrq,
-                          (unsigned long) pd->port_rcvhdrq_phys,
-                          (unsigned long) pd->port_rcvhdrq_size,
-                          pd->port_port);
-       } else {
-               ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
-                          "hdrtailaddr@%p %llx physical\n",
-                          pd->port_port, pd->port_rcvhdrq,
-                          (unsigned long long) pd->port_rcvhdrq_phys,
-                          pd->port_rcvhdrtail_kvaddr, (unsigned long long)
-                          pd->port_rcvhdrqtailaddr_phys);
-       }
-       /* clear for security and sanity on each use */
-       memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
-       if (pd->port_rcvhdrtail_kvaddr)
-               memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
-
-       /*
-        * tell chip each time we init it, even if we are re-using previous
-        * memory (we zero the register at process close)
-        */
-       ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-                             pd->port_port, pd->port_rcvhdrqtailaddr_phys);
-       ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
-                             pd->port_port, pd->port_rcvhdrq_phys);
-
-bail:
-       return ret;
-}
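
/*
 * Editor's note: illustrative sketch only.  It reproduces just the sizing
 * arithmetic used above: the receive header queue is rcvhdrcnt entries of
 * rcvhdrentsize 32-bit words, rounded up to a whole number of pages, and the
 * resulting size is what dma_alloc_coherent() would be asked for.  The
 * constants here are made up for the example.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned rcvhdrcnt = 512;       /* entries in the queue (example) */
        unsigned rcvhdrentsize = 32;    /* 32-bit words per entry (example) */
        size_t amt = ALIGN_UP((size_t)rcvhdrcnt * rcvhdrentsize *
                              sizeof(uint32_t), PAGE_SIZE);

        printf("rcvhdrq allocation: %zu bytes (%zu pages)\n",
               amt, amt / PAGE_SIZE);
        return 0;
}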
-
-
-/*
- * Flush all sends that might be in the ready to send state, as well as any
- * that are in the process of being sent.   Used whenever we need to be
- * sure the send side is idle.  Cleans up all buffer state by canceling
- * all pio buffers, and issuing an abort, which cleans up anything in the
- * launch fifo.  The cancel is superfluous on some chip versions, but
- * it's safer to always do it.
- * PIOAvail bits are updated by the chip as if normal send had happened.
- */
-void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
-{
-       unsigned long flags;
-
-       if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
-               ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
-               goto bail;
-       }
-       /*
-        * If we have SDMA, and it's not disabled, we have to kick off the
-        * abort state machine, provided we aren't already aborting.
-        * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
-        * we skip the rest of this routine; it is already "in progress".
-        */
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
-               int skip_cancel;
-               unsigned long *statp = &dd->ipath_sdma_status;
-
-               spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-               skip_cancel =
-                       test_and_set_bit(IPATH_SDMA_ABORTING, statp)
-                       && !test_bit(IPATH_SDMA_DISABLED, statp);
-               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-               if (skip_cancel)
-                       goto bail;
-       }
-
-       ipath_dbg("Cancelling all in-progress send buffers\n");
-
-       /* skip armlaunch errs for a while */
-       dd->ipath_lastcancel = jiffies + HZ / 2;
-
-       /*
-        * The abort bit is auto-clearing.  We also don't want pioavail
-        * update happening during this, and we don't want any other
-        * sends going out, so turn those off for the duration.  We read
-        * the scratch register to be sure that cancels and the abort
-        * have taken effect in the chip.  Otherwise two parts are same
-        * have taken effect in the chip.  Otherwise, the two parts are
-        * the same as in ipath_force_pio_avail_update().
-        */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
-               | INFINIPATH_S_PIOENABLE);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-               dd->ipath_sendctrl | INFINIPATH_S_ABORT);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       /* disarm all send buffers */
-       ipath_disarm_piobufs(dd, 0,
-               dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
-
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-               set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
-
-       if (restore_sendctrl) {
-               /* else done by caller later if needed */
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
-                       INFINIPATH_S_PIOENABLE;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl);
-               /* and again, be sure all have hit the chip */
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-       }
-
-       if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
-           !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
-           test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
-               spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-               /* only wait so long for intr */
-               dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
-               dd->ipath_sdma_reset_wait = 200;
-               if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-                       tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-       }
-bail:;
-}
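
/*
 * Editor's note: illustrative sketch, not driver code.  It shows the
 * write-then-read-scratch pattern used above: after changing sendctrl the
 * driver reads a scratch register so the write is known to have reached the
 * chip before the buffers are disarmed.  The register array and names here
 * are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

enum { KR_SENDCTRL, KR_SCRATCH, KR_MAX };

static volatile uint64_t regs[KR_MAX];

static void write_kreg(int r, uint64_t v) { regs[r] = v; }
static uint64_t read_kreg(int r)          { return regs[r]; }

#define S_PIOENABLE  (1ULL << 0)
#define S_AVAILUPD   (1ULL << 1)
#define S_ABORT      (1ULL << 2)

int main(void)
{
        uint64_t sendctrl = S_PIOENABLE | S_AVAILUPD;

        /* stop new sends and pioavail updates, then issue the abort */
        sendctrl &= ~(S_PIOENABLE | S_AVAILUPD);
        write_kreg(KR_SENDCTRL, sendctrl | S_ABORT);
        (void)read_kreg(KR_SCRATCH);    /* flush: be sure the write landed */

        /* ... disarm the send buffers here ... */

        /* restore sending once cleanup is done */
        sendctrl |= S_PIOENABLE | S_AVAILUPD;
        write_kreg(KR_SENDCTRL, sendctrl);
        (void)read_kreg(KR_SCRATCH);

        printf("sendctrl now %#llx\n", (unsigned long long)regs[KR_SENDCTRL]);
        return 0;
}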
-
-/*
- * Force an update of in-memory copy of the pioavail registers, when
- * needed for any of a variety of reasons.  We read the scratch register
- * to make it highly likely that the update will have happened by the
- * time we return.  If already off (as in cancel_sends above), this
- * routine is a nop, on the assumption that the caller will "do the
- * right thing".
- */
-void ipath_force_pio_avail_update(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       }
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-}
-
-static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
-                               int linitcmd)
-{
-       u64 mod_wd;
-       static const char *what[4] = {
-               [0] = "NOP",
-               [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
-               [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
-               [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
-       };
-
-       if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
-               /*
-                * If we are told to disable, note that so link-recovery
-                * code does not attempt to bring us back up.
-                */
-               preempt_disable();
-               dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
-               preempt_enable();
-       } else if (linitcmd) {
-               /*
-                * Any other linkinitcmd will lead to LINKDOWN and then
-                * to INIT (if all is well), so clear flag to let
-                * link-recovery code attempt to bring us back up.
-                */
-               preempt_disable();
-               dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
-               preempt_enable();
-       }
-
-       mod_wd = (linkcmd << dd->ibcc_lc_shift) |
-               (linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-       ipath_cdbg(VERBOSE,
-               "Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
-               dd->ipath_unit, what[linkcmd], linitcmd,
-               ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
-                       ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                        dd->ipath_ibcctrl | mod_wd);
-       /* read from chip so write is flushed */
-       (void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-}
-
-int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
-{
-       u32 lstate;
-       int ret;
-
-       switch (newstate) {
-       case IPATH_IB_LINKDOWN_ONLY:
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINKDOWN:
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-                                       INFINIPATH_IBCC_LINKINITCMD_POLL);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINKDOWN_SLEEP:
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-                                       INFINIPATH_IBCC_LINKINITCMD_SLEEP);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINKDOWN_DISABLE:
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-                                       INFINIPATH_IBCC_LINKINITCMD_DISABLE);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINKARM:
-               if (dd->ipath_flags & IPATH_LINKARMED) {
-                       ret = 0;
-                       goto bail;
-               }
-               if (!(dd->ipath_flags &
-                     (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
-                       ret = -EINVAL;
-                       goto bail;
-               }
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
-
-               /*
-                * Since the port can transition to ACTIVE by receiving
-                * a non VL 15 packet, wait for either state.
-                */
-               lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
-               break;
-
-       case IPATH_IB_LINKACTIVE:
-               if (dd->ipath_flags & IPATH_LINKACTIVE) {
-                       ret = 0;
-                       goto bail;
-               }
-               if (!(dd->ipath_flags & IPATH_LINKARMED)) {
-                       ret = -EINVAL;
-                       goto bail;
-               }
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
-               lstate = IPATH_LINKACTIVE;
-               break;
-
-       case IPATH_IB_LINK_LOOPBACK:
-               dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
-               dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-
-               /* turn heartbeat off, as it causes loopback to fail */
-               dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-                                      IPATH_IB_HRTBT_OFF);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINK_EXTERNAL:
-               dev_info(&dd->pcidev->dev,
-                       "Disabling IB local loopback (normal)\n");
-               dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-                                      IPATH_IB_HRTBT_ON);
-               dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       /*
-        * Heartbeat can be explicitly enabled by the user via the
-        * "hrtbt_enable" file; if disabled there, trying to enable it here
-        * will have no effect.  Implicit changes (heartbeat off when
-        * loopback on, and vice versa) are included to ease testing.
-        */
-       case IPATH_IB_LINK_HRTBT:
-               ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-                       IPATH_IB_HRTBT_ON);
-               goto bail;
-
-       case IPATH_IB_LINK_NO_HRTBT:
-               ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-                       IPATH_IB_HRTBT_OFF);
-               goto bail;
-
-       default:
-               ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
-               ret = -EINVAL;
-               goto bail;
-       }
-       ret = ipath_wait_linkstate(dd, lstate, 2000);
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_set_mtu - set the MTU
- * @dd: the infinipath device
- * @arg: the new MTU
- *
- * We can handle "any" incoming size; the issue here is whether we
- * need to restrict our outgoing size.   For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link INIT state...
- */
-int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
-{
-       u32 piosize;
-       int changed = 0;
-       int ret;
-
-       /*
-        * mtu is IB data payload max.  It's the largest power of 2 less
-        * than piosize (or even larger, since it only really controls the
-        * largest we can receive; we can send the max of the mtu and
-        * piosize).  We check that it's one of the valid IB sizes.
-        */
-       if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
-           (arg != 4096 || !ipath_mtu4096)) {
-               ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
-               ret = -EINVAL;
-               goto bail;
-       }
-       if (dd->ipath_ibmtu == arg) {
-               ret = 0;        /* same as current */
-               goto bail;
-       }
-
-       piosize = dd->ipath_ibmaxlen;
-       dd->ipath_ibmtu = arg;
-
-       if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
-               /* Only if it's not the initial value (or reset to it) */
-               if (piosize != dd->ipath_init_ibmaxlen) {
-                       if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
-                               piosize = dd->ipath_init_ibmaxlen;
-                       dd->ipath_ibmaxlen = piosize;
-                       changed = 1;
-               }
-       } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
-               piosize = arg + IPATH_PIO_MAXIBHDR;
-               ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
-                          "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
-                          arg);
-               dd->ipath_ibmaxlen = piosize;
-               changed = 1;
-       }
-
-       if (changed) {
-               u64 ibc = dd->ipath_ibcctrl, ibdw;
-               /*
-                * update our housekeeping variables, and set IBC max
-                * size, same as init code; max IBC is max we allow in
-                * buffer, less the qword pbc, plus 1 for ICRC, in dwords
-                */
-               dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
-               ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
-               ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
-                        dd->ibcc_mpl_shift);
-               ibc |= ibdw << dd->ibcc_mpl_shift;
-               dd->ipath_ibcctrl = ibc;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-               dd->ipath_f_tidtemplate(dd);
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
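
/*
 * Editor's note: illustrative sketch of the MTU validation above, not the
 * driver function.  The IB MTU must be one of the power-of-two sizes 256,
 * 512, 1024, 2048 or (when supported) 4096; anything else is rejected, and
 * the per-packet buffer limit is the MTU plus room for the largest header.
 * MAX_IB_HDR below is a made-up stand-in for IPATH_PIO_MAXIBHDR.
 */
#include <stdio.h>

#define MAX_IB_HDR 128u

static int valid_ib_mtu(unsigned mtu, int mtu4096_ok)
{
        return mtu == 256 || mtu == 512 || mtu == 1024 || mtu == 2048 ||
               (mtu == 4096 && mtu4096_ok);
}

int main(void)
{
        unsigned mtu = 2048;

        if (!valid_ib_mtu(mtu, 0)) {
                printf("invalid IB MTU %u\n", mtu);
                return 1;
        }
        printf("MTU %u -> max packet buffer %u bytes\n", mtu, mtu + MAX_IB_HDR);
        return 0;
}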
-
-int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
-{
-       dd->ipath_lid = lid;
-       dd->ipath_lmc = lmc;
-
-       dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
-               (~((1U << lmc) - 1)) << 16);
-
-       dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
-
-       return 0;
-}
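
/*
 * Editor's note: illustrative sketch of the LID/LMC packing used above, not
 * the driver code.  The low lmc bits of the LID are masked for routing, so
 * the value handed to the chip packs the LID in the low 16 bits and a mask
 * with the low lmc bits cleared in the high 16 bits.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t pack_lid_lmc(uint16_t lid, uint8_t lmc)
{
        uint32_t mask = (uint32_t)(~((1U << lmc) - 1)) & 0xffff;

        return lid | (mask << 16);
}

int main(void)
{
        printf("lid 0x10 lmc 2 -> %#x\n", (unsigned)pack_lid_lmc(0x10, 2));
        return 0;
}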
-
-
-/**
- * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
- * @dd: the infinipath device
- * @regno: the register number to write
- * @port: the port containing the register
- * @value: the value to write
- *
- * Registers that vary with the chip implementation constants (port)
- * use this routine.
- */
-void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
-                         unsigned port, u64 value)
-{
-       u16 where;
-
-       if (port < dd->ipath_portcnt &&
-           (regno == dd->ipath_kregs->kr_rcvhdraddr ||
-            regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
-               where = regno + port;
-       else
-               where = -1;
-
-       ipath_write_kreg(dd, where, value);
-}
-
-/*
- * The following routines deal with the "obviously simple" task of overriding
- * the state of the LEDs, which normally indicate link physical and logical
- * status.  The complications arise in dealing with different hardware
- * mappings and the board-dependent routine being called from interrupts,
- * and then there's the requirement to _flash_ them.
- */
-#define LED_OVER_FREQ_SHIFT 8
-#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
-/* Below is "non-zero" to force override, but both actual LEDs are off */
-#define LED_OVER_BOTH_OFF (8)
-
-static void ipath_run_led_override(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-       int timeoff;
-       int pidx;
-       u64 lstate, ltstate, val;
-
-       if (!(dd->ipath_flags & IPATH_INITTED))
-               return;
-
-       pidx = dd->ipath_led_override_phase++ & 1;
-       dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
-       timeoff = dd->ipath_led_override_timeoff;
-
-       /*
-        * The code below potentially restores the LED values per current
-        * status; it could also set up the traffic-blink register, but
-        * that is left to the per-chip functions.
-        */
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-       ltstate = ipath_ib_linktrstate(dd, val);
-       lstate = ipath_ib_linkstate(dd, val);
-
-       dd->ipath_f_setextled(dd, lstate, ltstate);
-       mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
-}
-
-void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
-{
-       int timeoff, freq;
-
-       if (!(dd->ipath_flags & IPATH_INITTED))
-               return;
-
-       /* First check if we are blinking.  If not, use 1 Hz polling. */
-       timeoff = HZ;
-       freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
-
-       if (freq) {
-               /* For blink, set each phase from one nybble of val */
-               dd->ipath_led_override_vals[0] = val & 0xF;
-               dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
-               timeoff = (HZ << 4)/freq;
-       } else {
-               /* Non-blink set both phases the same. */
-               dd->ipath_led_override_vals[0] = val & 0xF;
-               dd->ipath_led_override_vals[1] = val & 0xF;
-       }
-       dd->ipath_led_override_timeoff = timeoff;
-
-       /*
-        * If the timer has not already been started, do so. Use a "quick"
-        * timeout so the function will be called soon, to look at our request.
-        */
-       if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
-               /* Need to start timer */
-               setup_timer(&dd->ipath_led_override_timer,
-                               ipath_run_led_override, (unsigned long)dd);
-
-               dd->ipath_led_override_timer.expires = jiffies + 1;
-               add_timer(&dd->ipath_led_override_timer);
-       } else
-               atomic_dec(&dd->ipath_led_override_timer_active);
-}
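
/*
 * Editor's note: illustrative sketch of the blink timing above, not driver
 * code.  The override value carries a frequency in bits 8-15 (the
 * LED_OVER_FREQ_* fields) and one LED pattern per nybble of the low byte;
 * the timer period is (HZ * 16) / freq jiffies so the two phases alternate
 * at the requested rate.  HZ here is fixed at 100 purely for the example.
 */
#include <stdio.h>

#define HZ 100
#define FREQ_SHIFT 8
#define FREQ_MASK (0xFF << FREQ_SHIFT)

int main(void)
{
        unsigned val = (4 << FREQ_SHIFT) | 0x2A; /* freq 4, phases 0xA and 0x2 */
        unsigned freq = (val & FREQ_MASK) >> FREQ_SHIFT;
        unsigned phase0 = val & 0xF, phase1 = (val >> 4) & 0xF;
        unsigned timeoff = freq ? (HZ << 4) / freq : HZ;

        printf("phases %#x/%#x, timer period %u jiffies\n",
               phase0, phase1, timeoff);
        return 0;
}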
-
-/**
- * ipath_shutdown_device - shut down a device
- * @dd: the infinipath device
- *
- * This is called to make the device quiet when we are about to
- * unload the driver, and also when the device is administratively
- * disabled.   It does not free any data structures.
- * Everything it does has to be set up again by ipath_init_chip(dd, 1).
- */
-void ipath_shutdown_device(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       ipath_dbg("Shutting down the device\n");
-
-       ipath_hol_up(dd); /* make sure user processes aren't suspended */
-
-       dd->ipath_flags |= IPATH_LINKUNK;
-       dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
-                            IPATH_LINKINIT | IPATH_LINKARMED |
-                            IPATH_LINKACTIVE);
-       *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
-                               IPATH_STATUS_IB_READY);
-
-       /* mask interrupts, but not errors */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-
-       dd->ipath_rcvctrl = 0;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-               teardown_sdma(dd);
-
-       /*
-        * Gracefully stop all sends, allowing any in progress to trickle
-        * out first.
-        */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl = 0;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       /* flush it */
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       /*
-        * Wait long enough for anything that's going to trickle out to have
-        * actually done so.
-        */
-       udelay(5);
-
-       dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
-
-       ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
-       ipath_cancel_sends(dd, 0);
-
-       /*
-        * We are shutting down, so tell the components that care.  We don't
-        * do this on just a link state change; much as with ethernet, a
-        * cable unplug, etc. doesn't change driver state.
-        */
-       signal_ib_event(dd, IB_EVENT_PORT_ERR);
-
-       /* disable IBC */
-       dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control | INFINIPATH_C_FREEZEMODE);
-
-       /*
-        * Clear SerdesEnable and turn the LEDs off; do this here because
-        * we are unloading, so we can't count on interrupts to move things
-        * along.  The LEDs are turned off explicitly for the same reason.
-        */
-       dd->ipath_f_quiet_serdes(dd);
-
-       /* stop all the timers that might still be running */
-       del_timer_sync(&dd->ipath_hol_timer);
-       if (dd->ipath_stats_timer_active) {
-               del_timer_sync(&dd->ipath_stats_timer);
-               dd->ipath_stats_timer_active = 0;
-       }
-       if (dd->ipath_intrchk_timer.data) {
-               del_timer_sync(&dd->ipath_intrchk_timer);
-               dd->ipath_intrchk_timer.data = 0;
-       }
-       if (atomic_read(&dd->ipath_led_override_timer_active)) {
-               del_timer_sync(&dd->ipath_led_override_timer);
-               atomic_set(&dd->ipath_led_override_timer_active, 0);
-       }
-
-       /*
-        * clear all interrupts and errors, so that the next time the driver
-        * is loaded or device is enabled, we know that whatever is set
-        * happened while we were unloaded
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                        ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
-
-       ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
-       ipath_update_eeprom_log(dd);
-}
-
-/**
- * ipath_free_pddata - free a port's allocated data
- * @dd: the infinipath device
- * @pd: the portdata structure
- *
- * Free up any allocated data for a port.
- * This should not touch anything that would affect a simultaneous
- * re-allocation of port data, because it is called after ipath_mutex
- * is released (and can be called from reinit as well).
- * It should never change any chip state, or global driver state.
- * (The only exception to global state is freeing the port0 port0_skbs.)
- */
-void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
-{
-       if (!pd)
-               return;
-
-       if (pd->port_rcvhdrq) {
-               ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
-                          "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
-                          (unsigned long) pd->port_rcvhdrq_size);
-               dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
-                                 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
-               pd->port_rcvhdrq = NULL;
-               if (pd->port_rcvhdrtail_kvaddr) {
-                       dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-                                        pd->port_rcvhdrtail_kvaddr,
-                                        pd->port_rcvhdrqtailaddr_phys);
-                       pd->port_rcvhdrtail_kvaddr = NULL;
-               }
-       }
-       if (pd->port_port && pd->port_rcvegrbuf) {
-               unsigned e;
-
-               for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-                       void *base = pd->port_rcvegrbuf[e];
-                       size_t size = pd->port_rcvegrbuf_size;
-
-                       ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
-                                  "chunk %u/%u\n", base,
-                                  (unsigned long) size,
-                                  e, pd->port_rcvegrbuf_chunks);
-                       dma_free_coherent(&dd->pcidev->dev, size,
-                               base, pd->port_rcvegrbuf_phys[e]);
-               }
-               kfree(pd->port_rcvegrbuf);
-               pd->port_rcvegrbuf = NULL;
-               kfree(pd->port_rcvegrbuf_phys);
-               pd->port_rcvegrbuf_phys = NULL;
-               pd->port_rcvegrbuf_chunks = 0;
-       } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
-               unsigned e;
-               struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
-
-               dd->ipath_port0_skbinfo = NULL;
-               ipath_cdbg(VERBOSE, "free closed port %d "
-                          "ipath_port0_skbinfo @ %p\n", pd->port_port,
-                          skbinfo);
-               for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
-                       if (skbinfo[e].skb) {
-                               pci_unmap_single(dd->pcidev, skbinfo[e].phys,
-                                                dd->ipath_ibmaxlen,
-                                                PCI_DMA_FROMDEVICE);
-                               dev_kfree_skb(skbinfo[e].skb);
-                       }
-               vfree(skbinfo);
-       }
-       kfree(pd->port_tid_pg_list);
-       vfree(pd->subport_uregbase);
-       vfree(pd->subport_rcvegrbuf);
-       vfree(pd->subport_rcvhdr_base);
-       kfree(pd);
-}
-
-static int __init infinipath_init(void)
-{
-       int ret;
-
-       if (ipath_debug & __IPATH_DBG)
-               printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
-
-       /*
-        * These must be called before the driver is registered with
-        * the PCI subsystem.
-        */
-       idr_init(&unit_table);
-
-       ret = pci_register_driver(&ipath_driver);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Unable to register driver: error %d\n", -ret);
-               goto bail_unit;
-       }
-
-       ret = ipath_init_ipathfs();
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
-                      "ipathfs: error %d\n", -ret);
-               goto bail_pci;
-       }
-
-       goto bail;
-
-bail_pci:
-       pci_unregister_driver(&ipath_driver);
-
-bail_unit:
-       idr_destroy(&unit_table);
-
-bail:
-       return ret;
-}
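
/*
 * Editor's note: illustrative sketch of the init/unwind ordering above, not
 * the driver itself.  Each setup step that succeeds must be undone if a
 * later step fails, which is what the goto labels in infinipath_init()
 * express; here the steps are stubbed with functions that can be made to
 * fail for demonstration.
 */
#include <stdio.h>

static int setup_table(void)     { printf("table up\n");    return 0; }
static void teardown_table(void) { printf("table down\n");            }
static int register_drv(void)    { printf("driver up\n");   return 0; }
static void unregister_drv(void) { printf("driver down\n");           }
static int setup_fs(void)        { printf("fs up\n");       return -1; /* fail */ }

static int demo_init(void)
{
        int ret;

        ret = setup_table();
        if (ret)
                goto bail;
        ret = register_drv();
        if (ret)
                goto bail_table;
        ret = setup_fs();
        if (ret)
                goto bail_drv;
        return 0;

bail_drv:
        unregister_drv();
bail_table:
        teardown_table();
bail:
        return ret;
}

int main(void)
{
        printf("demo_init -> %d\n", demo_init());
        return 0;
}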
-
-static void __exit infinipath_cleanup(void)
-{
-       ipath_exit_ipathfs();
-
-       ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
-       pci_unregister_driver(&ipath_driver);
-
-       idr_destroy(&unit_table);
-}
-
-/**
- * ipath_reset_device - reset the chip if possible
- * @unit: the device to reset
- *
- * Whether or not reset is successful, we attempt to re-initialize the chip
- * (that is, much like a driver unload/reload).  We clear the INITTED flag
- * so that the various entry points will fail until we reinitialize.  For
- * now, we only allow this if no user ports are open that use chip resources.
- */
-int ipath_reset_device(int unit)
-{
-       int ret, i;
-       struct ipath_devdata *dd = ipath_lookup(unit);
-       unsigned long flags;
-
-       if (!dd) {
-               ret = -ENODEV;
-               goto bail;
-       }
-
-       if (atomic_read(&dd->ipath_led_override_timer_active)) {
-               /* Need to stop LED timer, _then_ shut off LEDs */
-               del_timer_sync(&dd->ipath_led_override_timer);
-               atomic_set(&dd->ipath_led_override_timer_active, 0);
-       }
-
-       /* Shut off LEDs after we are sure timer is not running */
-       dd->ipath_led_override = LED_OVER_BOTH_OFF;
-       dd->ipath_f_setextled(dd, 0, 0);
-
-       dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
-
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
-               dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
-                        "not initialized or not present\n", unit);
-               ret = -ENXIO;
-               goto bail;
-       }
-
-       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-       if (dd->ipath_pd)
-               for (i = 1; i < dd->ipath_cfgports; i++) {
-                       if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
-                               continue;
-                       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-                       ipath_dbg("unit %u port %d is in use "
-                                 "(PID %u cmd %s), can't reset\n",
-                                 unit, i,
-                                 pid_nr(dd->ipath_pd[i]->port_pid),
-                                 dd->ipath_pd[i]->port_comm);
-                       ret = -EBUSY;
-                       goto bail;
-               }
-       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-               teardown_sdma(dd);
-
-       dd->ipath_flags &= ~IPATH_INITTED;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-       ret = dd->ipath_f_reset(dd);
-       if (ret == 1) {
-               ipath_dbg("Reinitializing unit %u after reset attempt\n",
-                         unit);
-               ret = ipath_init_chip(dd, 1);
-       } else
-               ret = -EAGAIN;
-       if (ret)
-               ipath_dev_err(dd, "Reinitialize unit %u after "
-                             "reset failed with %d\n", unit, ret);
-       else
-               dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
-                        "resetting\n", unit);
-
-bail:
-       return ret;
-}
-
-/*
- * send a signal to all the processes that have the driver open
- * through the normal interfaces (i.e., everything other than diags
- * interface).  Returns number of signalled processes.
- */
-static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
-{
-       int i, sub, any = 0;
-       struct pid *pid;
-       unsigned long flags;
-
-       if (!dd->ipath_pd)
-               return 0;
-
-       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-       for (i = 1; i < dd->ipath_cfgports; i++) {
-               if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
-                       continue;
-               pid = dd->ipath_pd[i]->port_pid;
-               if (!pid)
-                       continue;
-
-               dev_info(&dd->pcidev->dev, "context %d in use "
-                         "(PID %u), sending signal %d\n",
-                         i, pid_nr(pid), sig);
-               kill_pid(pid, sig, 1);
-               any++;
-               for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
-                       pid = dd->ipath_pd[i]->port_subpid[sub];
-                       if (!pid)
-                               continue;
-                       dev_info(&dd->pcidev->dev, "sub-context "
-                               "%d:%d in use (PID %u), sending "
-                               "signal %d\n", i, sub, pid_nr(pid), sig);
-                       kill_pid(pid, sig, 1);
-                       any++;
-               }
-       }
-       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-       return any;
-}
-
-static void ipath_hol_signal_down(struct ipath_devdata *dd)
-{
-       if (ipath_signal_procs(dd, SIGSTOP))
-               ipath_dbg("Stopped some processes\n");
-       ipath_cancel_sends(dd, 1);
-}
-
-
-static void ipath_hol_signal_up(struct ipath_devdata *dd)
-{
-       if (ipath_signal_procs(dd, SIGCONT))
-               ipath_dbg("Continued some processes\n");
-}
-
-/*
- * link is down: stop any user processes and flush pending sends
- * to prevent HoL blocking, then start the HoL timer, which
- * alternately continues and stops the processes so they can detect
- * the link going down if they want and react to it.
- * Timer may already be running, so use mod_timer, not add_timer.
- */
-void ipath_hol_down(struct ipath_devdata *dd)
-{
-       dd->ipath_hol_state = IPATH_HOL_DOWN;
-       ipath_hol_signal_down(dd);
-       dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
-       dd->ipath_hol_timer.expires = jiffies +
-               msecs_to_jiffies(ipath_hol_timeout_ms);
-       mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
-}
-
-/*
- * link is up: continue any user processes, and make sure the timer
- * is a nop if it is still running.  Let the timer keep running, if
- * set; it will nop when it sees the link is up.
- */
-void ipath_hol_up(struct ipath_devdata *dd)
-{
-       ipath_hol_signal_up(dd);
-       dd->ipath_hol_state = IPATH_HOL_UP;
-}
-
-/*
- * toggle the running/not running state of user processes
- * to prevent HoL blocking on chip resources, while still allowing
- * user processes to do their link-down special-case handling.
- * Should only be called via the timer.
- */
-void ipath_hol_event(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-       if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
-               && dd->ipath_hol_state != IPATH_HOL_UP) {
-               dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
-               ipath_dbg("Stopping processes\n");
-               ipath_hol_signal_down(dd);
-       } else { /* may do "extra" if also in ipath_hol_up() */
-               dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
-               ipath_dbg("Continuing processes\n");
-               ipath_hol_signal_up(dd);
-       }
-       if (dd->ipath_hol_state == IPATH_HOL_UP)
-               ipath_dbg("link's up, don't resched timer\n");
-       else {
-               dd->ipath_hol_timer.expires = jiffies +
-                       msecs_to_jiffies(ipath_hol_timeout_ms);
-               mod_timer(&dd->ipath_hol_timer,
-                       dd->ipath_hol_timer.expires);
-       }
-}
-
-int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
-{
-       u64 val;
-
-       if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
-               return -1;
-       if (dd->ipath_rx_pol_inv != new_pol_inv) {
-               dd->ipath_rx_pol_inv = new_pol_inv;
-               val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-               val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-                        INFINIPATH_XGXS_RX_POL_SHIFT);
-               val |= ((u64)dd->ipath_rx_pol_inv) <<
-                       INFINIPATH_XGXS_RX_POL_SHIFT;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-       }
-       return 0;
-}
-
-/*
- * Disable and enable the armlaunch error.  Used for PIO bandwidth testing on
- * the 7220, which is count-based, rather than trigger-based.  Safe for the
- * driver check, since it's at init.   Not completely safe when used for
- * user-mode checking, since some error checking can be lost, but not
- * particularly risky, and only has problematic side-effects in the face of
- * very buggy user code.  There is no reference counting, but that's also
- * fine, given the intended use.
- */
-void ipath_enable_armlaunch(struct ipath_devdata *dd)
-{
-       dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-               INFINIPATH_E_SPIOARMLAUNCH);
-       dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-               dd->ipath_errormask);
-}
-
-void ipath_disable_armlaunch(struct ipath_devdata *dd)
-{
-       /* so don't re-enable if already set */
-       dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
-       dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-               dd->ipath_errormask);
-}
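
(Editorial sketch, not part of the original file.) Per the comment above, this enable/disable pair is meant to bracket count-based PIO bandwidth tests on the 7220 so armlaunch errors raised by back-to-back sends are not logged during the test. A minimal usage sketch; run_pio_bw_test() is a hypothetical test routine, not something in this driver:

static void pio_bw_test_example(struct ipath_devdata *dd)
{
        ipath_disable_armlaunch(dd);    /* suppress armlaunch errors during the timed loop */
        run_pio_bw_test(dd);            /* hypothetical test routine */
        ipath_enable_armlaunch(dd);
}
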
-
-module_init(infinipath_init);
-module_exit(infinipath_cleanup);
diff --git a/drivers/staging/rdma/ipath/ipath_eeprom.c b/drivers/staging/rdma/ipath/ipath_eeprom.c
deleted file mode 100644 (file)
index ef84107..0000000
+++ /dev/null
@@ -1,1183 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_kernel.h"
-
-/*
- * InfiniPath I2C driver for a serial eeprom.  This is not a generic
- * I2C interface.  For a start, the device we're using (Atmel AT24C11)
- * doesn't work like a regular I2C device.  It looks like one
- * electrically, but not logically.  Normal I2C devices have a single
- * 7-bit or 10-bit I2C address that they respond to.  Valid 7-bit
- * addresses range from 0x03 to 0x77.  Addresses 0x00 to 0x02 and 0x78
- * to 0x7F are special reserved addresses (e.g. 0x00 is the "general
- * call" address.)  The Atmel device, on the other hand, responds to ALL
- * 7-bit addresses.  It's designed to be the only device on a given I2C
- * bus.  A 7-bit address corresponds to the memory address within the
- * Atmel device itself.
- *
- * Also, the timing requirements mean more than simple software
- * bitbanging, with readbacks from chip to ensure timing (simple udelay
- * is not enough).
- *
- * This all means that accessing the device is specialized enough
- * that using the standard kernel I2C bitbanging interface would be
- * impossible.  For example, the core I2C eeprom driver expects to find
- * a device at one or more of a limited set of addresses only.  It doesn't
- * allow writing to an eeprom.  It also doesn't provide any means of
- * accessing eeprom contents from within the kernel, only via sysfs.
- */
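
(Editorial sketch.) To make the oddity described above concrete: for the legacy Atmel part, the "device address" byte sent after START is really the memory offset, shifted up one bit with the read/write flag in bit 0, which is what the legacy branch of ipath_eeprom_internal_read() below constructs. A standalone illustration; the helper name is ours, not the driver's:

static inline u8 legacy_eeprom_cmd_byte(u8 offset, int is_read)
{
        /* 7-bit "address" is the EEPROM offset; bit 0 selects read (1) or write (0) */
        return (u8)((offset << 1) | (is_read ? 1 : 0));
}
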
-
-/* Added functionality for IBA7220-based cards */
-#define IPATH_EEPROM_DEV_V1 0xA0
-#define IPATH_EEPROM_DEV_V2 0xA2
-#define IPATH_TEMP_DEV 0x98
-#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
-#define IPATH_NO_DEV (0xFF)
-
-/*
- * The number of I2C chains is proliferating. Table below brings
- * some order to the madness. The basic principle is that the
- * table is scanned from the top, and a "probe" is made to the
- * device probe_dev. If that succeeds, the chain is considered
- * to be of that type, and dd->i2c_chain_type is set to the index+1
- * of the entry.
- * The +1 is so static initialization can mean "unknown, do probe."
- */
-static struct i2c_chain_desc {
-       u8 probe_dev;   /* If seen at probe, chain is this type */
-       u8 eeprom_dev;  /* Dev addr (if any) for EEPROM */
-       u8 temp_dev;    /* Dev Addr (if any) for Temp-sense */
-} i2c_chains[] = {
-       { IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
-       { IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
-       { IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
-       { IPATH_NO_DEV }
-};
-
-enum i2c_type {
-       i2c_line_scl = 0,
-       i2c_line_sda
-};
-
-enum i2c_state {
-       i2c_line_low = 0,
-       i2c_line_high
-};
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_gpio_set - set a GPIO line
- * @dd: the infinipath device
- * @line: the line to set
- * @new_line_state: the state to set
- *
- * Returns 0 if the line was set to the new state successfully, non-zero
- * on error.
- */
-static int i2c_gpio_set(struct ipath_devdata *dd,
-                       enum i2c_type line,
-                       enum i2c_state new_line_state)
-{
-       u64 out_mask, dir_mask, *gpioval;
-       unsigned long flags = 0;
-
-       gpioval = &dd->ipath_gpio_out;
-
-       if (line == i2c_line_scl) {
-               dir_mask = dd->ipath_gpio_scl;
-               out_mask = (1UL << dd->ipath_gpio_scl_num);
-       } else {
-               dir_mask = dd->ipath_gpio_sda;
-               out_mask = (1UL << dd->ipath_gpio_sda_num);
-       }
-
-       spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-       if (new_line_state == i2c_line_high) {
-               /* tri-state the output rather than force high */
-               dd->ipath_extctrl &= ~dir_mask;
-       } else {
-               /* config line to be an output */
-               dd->ipath_extctrl |= dir_mask;
-       }
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
-
-       /* set output as well (no real verify) */
-       if (new_line_state == i2c_line_high)
-               *gpioval |= out_mask;
-       else
-               *gpioval &= ~out_mask;
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
-       spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-       return 0;
-}
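
(Editorial sketch.) i2c_gpio_set() emulates an open-drain output: to release the line high it tri-states the pin and lets the external pull-up raise it, and to drive low it enables the output driver with a low value pre-loaded. A generic sketch of that convention, using hypothetical gpio_set_dir()/gpio_set_out() helpers rather than ipath calls:

static void open_drain_set(int pin, int level)
{
        if (level) {
                gpio_set_dir(pin, 0);   /* tri-state; the pull-up raises the line */
        } else {
                gpio_set_out(pin, 0);   /* pre-load a low level */
                gpio_set_dir(pin, 1);   /* enable the driver to pull the line low */
        }
}
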
-
-/**
- * i2c_gpio_get - get a GPIO line state
- * @dd: the infinipath device
- * @line: the line to get
- * @curr_statep: where to put the line state
- *
 * Returns 0 if the line state was read successfully, non-zero on
 * error.  curr_state is not set on error.
- */
-static int i2c_gpio_get(struct ipath_devdata *dd,
-                       enum i2c_type line,
-                       enum i2c_state *curr_statep)
-{
-       u64 read_val, mask;
-       int ret;
-       unsigned long flags = 0;
-
-       /* check args */
-       if (curr_statep == NULL) {
-               ret = 1;
-               goto bail;
-       }
-
-       /* config line to be an input */
-       if (line == i2c_line_scl)
-               mask = dd->ipath_gpio_scl;
-       else
-               mask = dd->ipath_gpio_sda;
-
-       spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-       dd->ipath_extctrl &= ~mask;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
-       /*
-        * Below is very unlikely to reflect true input state if Output
-        * Enable actually changed.
-        */
-       read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-       spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-       if (read_val & mask)
-               *curr_statep = i2c_line_high;
-       else
-               *curr_statep = i2c_line_low;
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the infinipath device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip.  Since we are delaying anyway, the cost doesn't
- * hurt, and makes the bit twiddling more regular
- */
-static void i2c_wait_for_writes(struct ipath_devdata *dd)
-{
-       (void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-       rmb();
-}
-
-static void scl_out(struct ipath_devdata *dd, u8 bit)
-{
-       udelay(1);
-       i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
-
-       i2c_wait_for_writes(dd);
-}
-
-static void sda_out(struct ipath_devdata *dd, u8 bit)
-{
-       i2c_gpio_set(dd, i2c_line_sda, bit ? i2c_line_high : i2c_line_low);
-
-       i2c_wait_for_writes(dd);
-}
-
-static u8 sda_in(struct ipath_devdata *dd, int wait)
-{
-       enum i2c_state bit;
-
-       if (i2c_gpio_get(dd, i2c_line_sda, &bit))
-               ipath_dbg("get bit failed!\n");
-
-       if (wait)
-               i2c_wait_for_writes(dd);
-
-       return bit == i2c_line_high ? 1U : 0;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the infinipath device
- */
-static int i2c_ackrcv(struct ipath_devdata *dd)
-{
-       u8 ack_received;
-
-       /* AT ENTRY SCL = LOW */
-       /* change direction, ignore data */
-       ack_received = sda_in(dd, 1);
-       scl_out(dd, i2c_line_high);
-       ack_received = sda_in(dd, 1) == 0;
-       scl_out(dd, i2c_line_low);
-       return ack_received;
-}
-
-/**
- * rd_byte - read a byte, leaving ACK, STOP, etc up to caller
- * @dd: the infinipath device
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct ipath_devdata *dd)
-{
-       int bit_cntr, data;
-
-       data = 0;
-
-       for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
-               data <<= 1;
-               scl_out(dd, i2c_line_high);
-               data |= sda_in(dd, 0);
-               scl_out(dd, i2c_line_low);
-       }
-       return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the infinipath device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct ipath_devdata *dd, u8 data)
-{
-       int bit_cntr;
-       u8 bit;
-
-       for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
-               bit = (data >> bit_cntr) & 1;
-               sda_out(dd, bit);
-               scl_out(dd, i2c_line_high);
-               scl_out(dd, i2c_line_low);
-       }
-       return (!i2c_ackrcv(dd)) ? 1 : 0;
-}
-
-static void send_ack(struct ipath_devdata *dd)
-{
-       sda_out(dd, i2c_line_low);
-       scl_out(dd, i2c_line_high);
-       scl_out(dd, i2c_line_low);
-       sda_out(dd, i2c_line_high);
-}
-
-/**
- * i2c_startcmd - transmit the start condition, followed by address/cmd
- * @dd: the infinipath device
- * @offset_dir: direction byte
- *
- *      (both clock/data high, clock high, data low while clock is high)
- */
-static int i2c_startcmd(struct ipath_devdata *dd, u8 offset_dir)
-{
-       int res;
-
-       /* issue start sequence */
-       sda_out(dd, i2c_line_high);
-       scl_out(dd, i2c_line_high);
-       sda_out(dd, i2c_line_low);
-       scl_out(dd, i2c_line_low);
-
-       /* issue length and direction byte */
-       res = wr_byte(dd, offset_dir);
-
-       if (res)
-               ipath_cdbg(VERBOSE, "No ack to complete start\n");
-
-       return res;
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the infinipath device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct ipath_devdata *dd)
-{
-       scl_out(dd, i2c_line_low);
-       sda_out(dd, i2c_line_low);
-       scl_out(dd, i2c_line_high);
-       sda_out(dd, i2c_line_high);
-       udelay(2);
-}
-
-/**
- * eeprom_reset - reset I2C communication
- * @dd: the infinipath device
- */
-
-static int eeprom_reset(struct ipath_devdata *dd)
-{
-       int clock_cycles_left = 9;
-       u64 *gpioval = &dd->ipath_gpio_out;
-       int ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-       /* Make sure shadows are consistent */
-       dd->ipath_extctrl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
-       *gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
-       spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-       ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
-                  "is %llx\n", (unsigned long long) *gpioval);
-
-       /*
-        * This is to get the i2c into a known state, by first going low,
-        * then tristate sda (and then tristate scl as first thing
-        * in loop)
-        */
-       scl_out(dd, i2c_line_low);
-       sda_out(dd, i2c_line_high);
-
-       /* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
-       while (clock_cycles_left--) {
-               scl_out(dd, i2c_line_high);
-
-               /* SDA seen high, issue START by dropping it while SCL high */
-               if (sda_in(dd, 0)) {
-                       sda_out(dd, i2c_line_low);
-                       scl_out(dd, i2c_line_low);
-                       /* ATMEL spec says must be followed by STOP. */
-                       scl_out(dd, i2c_line_high);
-                       sda_out(dd, i2c_line_high);
-                       ret = 0;
-                       goto bail;
-               }
-
-               scl_out(dd, i2c_line_low);
-       }
-
-       ret = 1;
-
-bail:
-       return ret;
-}
-
-/*
- * Probe for I2C device at specified address. Returns 0 for "success"
- * to match rest of this file.
- * Leave bus in "reasonable" state for further commands.
- */
-static int i2c_probe(struct ipath_devdata *dd, int devaddr)
-{
-       int ret;
-
-       ret = eeprom_reset(dd);
-       if (ret) {
-               ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
-                             devaddr);
-               return ret;
-       }
-       /*
-        * Reset no longer leaves bus in start condition, so normal
-        * i2c_startcmd() will do.
-        */
-       ret = i2c_startcmd(dd, devaddr | READ_CMD);
-       if (ret)
-               ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
-                          devaddr);
-       else {
-               /*
-                * Device did respond. Complete a single-byte read, because some
-                * devices apparently cannot handle STOP immediately after they
-                * ACK the start-cmd.
-                */
-               int data;
-               data = rd_byte(dd);
-               stop_cmd(dd);
-               ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
-       }
-       return ret;
-}
-
-/*
- * Returns the "i2c type". This is a pointer to a struct that describes
- * the I2C chain on this board. To minimize impact on struct ipath_devdata,
- * the (small integer) index into the table is actually memoized, rather
- * than the pointer.
- * Memoization is because the type is determined on the first call per chip.
- * An alternative would be to move type determination to early
- * init code.
- */
-static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
-{
-       int idx;
-
-       /* Get memoized index, from previous successful probes */
-       idx = dd->ipath_i2c_chain_type - 1;
-       if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
-               goto done;
-
-       idx = 0;
-       while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
-               /* if probe succeeds, this is type */
-               if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
-                       break;
-               ++idx;
-       }
-
-       /*
-        * Old EEPROM (first entry) may require a reset after probe,
-        * rather than being able to "start" after "stop"
-        */
-       if (idx == 0)
-               eeprom_reset(dd);
-
-       if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
-               idx = -1;
-       else
-               dd->ipath_i2c_chain_type = idx + 1;
-done:
-       return (idx >= 0) ? i2c_chains + idx : NULL;
-}
-
-static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
-                                       u8 eeprom_offset, void *buffer, int len)
-{
-       int ret;
-       struct i2c_chain_desc *icd;
-       u8 *bp = buffer;
-
-       ret = 1;
-       icd = ipath_i2c_type(dd);
-       if (!icd)
-               goto bail;
-
-       if (icd->eeprom_dev == IPATH_NO_DEV) {
-               /* legacy not-really-I2C */
-               ipath_cdbg(VERBOSE, "Start command only address\n");
-               eeprom_offset = (eeprom_offset << 1) | READ_CMD;
-               ret = i2c_startcmd(dd, eeprom_offset);
-       } else {
-               /* Actual I2C */
-               ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
-               if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
-                       ipath_dbg("Failed EEPROM startcmd\n");
-                       stop_cmd(dd);
-                       ret = 1;
-                       goto bail;
-               }
-               ret = wr_byte(dd, eeprom_offset);
-               stop_cmd(dd);
-               if (ret) {
-                       ipath_dev_err(dd, "Failed to write EEPROM address\n");
-                       ret = 1;
-                       goto bail;
-               }
-               ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
-       }
-       if (ret) {
-               ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
-               stop_cmd(dd);
-               ret = 1;
-               goto bail;
-       }
-
-       /*
-        * eeprom keeps clocking data out as long as we ack, automatically
-        * incrementing the address.
-        */
-       while (len-- > 0) {
-               /* get and store data */
-               *bp++ = rd_byte(dd);
-               /* send ack if not the last byte */
-               if (len)
-                       send_ack(dd);
-       }
-
-       stop_cmd(dd);
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
-                                      const void *buffer, int len)
-{
-       int sub_len;
-       const u8 *bp = buffer;
-       int max_wait_time, i;
-       int ret;
-       struct i2c_chain_desc *icd;
-
-       ret = 1;
-       icd = ipath_i2c_type(dd);
-       if (!icd)
-               goto bail;
-
-       while (len > 0) {
-               if (icd->eeprom_dev == IPATH_NO_DEV) {
-                       if (i2c_startcmd(dd,
-                                        (eeprom_offset << 1) | WRITE_CMD)) {
-                               ipath_dbg("Failed to start cmd offset %u\n",
-                                       eeprom_offset);
-                               goto failed_write;
-                       }
-               } else {
-                       /* Real I2C */
-                       if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
-                               ipath_dbg("Failed EEPROM startcmd\n");
-                               goto failed_write;
-                       }
-                       ret = wr_byte(dd, eeprom_offset);
-                       if (ret) {
-                               ipath_dev_err(dd, "Failed to write EEPROM "
-                                             "address\n");
-                               goto failed_write;
-                       }
-               }
-
-               sub_len = min(len, 4);
-               eeprom_offset += sub_len;
-               len -= sub_len;
-
-               for (i = 0; i < sub_len; i++) {
-                       if (wr_byte(dd, *bp++)) {
-                               ipath_dbg("no ack after byte %u/%u (%u "
-                                         "total remain)\n", i, sub_len,
-                                         len + sub_len - i);
-                               goto failed_write;
-                       }
-               }
-
-               stop_cmd(dd);
-
-               /*
-                * wait for write complete by waiting for a successful
-                * read (the chip replies with a zero after the write
-                * cmd completes, and before it writes to the eeprom).
-                * The startcmd for the read will fail the ack until
-                * the writes have completed.   We do this inline to avoid
-                * the debug prints that are in the real read routine
-                * if the startcmd fails.
-                * We also use the proper device address, so it doesn't matter
-                * whether we have real eeprom_dev. legacy likes any address.
-                * whether we have a real eeprom_dev; the legacy part accepts any address.
-               max_wait_time = 100;
-               while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
-                       stop_cmd(dd);
-                       if (!--max_wait_time) {
-                               ipath_dbg("Did not get successful read to "
-                                         "complete write\n");
-                               goto failed_write;
-                       }
-               }
-               /* now read (and ignore) the resulting byte */
-               rd_byte(dd);
-               stop_cmd(dd);
-       }
-
-       ret = 0;
-       goto bail;
-
-failed_write:
-       stop_cmd(dd);
-       ret = 1;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_eeprom_read - receives bytes from the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: address to read from
- * @buffer: where to store result
- * @len: number of bytes to receive
- */
-int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
-                       void *buff, int len)
-{
-       int ret;
-
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (!ret) {
-               ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
-               mutex_unlock(&dd->ipath_eep_lock);
-       }
-
-       return ret;
-}
-
-/**
- * ipath_eeprom_write - writes data to the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: where to place data
- * @buffer: data to write
- * @len: number of bytes to write
- */
-int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
-                       const void *buff, int len)
-{
-       int ret;
-
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (!ret) {
-               ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
-               mutex_unlock(&dd->ipath_eep_lock);
-       }
-
-       return ret;
-}
-
-static u8 flash_csum(struct ipath_flash *ifp, int adjust)
-{
-       u8 *ip = (u8 *) ifp;
-       u8 csum = 0, len;
-
-       /*
-        * Limit length checksummed to max length of actual data.
-        * Checksum of erased eeprom will still be bad, but we avoid
-        * reading past the end of the buffer we were passed.
-        */
-       len = ifp->if_length;
-       if (len > sizeof(struct ipath_flash))
-               len = sizeof(struct ipath_flash);
-       while (len--)
-               csum += *ip++;
-       csum -= ifp->if_csum;
-       csum = ~csum;
-       if (adjust)
-               ifp->if_csum = csum;
-
-       return csum;
-}
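
(Editorial note.) The two modes of flash_csum() are paired: adjust=1 stamps a fresh checksum into the image before it is written back, while adjust=0 recomputes it for comparison when an image is read, as the callers below do. A small sketch of the read-side check; the helper name is ours:

static int flash_image_valid(struct ipath_flash *ifp)
{
        /* recompute without touching the stored byte and compare */
        return flash_csum(ifp, 0) == ifp->if_csum;
}
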
-
-/**
- * ipath_get_eeprom_info - get the GUID and other info from the i2c device
- * @dd: the infinipath device
- *
- * We have the capability to use the ipath_nguid field, and get
- * the guid from the first chip's flash, to use for all of them.
- */
-void ipath_get_eeprom_info(struct ipath_devdata *dd)
-{
-       void *buf;
-       struct ipath_flash *ifp;
-       __be64 guid;
-       int len, eep_stat;
-       u8 csum, *bguid;
-       int t = dd->ipath_unit;
-       struct ipath_devdata *dd0 = ipath_lookup(0);
-
-       if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
-               u8 oguid;
-               dd->ipath_guid = dd0->ipath_guid;
-               bguid = (u8 *) & dd->ipath_guid;
-
-               oguid = bguid[7];
-               bguid[7] += t;
-               if (oguid > bguid[7]) {
-                       if (bguid[6] == 0xff) {
-                               if (bguid[5] == 0xff) {
-                                       ipath_dev_err(
-                                               dd,
-                                               "Can't set %s GUID from "
-                                               "base, wraps to OUI!\n",
-                                               ipath_get_unit_name(t));
-                                       dd->ipath_guid = 0;
-                                       goto bail;
-                               }
-                               bguid[5]++;
-                       }
-                       bguid[6]++;
-               }
-               dd->ipath_nguid = 1;
-
-               ipath_dbg("nguid %u, so adding %u to device 0 guid, "
-                         "for %llx\n",
-                         dd0->ipath_nguid, t,
-                         (unsigned long long) be64_to_cpu(dd->ipath_guid));
-               goto bail;
-       }
-
-       /*
-        * read full flash, not just currently used part, since it may have
-        * been written with a newer definition.
-        */
-       len = sizeof(struct ipath_flash);
-       buf = vmalloc(len);
-       if (!buf) {
-               ipath_dev_err(dd, "Couldn't allocate memory to read %u "
-                             "bytes from eeprom for GUID\n", len);
-               goto bail;
-       }
-
-       mutex_lock(&dd->ipath_eep_lock);
-       eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
-       mutex_unlock(&dd->ipath_eep_lock);
-
-       if (eep_stat) {
-               ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
-               goto done;
-       }
-       ifp = (struct ipath_flash *)buf;
-
-       csum = flash_csum(ifp, 0);
-       if (csum != ifp->if_csum) {
-               dev_info(&dd->pcidev->dev, "Bad I2C flash checksum: "
-                        "0x%x, not 0x%x\n", csum, ifp->if_csum);
-               goto done;
-       }
-       if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
-           *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
-               ipath_dev_err(dd, "Invalid GUID %llx from flash; "
-                             "ignoring\n",
-                             *(unsigned long long *) ifp->if_guid);
-               /* don't allow GUID if all 0 or all 1's */
-               goto done;
-       }
-
-       /* complain, but allow it */
-       if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
-               dev_info(&dd->pcidev->dev, "Warning, GUID %llx is "
-                        "default, probably not correct!\n",
-                        *(unsigned long long *) ifp->if_guid);
-
-       bguid = ifp->if_guid;
-       if (!bguid[0] && !bguid[1] && !bguid[2]) {
-               /* original incorrect GUID format in flash; fix in
-                * core copy, by shifting up 2 octets; don't need to
-                * change top octet, since both it and shifted are
-                * 0.. */
-               bguid[1] = bguid[3];
-               bguid[2] = bguid[4];
-               bguid[3] = bguid[4] = 0;
-               guid = *(__be64 *) ifp->if_guid;
-               ipath_cdbg(VERBOSE, "Old GUID format in flash, top 3 zero, "
-                          "shifting 2 octets\n");
-       } else
-               guid = *(__be64 *) ifp->if_guid;
-       dd->ipath_guid = guid;
-       dd->ipath_nguid = ifp->if_numguid;
-       /*
-        * Things are slightly complicated by the desire to transparently
-        * support both the Pathscale 10-digit serial number and the QLogic
-        * 13-character version.
-        */
-       if ((ifp->if_fversion > 1) && ifp->if_sprefix[0]
-               && ((u8 *)ifp->if_sprefix)[0] != 0xFF) {
-               /* This board has a Serial-prefix, which is stored
-                * elsewhere for backward-compatibility.
-                */
-               char *snp = dd->ipath_serial;
-               memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
-               snp[sizeof ifp->if_sprefix] = '\0';
-               len = strlen(snp);
-               snp += len;
-               len = (sizeof dd->ipath_serial) - len;
-               if (len > sizeof ifp->if_serial) {
-                       len = sizeof ifp->if_serial;
-               }
-               memcpy(snp, ifp->if_serial, len);
-       } else
-               memcpy(dd->ipath_serial, ifp->if_serial,
-                      sizeof ifp->if_serial);
-       if (!strstr(ifp->if_comment, "Tested successfully"))
-               ipath_dev_err(dd, "Board SN %s did not pass functional "
-                       "test: %s\n", dd->ipath_serial,
-                       ifp->if_comment);
-
-       ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
-                  (unsigned long long) be64_to_cpu(dd->ipath_guid));
-
-       memcpy(&dd->ipath_eep_st_errs, &ifp->if_errcntp, IPATH_EEP_LOG_CNT);
-       /*
-        * Power-on (actually "active") hours are kept as little-endian value
-        * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-        * atomic_t while running.
-        */
-       atomic_set(&dd->ipath_active_time, 0);
-       dd->ipath_eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
-done:
-       vfree(buf);
-
-bail:;
-}
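
(Editorial sketch.) The per-unit GUID derivation near the top of this function adds the unit number to the low octet of the base GUID and carries into the next two octets, refusing to wrap into the 3-byte OUI. The same arithmetic as a standalone helper (hypothetical name, big-endian byte array as stored in flash):

/* returns 0 on success, -1 if the carry would reach the OUI (bytes 0..2) */
static int derive_unit_guid(u8 guid[8], u8 unit)
{
        u8 old = guid[7];

        guid[7] += unit;
        if (guid[7] < old) {            /* low octet wrapped: carry upward */
                if (guid[6] == 0xff) {
                        if (guid[5] == 0xff)
                                return -1;
                        guid[5]++;
                }
                guid[6]++;
        }
        return 0;
}
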
-
-/**
- * ipath_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the infinipath device
- *
- * Although the time is kept as seconds in the ipath_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct ipath_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-
-int ipath_update_eeprom_log(struct ipath_devdata *dd)
-{
-       void *buf;
-       struct ipath_flash *ifp;
-       int len, hi_water;
-       uint32_t new_time, new_hrs;
-       u8 csum;
-       int ret, idx;
-       unsigned long flags;
-
-       /* first, check if we actually need to do anything. */
-       ret = 0;
-       for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-               if (dd->ipath_eep_st_new_errs[idx]) {
-                       ret = 1;
-                       break;
-               }
-       }
-       new_time = atomic_read(&dd->ipath_active_time);
-
-       if (ret == 0 && new_time < 3600)
-               return 0;
-
-       /*
-        * of logging, so get the current contents and take a closer look.
-        * Read the full flash, not just the currently used part, since it
-        * may have been written with a newer definition.
-        * been written with a newer definition
-        */
-       len = sizeof(struct ipath_flash);
-       buf = vmalloc(len);
-       ret = 1;
-       if (!buf) {
-               ipath_dev_err(dd, "Couldn't allocate memory to read %u "
-                               "bytes from eeprom for logging\n", len);
-               goto bail;
-       }
-
-       /* Grab semaphore and read current EEPROM. If we get an
-        * error, let go, but if not, keep it until we finish write.
-        */
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (ret) {
-               ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-               goto free_bail;
-       }
-       ret = ipath_eeprom_internal_read(dd, 0, buf, len);
-       if (ret) {
-               mutex_unlock(&dd->ipath_eep_lock);
-               ipath_dev_err(dd, "Unable to read EEPROM for logging\n");
-               goto free_bail;
-       }
-       ifp = (struct ipath_flash *)buf;
-
-       csum = flash_csum(ifp, 0);
-       if (csum != ifp->if_csum) {
-               mutex_unlock(&dd->ipath_eep_lock);
-               ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-                               csum, ifp->if_csum);
-               ret = 1;
-               goto free_bail;
-       }
-       hi_water = 0;
-       spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-       for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-               int new_val = dd->ipath_eep_st_new_errs[idx];
-               if (new_val) {
-                       /*
-                        * If we have seen any errors, add to EEPROM values
-                        * We need to saturate at 0xFF (255) and we also
-                        * would need to adjust the checksum if we were
-                        * trying to minimize EEPROM traffic
-                        * Note that we add to actual current count in EEPROM,
-                        * in case it was altered while we were running.
-                        */
-                       new_val += ifp->if_errcntp[idx];
-                       if (new_val > 0xFF)
-                               new_val = 0xFF;
-                       if (ifp->if_errcntp[idx] != new_val) {
-                               ifp->if_errcntp[idx] = new_val;
-                               hi_water = offsetof(struct ipath_flash,
-                                               if_errcntp) + idx;
-                       }
-                       /*
-                        * update our shadow (used to minimize EEPROM
-                        * traffic), to match what we are about to write.
-                        */
-                       dd->ipath_eep_st_errs[idx] = new_val;
-                       dd->ipath_eep_st_new_errs[idx] = 0;
-               }
-       }
-       /*
-        * now update active-time. We would like to round to the nearest hour
-        * but unless atomic_t are sure to be proper signed ints we cannot,
-        * because we need to account for what we "transfer" to EEPROM and
-        * if we log an hour at 31 minutes, then we would need to set
-        * active_time to -29 to accurately count the _next_ hour.
-        */
-       if (new_time >= 3600) {
-               new_hrs = new_time / 3600;
-               atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
-               new_hrs += dd->ipath_eep_hrs;
-               if (new_hrs > 0xFFFF)
-                       new_hrs = 0xFFFF;
-               dd->ipath_eep_hrs = new_hrs;
-               if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-                       ifp->if_powerhour[0] = new_hrs & 0xFF;
-                       hi_water = offsetof(struct ipath_flash, if_powerhour);
-               }
-               if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-                       ifp->if_powerhour[1] = new_hrs >> 8;
-                       hi_water = offsetof(struct ipath_flash, if_powerhour)
-                                       + 1;
-               }
-       }
-       /*
-        * There is a tiny possibility that we could somehow fail to write
-        * the EEPROM after updating our shadows, but problems from holding
-        * the spinlock too long are a much bigger issue.
-        */
-       spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-       if (hi_water) {
-               /* we made some change to the data, update cksum and write */
-               csum = flash_csum(ifp, 1);
-               ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
-       }
-       mutex_unlock(&dd->ipath_eep_lock);
-       if (ret)
-               ipath_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-       vfree(buf);
-bail:
-       return ret;
-
-}
-
-/**
- * ipath_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the infinipath device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever ipath_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-
-void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
-{
-       uint new_val;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-       new_val = dd->ipath_eep_st_new_errs[eidx] + incr;
-       if (new_val > 255)
-               new_val = 255;
-       dd->ipath_eep_st_new_errs[eidx] = new_val;
-       spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-       return;
-}
-
-static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
-{
-       int ret;
-       struct i2c_chain_desc *icd;
-
-       ret = -ENOENT;
-
-       icd = ipath_i2c_type(dd);
-       if (!icd)
-               goto bail;
-
-       if (icd->temp_dev == IPATH_NO_DEV) {
-               /* tempsense only exists on new, real-I2C boards */
-               ret = -ENXIO;
-               goto bail;
-       }
-
-       if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
-               ipath_dbg("Failed tempsense startcmd\n");
-               stop_cmd(dd);
-               ret = -ENXIO;
-               goto bail;
-       }
-       ret = wr_byte(dd, regnum);
-       stop_cmd(dd);
-       if (ret) {
-               ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
-                             regnum);
-               ret = -ENXIO;
-               goto bail;
-       }
-       if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
-               ipath_dbg("Failed tempsense RD startcmd\n");
-               stop_cmd(dd);
-               ret = -ENXIO;
-               goto bail;
-       }
-       /*
-        * We can only clock out one byte per command, sensibly
-        */
-       ret = rd_byte(dd);
-       stop_cmd(dd);
-
-bail:
-       return ret;
-}
-
-#define VALID_TS_RD_REG_MASK 0xBF
-
-/**
- * ipath_tempsense_read - read register of temp sensor via I2C
- * @dd: the infinipath device
- * @regnum: register to read from
- *
- * returns reg contents (0..255) or < 0 for error
- */
-int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
-{
-       int ret;
-
-       if (regnum > 7)
-               return -EINVAL;
-
-       /* return a bogus value for (the one) register we do not have */
-       if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
-               return 0;
-
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (!ret) {
-               ret = ipath_tempsense_internal_read(dd, regnum);
-               mutex_unlock(&dd->ipath_eep_lock);
-       }
-
-       /*
-        * There are three possibilities here:
-        * ret is actual value (0..255)
-        * ret is -ENXIO or -EINVAL from code in this file
-        * ret is -EINTR from mutex_lock_interruptible.
-        */
-       return ret;
-}
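
(Editorial sketch.) Because a successful read and an error share the same int return, callers must treat any negative value as failure and everything else as the raw register byte. A minimal usage sketch in a hypothetical wrapper:

static int read_tempsense_reg(struct ipath_devdata *dd, u8 regnum, u8 *val)
{
        int ret = ipath_tempsense_read(dd, regnum);

        if (ret < 0)
                return ret;             /* -ENXIO, -EINVAL or -EINTR */
        *val = (u8)ret;                 /* otherwise the raw register byte, 0..255 */
        return 0;
}
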
-
-static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
-                                         u8 regnum, u8 data)
-{
-       int ret = -ENOENT;
-       struct i2c_chain_desc *icd;
-
-       icd = ipath_i2c_type(dd);
-       if (!icd)
-               goto bail;
-
-       if (icd->temp_dev == IPATH_NO_DEV) {
-               /* tempsense only exists on new, real-I2C boards */
-               ret = -ENXIO;
-               goto bail;
-       }
-       if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
-               ipath_dbg("Failed tempsense startcmd\n");
-               stop_cmd(dd);
-               ret = -ENXIO;
-               goto bail;
-       }
-       ret = wr_byte(dd, regnum);
-       if (ret) {
-               stop_cmd(dd);
-               ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
-                             regnum);
-               ret = -ENXIO;
-               goto bail;
-       }
-       ret = wr_byte(dd, data);
-       stop_cmd(dd);
-       ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
-       if (ret) {
-               ipath_dev_err(dd, "Failed tempsense data wrt to %02X\n",
-                             regnum);
-               ret = -ENXIO;
-       }
-
-bail:
-       return ret;
-}
-
-#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
-
-/**
- * ipath_tempsense_write - write register of temp sensor via I2C
- * @dd: the infinipath device
- * @regnum: register to write
- * @data: data to write
- *
- * returns 0 for success or < 0 for error
- */
-int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
-{
-       int ret;
-
-       if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
-               return -EINVAL;
-
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (!ret) {
-               ret = ipath_tempsense_internal_write(dd, regnum, data);
-               mutex_unlock(&dd->ipath_eep_lock);
-       }
-
-       /*
-        * There are three possibilities here:
-        * ret is 0 for success
-        * ret is -ENXIO or -EINVAL from code in this file
-        * ret is -EINTR from mutex_lock_interruptible.
-        */
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
deleted file mode 100644 (file)
index 6187b84..0000000
+++ /dev/null
@@ -1,2619 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/cdev.h>
-#include <linux/swap.h>
-#include <linux/export.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/cpu.h>
-#include <linux/uio.h>
-#include <asm/pgtable.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-#include "ipath_user_sdma.h"
-
-static int ipath_open(struct inode *, struct file *);
-static int ipath_close(struct inode *, struct file *);
-static ssize_t ipath_write(struct file *, const char __user *, size_t,
-                          loff_t *);
-static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
-static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
-static int ipath_mmap(struct file *, struct vm_area_struct *);
-
-/*
- * This is really, really weird shit - write() and writev() here
- * have completely unrelated semantics.  Sucky userland ABI,
- * film at 11.
- */
-static const struct file_operations ipath_file_ops = {
-       .owner = THIS_MODULE,
-       .write = ipath_write,
-       .write_iter = ipath_write_iter,
-       .open = ipath_open,
-       .release = ipath_close,
-       .poll = ipath_poll,
-       .mmap = ipath_mmap,
-       .llseek = noop_llseek,
-};
-
-/*
- * Convert kernel virtual addresses to physical addresses so they don't
- * potentially conflict with the chip addresses used as mmap offsets.
- * It doesn't really matter what mmap offset we use as long as we can
- * interpret it correctly.
- */
-static u64 cvt_kvaddr(void *p)
-{
-       struct page *page;
-       u64 paddr = 0;
-
-       page = vmalloc_to_page(p);
-       if (page)
-               paddr = page_to_pfn(page) << PAGE_SHIFT;
-
-       return paddr;
-}
-
-static int ipath_get_base_info(struct file *fp,
-                              void __user *ubase, size_t ubase_size)
-{
-       struct ipath_portdata *pd = port_fp(fp);
-       int ret = 0;
-       struct ipath_base_info *kinfo = NULL;
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned subport_cnt;
-       int shared, master;
-       size_t sz;
-
-       subport_cnt = pd->port_subport_cnt;
-       if (!subport_cnt) {
-               shared = 0;
-               master = 0;
-               subport_cnt = 1;
-       } else {
-               shared = 1;
-               master = !subport_fp(fp);
-       }
-
-       sz = sizeof(*kinfo);
-       /* If port sharing is not requested, allow the old size structure */
-       if (!shared)
-               sz -= 7 * sizeof(u64);
-       if (ubase_size < sz) {
-               ipath_cdbg(PROC,
-                          "Base size %zu, need %zu (version mismatch?)\n",
-                          ubase_size, sz);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
-       if (kinfo == NULL) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       ret = dd->ipath_f_get_base_info(pd, kinfo);
-       if (ret < 0)
-               goto bail;
-
-       kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
-       kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
-       kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
-       kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
-       /*
-        * have to mmap whole thing
-        */
-       kinfo->spi_rcv_egrbuftotlen =
-               pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
-       kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
-       kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
-               pd->port_rcvegrbuf_chunks;
-       kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
-       if (master)
-               kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
-       /*
-        * for this use, may be ipath_cfgports summed over all chips that
-        * are configured and present
-        */
-       kinfo->spi_nports = dd->ipath_cfgports;
-       /* unit (chip/board) our port is on */
-       kinfo->spi_unit = dd->ipath_unit;
-       /* for now, only a single page */
-       kinfo->spi_tid_maxsize = PAGE_SIZE;
-
-       /*
-        * Doing this per port, and based on the skip value, etc.  This has
-        * to be the actual buffer size, since the protocol code treats it
-        * as an array.
-        *
-        * These have to be set to user addresses in the user code via mmap.
-        * These values are used on return to user code for the mmap target
-        * addresses only.  For 32 bit, same 44 bit address problem, so use
-        * the physical address, not virtual.  Before 2.6.11, using the
-        * page_address() macro worked, but in 2.6.11, even that returns the
-        * full 64 bit address (upper bits all 1's).  So far, using the
-        * physical addresses (or chip offsets, for chip mapping) works, but
-        * no doubt some future kernel release will change that, and we'll be
-        * on to yet another method of dealing with this.
-        */
-       kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
-       kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
-       kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
-       kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
-       kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
-               (void *) dd->ipath_statusp -
-               (void *) dd->ipath_pioavailregs_dma;
-       if (!shared) {
-               kinfo->spi_piocnt = pd->port_piocnt;
-               kinfo->spi_piobufbase = (u64) pd->port_piobufs;
-               kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
-                       dd->ipath_ureg_align * pd->port_port;
-       } else if (master) {
-               kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
-                                   (pd->port_piocnt % subport_cnt);
-               /* Master's PIO buffers are after all the slave's */
-               kinfo->spi_piobufbase = (u64) pd->port_piobufs +
-                       dd->ipath_palign *
-                       (pd->port_piocnt - kinfo->spi_piocnt);
-       } else {
-               unsigned slave = subport_fp(fp) - 1;
-
-               kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
-               kinfo->spi_piobufbase = (u64) pd->port_piobufs +
-                       dd->ipath_palign * kinfo->spi_piocnt * slave;
-       }
-
-       if (shared) {
-               kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
-                       dd->ipath_ureg_align * pd->port_port;
-               kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
-               kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
-               kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
-
-               kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
-                       PAGE_SIZE * subport_fp(fp));
-
-               kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
-                       pd->port_rcvhdrq_size * subport_fp(fp));
-               kinfo->spi_rcvhdr_tailaddr = 0;
-               kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
-                       pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
-                       subport_fp(fp));
-
-               kinfo->spi_subport_uregbase =
-                       cvt_kvaddr(pd->subport_uregbase);
-               kinfo->spi_subport_rcvegrbuf =
-                       cvt_kvaddr(pd->subport_rcvegrbuf);
-               kinfo->spi_subport_rcvhdr_base =
-                       cvt_kvaddr(pd->subport_rcvhdr_base);
-               ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
-                       kinfo->spi_port, kinfo->spi_runtime_flags,
-                       (unsigned long long) kinfo->spi_subport_uregbase,
-                       (unsigned long long) kinfo->spi_subport_rcvegrbuf,
-                       (unsigned long long) kinfo->spi_subport_rcvhdr_base);
-       }
-
-       /*
-        * All user buffers are 2KB buffers.  If we ever support
-        * giving 4KB buffers to user processes, this will need some
-        * work.
-        */
-       kinfo->spi_pioindex = (kinfo->spi_piobufbase -
-               (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
-       kinfo->spi_pioalign = dd->ipath_palign;
-
-       kinfo->spi_qpair = IPATH_KD_QP;
-       /*
-        * user mode PIO buffers are always 2KB, even when 4KB can
-        * be received, and sent via the kernel; this is ibmaxlen
-        * for 2K MTU.
-        */
-       kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
-       kinfo->spi_mtu = dd->ipath_ibmaxlen;    /* maxlen, not ibmtu */
-       kinfo->spi_port = pd->port_port;
-       kinfo->spi_subport = subport_fp(fp);
-       kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
-       kinfo->spi_hw_version = dd->ipath_revision;
-
-       if (master) {
-               kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
-       }
-
-       sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
-       if (copy_to_user(ubase, kinfo, sz))
-               ret = -EFAULT;
-
-bail:
-       kfree(kinfo);
-       return ret;
-}
-
-/**
- * ipath_tid_update - update a port TID
- * @pd: the port
- * @fp: the ipath device file
- * @ti: the TID information
- *
- * The new implementation as of Oct 2004 is that the driver assigns
- * the tid and returns it to the caller.   To make it easier to
- * catch bugs, and to reduce search time, we keep a cursor for
- * each port, walking the shadow tid array to find one that's not
- * in use.
- *
- * For now, if we can't allocate the full list, we fail, although
- * in the long run, we'll allocate as many as we can, and the
- * caller will deal with that by trying the remaining pages later.
- * That means that when we fail, we have to mark the tids as not in
- * use again, in our shadow copy.
- *
- * It's up to the caller to free the tids when they are done.
- * We'll unlock the pages as they free them.
- *
- * Also, right now we are locking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.
- */
-static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
-                           const struct ipath_tid_info *ti)
-{
-       int ret = 0, ntids;
-       u32 tid, porttid, cnt, i, tidcnt, tidoff;
-       u16 *tidlist;
-       struct ipath_devdata *dd = pd->port_dd;
-       u64 physaddr;
-       unsigned long vaddr;
-       u64 __iomem *tidbase;
-       unsigned long tidmap[8];
-       struct page **pagep = NULL;
-       unsigned subport = subport_fp(fp);
-
-       if (!dd->ipath_pageshadow) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       cnt = ti->tidcnt;
-       if (!cnt) {
-               ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
-                         (unsigned long long) ti->tidlist);
-               /*
-                * Should we treat this as success?  Likely a bug
-                */
-               ret = -EFAULT;
-               goto done;
-       }
-       porttid = pd->port_port * dd->ipath_rcvtidcnt;
-       if (!pd->port_subport_cnt) {
-               tidcnt = dd->ipath_rcvtidcnt;
-               tid = pd->port_tidcursor;
-               tidoff = 0;
-       } else if (!subport) {
-               tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
-                        (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
-               tidoff = dd->ipath_rcvtidcnt - tidcnt;
-               porttid += tidoff;
-               tid = tidcursor_fp(fp);
-       } else {
-               tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
-               tidoff = tidcnt * (subport - 1);
-               porttid += tidoff;
-               tid = tidcursor_fp(fp);
-       }
-       if (cnt > tidcnt) {
-               /* make sure it all fits in port_tid_pg_list */
-               dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
-                        "TIDs, only trying max (%u)\n", cnt, tidcnt);
-               cnt = tidcnt;
-       }
-       pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
-       tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];
-
-       memset(tidmap, 0, sizeof(tidmap));
-       /* before decrement; chip actual # */
-       ntids = tidcnt;
-       tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
-                                  dd->ipath_rcvtidbase +
-                                  porttid * sizeof(*tidbase));
-
-       ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
-                  pd->port_port, cnt, tid, tidbase);
-
-       /* virtual address of first page in transfer */
-       vaddr = ti->tidvaddr;
-       if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
-                      cnt * PAGE_SIZE)) {
-               ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
-                         (void *)vaddr, cnt);
-               ret = -EFAULT;
-               goto done;
-       }
-       ret = ipath_get_user_pages(vaddr, cnt, pagep);
-       if (ret) {
-               if (ret == -EBUSY) {
-                       ipath_dbg("Failed to lock addr %p, %u pages "
-                                 "(already locked)\n",
-                                 (void *) vaddr, cnt);
-                       /*
-                        * For now, continue and see what happens, but with
-                        * the new implementation this should never happen,
-                        * unless perhaps the user has mpin'ed the pages
-                        * themselves (something we need to test)
-                        */
-                       ret = 0;
-               } else {
-                       dev_info(&dd->pcidev->dev,
-                                "Failed to lock addr %p, %u pages: "
-                                "errno %d\n", (void *) vaddr, cnt, -ret);
-                       goto done;
-               }
-       }
-       for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
-               for (; ntids--; tid++) {
-                       if (tid == tidcnt)
-                               tid = 0;
-                       if (!dd->ipath_pageshadow[porttid + tid])
-                               break;
-               }
-               if (ntids < 0) {
-                       /*
-                        * oops, wrapped all the way through their TIDs,
-                        * and didn't have enough free; see comments at
-                        * start of routine
-                        */
-                       ipath_dbg("Not enough free TIDs for %u pages "
-                                 "(index %d), failing\n", cnt, i);
-                       i--;    /* last tidlist[i] not filled in */
-                       ret = -ENOMEM;
-                       break;
-               }
-               tidlist[i] = tid + tidoff;
-               ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
-                          "vaddr %lx\n", i, tid + tidoff, vaddr);
-               /* we "know" system pages and TID pages are same size */
-               dd->ipath_pageshadow[porttid + tid] = pagep[i];
-               dd->ipath_physshadow[porttid + tid] = ipath_map_page(
-                       dd->pcidev, pagep[i], 0, PAGE_SIZE,
-                       PCI_DMA_FROMDEVICE);
-               /*
-                * don't need atomic or it's overhead
-                */
-               __set_bit(tid, tidmap);
-               physaddr = dd->ipath_physshadow[porttid + tid];
-               ipath_stats.sps_pagelocks++;
-               ipath_cdbg(VERBOSE,
-                          "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
-                          tid, vaddr, (unsigned long long) physaddr,
-                          pagep[i]);
-               dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
-                                   physaddr);
-               /*
-                * don't check this tid in ipath_portshadow, since we
-                * just filled it in; start with the next one.
-                */
-               tid++;
-       }
-
-       if (ret) {
-               u32 limit;
-       cleanup:
-               /* jump here if copy out of updated info failed... */
-               ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
-                         -ret, i, cnt);
-               /* same code that's in ipath_free_tid() */
-               limit = sizeof(tidmap) * BITS_PER_BYTE;
-               if (limit > tidcnt)
-                       /* just in case size changes in future */
-                       limit = tidcnt;
-               tid = find_first_bit((const unsigned long *)tidmap, limit);
-               for (; tid < limit; tid++) {
-                       if (!test_bit(tid, tidmap))
-                               continue;
-                       if (dd->ipath_pageshadow[porttid + tid]) {
-                               ipath_cdbg(VERBOSE, "Freeing TID %u\n",
-                                          tid);
-                               dd->ipath_f_put_tid(dd, &tidbase[tid],
-                                                   RCVHQ_RCV_TYPE_EXPECTED,
-                                                   dd->ipath_tidinvalid);
-                               pci_unmap_page(dd->pcidev,
-                                       dd->ipath_physshadow[porttid + tid],
-                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                               dd->ipath_pageshadow[porttid + tid] = NULL;
-                               ipath_stats.sps_pageunlocks++;
-                       }
-               }
-               ipath_release_user_pages(pagep, cnt);
-       } else {
-               /*
-                * Copy the updated array, with ipath_tid's filled in, back
-                * to user.  Since we did the copy in already, this "should
-                * never fail".  If it does, we have to clean up...
-                */
-               if (copy_to_user((void __user *)
-                                (unsigned long) ti->tidlist,
-                                tidlist, cnt * sizeof(*tidlist))) {
-                       ret = -EFAULT;
-                       goto cleanup;
-               }
-               if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-                                tidmap, sizeof tidmap)) {
-                       ret = -EFAULT;
-                       goto cleanup;
-               }
-               if (tid == tidcnt)
-                       tid = 0;
-               if (!pd->port_subport_cnt)
-                       pd->port_tidcursor = tid;
-               else
-                       tidcursor_fp(fp) = tid;
-       }
-
-done:
-       if (ret)
-               ipath_dbg("Failed to map %u TID pages, failing with %d\n",
-                         ti->tidcnt, -ret);
-       return ret;
-}
-
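The kernel-doc above describes the allocation strategy: a per-port cursor walks the shadow TID array to find a slot that is not in use, wrapping around and failing only if every slot is occupied. A minimal standalone sketch of that search follows; the function name, the "shadow"/"cursor" parameters, and the signature are illustrative assumptions standing in for dd->ipath_pageshadow and pd->port_tidcursor, not the driver's code.

	/* Hedged sketch of the cursor search described above (illustrative names). */
	static int find_free_tid(void **shadow, unsigned int tidcnt,
				 unsigned int *cursor)
	{
		unsigned int tid = *cursor;
		unsigned int tries;

		for (tries = 0; tries < tidcnt; tries++, tid++) {
			if (tid == tidcnt)
				tid = 0;		/* wrap, as the real loop does */
			if (!shadow[tid]) {
				*cursor = tid + 1;	/* resume after this slot next time */
				return (int) tid;
			}
		}
		return -1;	/* walked every slot without finding a free one */
	}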
-/**
- * ipath_tid_free - free a port TID
- * @pd: the port
- * @subport: the subport
- * @ti: the TID info
- *
- * right now we are unlocking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.  We check that the TID is in range for this port
- * but otherwise don't check validity; if user has an error and
- * frees the wrong tid, it's only their own data that can thereby
- * be corrupted.  We do check that the TID was in use, for sanity.
- * We always use our idea of the saved address, not the address that
- * they pass in to us.
- */
-
-static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
-                         const struct ipath_tid_info *ti)
-{
-       int ret = 0;
-       u32 tid, porttid, cnt, limit, tidcnt;
-       struct ipath_devdata *dd = pd->port_dd;
-       u64 __iomem *tidbase;
-       unsigned long tidmap[8];
-
-       if (!dd->ipath_pageshadow) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-                          sizeof tidmap)) {
-               ret = -EFAULT;
-               goto done;
-       }
-
-       porttid = pd->port_port * dd->ipath_rcvtidcnt;
-       if (!pd->port_subport_cnt)
-               tidcnt = dd->ipath_rcvtidcnt;
-       else if (!subport) {
-               tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
-                        (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
-               porttid += dd->ipath_rcvtidcnt - tidcnt;
-       } else {
-               tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
-               porttid += tidcnt * (subport - 1);
-       }
-       tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-                                  dd->ipath_rcvtidbase +
-                                  porttid * sizeof(*tidbase));
-
-       limit = sizeof(tidmap) * BITS_PER_BYTE;
-       if (limit > tidcnt)
-               /* just in case size changes in future */
-               limit = tidcnt;
-       tid = find_first_bit(tidmap, limit);
-       ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
-                  "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
-                  limit, tid, porttid);
-       for (cnt = 0; tid < limit; tid++) {
-               /*
-                * small optimization; if we detect a run of 3 or so without
-                * any set, use find_first_bit again.  That's mainly to
-                * accelerate the case where we wrapped, so we have some at
-                * the beginning, and some at the end, and a big gap
-                * in the middle.
-                */
-               if (!test_bit(tid, tidmap))
-                       continue;
-               cnt++;
-               if (dd->ipath_pageshadow[porttid + tid]) {
-                       struct page *p;
-                       p = dd->ipath_pageshadow[porttid + tid];
-                       dd->ipath_pageshadow[porttid + tid] = NULL;
-                       ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
-                                  pid_nr(pd->port_pid), tid);
-                       dd->ipath_f_put_tid(dd, &tidbase[tid],
-                                           RCVHQ_RCV_TYPE_EXPECTED,
-                                           dd->ipath_tidinvalid);
-                       pci_unmap_page(dd->pcidev,
-                               dd->ipath_physshadow[porttid + tid],
-                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                       ipath_release_user_pages(&p, 1);
-                       ipath_stats.sps_pageunlocks++;
-               } else
-                       ipath_dbg("Unused tid %u, ignoring\n", tid);
-       }
-       if (cnt != ti->tidcnt)
-               ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
-                         ti->tidcnt, cnt);
-done:
-       if (ret)
-               ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
-                         ti->tidcnt, -ret);
-       return ret;
-}
-
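The freeing loop above mentions a "small optimization" (re-scanning with find_first_bit after a run of clear bits) that the body does not actually implement; it simply tests one bit at a time. Purely as a hedged, standalone illustration of that idea, a scan could skip whole clear words of the map at once. The helper name and the word-at-a-time skip below are assumptions, not driver code.

	#define SKETCH_BITS_PER_LONG (8 * sizeof(unsigned long))

	/* Hedged sketch: return the next set bit in [start, limit), skipping
	 * entirely clear words instead of testing every bit. */
	static unsigned int next_set_bit(const unsigned long *map,
					 unsigned int start, unsigned int limit)
	{
		unsigned int i;

		for (i = start; i < limit; i++) {
			unsigned long word = map[i / SKETCH_BITS_PER_LONG];

			if (!word) {
				/* whole word clear: jump to its last bit;
				 * the loop's i++ then moves to the next word */
				i |= SKETCH_BITS_PER_LONG - 1;
				continue;
			}
			if (word & (1UL << (i % SKETCH_BITS_PER_LONG)))
				return i;
		}
		return limit;	/* no set bit found in the range */
	}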
-/**
- * ipath_set_part_key - set a partition key
- * @pd: the port
- * @key: the key
- *
- * We can have up to 4 active at a time (other than the default, which is
- * always allowed).  This is somewhat tricky, since multiple ports may set
- * the same key, so we reference count them, and clean up at exit.  All 4
- * partition keys are packed into a single infinipath register.  It's an
- * error for a process to set the same pkey multiple times.  We provide no
- * mechanism to de-allocate a pkey at this time; we may eventually need to
- * do that.  I've used the atomic operations, and no locking, and only make
- * a single pass through what's available.  This should be more than
- * adequate for some time. I'll think about spinlocks or the like if and as
- * it's necessary.
- */
-static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       int i, any = 0, pidx = -1;
-       u16 lkey = key & 0x7FFF;
-       int ret;
-
-       if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
-               /* nothing to do; this key always valid */
-               ret = 0;
-               goto bail;
-       }
-
-       ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
-                  "%hx:%x %hx:%x %hx:%x %hx:%x\n",
-                  pd->port_port, key, dd->ipath_pkeys[0],
-                  atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
-                  atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
-                  atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
-                  atomic_read(&dd->ipath_pkeyrefs[3]));
-
-       if (!lkey) {
-               ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
-                          pd->port_port);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       /*
-        * Set the full membership bit, because it has to be
-        * set in the register or the packet, and it seems
-        * cleaner to set in the register than to force all
-        * callers to set it. (see bug 4331)
-        */
-       key |= 0x8000;
-
-       for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-               if (!pd->port_pkeys[i] && pidx == -1)
-                       pidx = i;
-               if (pd->port_pkeys[i] == key) {
-                       ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
-                                  "(%x) more than once\n",
-                                  pd->port_port, key);
-                       ret = -EEXIST;
-                       goto bail;
-               }
-       }
-       if (pidx == -1) {
-               ipath_dbg("All pkeys for port %u already in use, "
-                         "can't set %x\n", pd->port_port, key);
-               ret = -EBUSY;
-               goto bail;
-       }
-       for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (!dd->ipath_pkeys[i]) {
-                       any++;
-                       continue;
-               }
-               if (dd->ipath_pkeys[i] == key) {
-                       atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];
-
-                       if (atomic_inc_return(pkrefs) > 1) {
-                               pd->port_pkeys[pidx] = key;
-                               ipath_cdbg(VERBOSE, "p%u set key %x "
-                                          "matches #%d, count now %d\n",
-                                          pd->port_port, key, i,
-                                          atomic_read(pkrefs));
-                               ret = 0;
-                               goto bail;
-                       } else {
-                               /*
-                                * lost race, decrement count, catch below
-                                */
-                               atomic_dec(pkrefs);
-                               ipath_cdbg(VERBOSE, "Lost race, count was "
-                                          "0, after dec, it's %d\n",
-                                          atomic_read(pkrefs));
-                               any++;
-                       }
-               }
-               if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-                       /*
-                        * It makes no sense to have both the limited and
-                        * full membership PKEY set at the same time since
-                        * the unlimited one will disable the limited one.
-                        */
-                       ret = -EEXIST;
-                       goto bail;
-               }
-       }
-       if (!any) {
-               ipath_dbg("port %u, all pkeys already in use, "
-                         "can't set %x\n", pd->port_port, key);
-               ret = -EBUSY;
-               goto bail;
-       }
-       for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (!dd->ipath_pkeys[i] &&
-                   atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-                       u64 pkey;
-
-                       /* for ipathstats, etc. */
-                       ipath_stats.sps_pkeys[i] = lkey;
-                       pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
-                       pkey =
-                               (u64) dd->ipath_pkeys[0] |
-                               ((u64) dd->ipath_pkeys[1] << 16) |
-                               ((u64) dd->ipath_pkeys[2] << 32) |
-                               ((u64) dd->ipath_pkeys[3] << 48);
-                       ipath_cdbg(PROC, "p%u set key %x in #%d, "
-                                  "portidx %d, new pkey reg %llx\n",
-                                  pd->port_port, key, i, pidx,
-                                  (unsigned long long) pkey);
-                       ipath_write_kreg(
-                               dd, dd->ipath_kregs->kr_partitionkey, pkey);
-
-                       ret = 0;
-                       goto bail;
-               }
-       }
-       ipath_dbg("port %u, all pkeys already in use 2nd pass, "
-                 "can't set %x\n", pd->port_port, key);
-       ret = -EBUSY;
-
-bail:
-       return ret;
-}
-
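As the kernel-doc for ipath_set_part_key() notes, all four partition keys live packed in one infinipath register, and the function above rebuilds the register value from the per-index shifts. A hedged standalone sketch of just that packing step, mirroring those shifts (the helper name is an assumption):

	#include <stdint.h>

	/* Hedged sketch: fold four 16-bit partition keys into the single
	 * 64-bit partition key register value, as in ipath_set_part_key(). */
	static uint64_t pack_pkeys(const uint16_t pkeys[4])
	{
		return	 (uint64_t) pkeys[0]        |
			((uint64_t) pkeys[1] << 16) |
			((uint64_t) pkeys[2] << 32) |
			((uint64_t) pkeys[3] << 48);
	}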
-/**
- * ipath_manage_rcvq - manage a port's receive queue
- * @pd: the port
- * @subport: the subport
- * @start_stop: action to carry out
- *
- * start_stop == 0 disables receive on the port, for use in queue
- * overflow conditions.  start_stop == 1 re-enables, to be used to
- * re-init the software copy of the head register
- */
-static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
-                            int start_stop)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-
-       ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
-                  start_stop ? "en" : "dis", dd->ipath_unit,
-                  pd->port_port, subport);
-       if (subport)
-               goto bail;
-       /* atomically clear receive enable port. */
-       if (start_stop) {
-               /*
-                * On enable, force in-memory copy of the tail register to
-                * 0, so that protocol code doesn't have to worry about
-                * whether or not the chip has yet updated the in-memory
-                * copy or not on return from the system call. The chip
-                * always resets its tail register back to 0 on a
-                * transition from disabled to enabled.  This could cause a
-                * problem if software was broken, and did the enable w/o
-                * the disable, but eventually the in-memory copy will be
-                * updated and correct itself, even in the face of software
-                * bugs.
-                */
-               if (pd->port_rcvhdrtail_kvaddr)
-                       ipath_clear_rcvhdrtail(pd);
-               set_bit(dd->ipath_r_portenable_shift + pd->port_port,
-                       &dd->ipath_rcvctrl);
-       } else
-               clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
-                         &dd->ipath_rcvctrl);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-       /* now be sure chip saw it before we return */
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       if (start_stop) {
-               /*
-                * And try to be sure that tail reg update has happened too.
-                * This should in theory interlock with the RXE changes to
-                * the tail register.  Don't assign it to the tail register
-                * in memory copy, since we could overwrite an update by the
-                * chip if we did.
-                */
-               ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-       }
-       /* always; new head should be equal to new tail; see above */
-bail:
-       return 0;
-}
-
-static void ipath_clean_part_key(struct ipath_portdata *pd,
-                                struct ipath_devdata *dd)
-{
-       int i, j, pchanged = 0;
-       u64 oldpkey;
-
-       /* for debugging only */
-       oldpkey = (u64) dd->ipath_pkeys[0] |
-               ((u64) dd->ipath_pkeys[1] << 16) |
-               ((u64) dd->ipath_pkeys[2] << 32) |
-               ((u64) dd->ipath_pkeys[3] << 48);
-
-       for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-               if (!pd->port_pkeys[i])
-                       continue;
-               ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
-                          pd->port_pkeys[i]);
-               for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
-                       /* check for match independent of the global bit */
-                       if ((dd->ipath_pkeys[j] & 0x7fff) !=
-                           (pd->port_pkeys[i] & 0x7fff))
-                               continue;
-                       if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
-                               ipath_cdbg(VERBOSE, "p%u clear key "
-                                          "%x matches #%d\n",
-                                          pd->port_port,
-                                          pd->port_pkeys[i], j);
-                               ipath_stats.sps_pkeys[j] =
-                                       dd->ipath_pkeys[j] = 0;
-                               pchanged++;
-                       } else {
-                               ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
-                                          "but ref still %d\n", pd->port_port,
-                                          pd->port_pkeys[i], j,
-                                          atomic_read(&dd->ipath_pkeyrefs[j]));
-                               break;
-                       }
-               }
-               pd->port_pkeys[i] = 0;
-       }
-       if (pchanged) {
-               u64 pkey = (u64) dd->ipath_pkeys[0] |
-                       ((u64) dd->ipath_pkeys[1] << 16) |
-                       ((u64) dd->ipath_pkeys[2] << 32) |
-                       ((u64) dd->ipath_pkeys[3] << 48);
-               ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
-                          "new pkey reg %llx\n", pd->port_port,
-                          (unsigned long long) oldpkey,
-                          (unsigned long long) pkey);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-                                pkey);
-       }
-}
-
-/*
- * Initialize the port data with the receive buffer sizes
- * so this can be done while the master port is locked.
- * Otherwise, there is a race with a slave opening the port
- * and seeing these fields uninitialized.
- */
-static void init_user_egr_sizes(struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned egrperchunk, egrcnt, size;
-
-       /*
-        * to avoid wasting a lot of memory, we allocate 32KB chunks of
-        * physically contiguous memory, advance through it until used up
-        * and then allocate more.  Of course, we need memory to store those
-        * extra pointers, now.  Started out with 256KB, but under heavy
-        * memory pressure (creating large files and then copying them over
-        * NFS while doing lots of MPI jobs), we hit some allocation
-        * failures, even though we can sleep...  (2.6.10) Still get
-        * failures at 64K.  32K is the lowest we can go without wasting
-        * additional memory.
-        */
-       size = 0x8000;
-       egrperchunk = size / dd->ipath_rcvegrbufsize;
-       egrcnt = dd->ipath_rcvegrcnt;
-       pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
-       pd->port_rcvegrbufs_perchunk = egrperchunk;
-       pd->port_rcvegrbuf_size = size;
-}
-
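For illustration only, here is the chunking arithmetic from init_user_egr_sizes() worked through with made-up numbers: 32KB physically contiguous chunks, an assumed per-buffer size, and a rounded-up chunk count. The buffer size and eager TID count below are hypothetical illustration values, not taken from any particular chip.

	#include <stdio.h>

	int main(void)
	{
		unsigned int size = 0x8000;		/* 32KB chunk, as in the driver */
		unsigned int egrbufsize = 2048;		/* assumed per-buffer size */
		unsigned int egrcnt = 100;		/* assumed eager TID count */

		unsigned int perchunk = size / egrbufsize;			/* 16 buffers/chunk */
		unsigned int chunks = (egrcnt + perchunk - 1) / perchunk;	/* ceil: 7 chunks */

		printf("%u buffers per chunk, %u chunks\n", perchunk, chunks);
		return 0;
	}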
-/**
- * ipath_create_user_egr - allocate eager TID buffers
- * @pd: the port to allocate TID buffers for
- *
- * This routine is now quite different for user and kernel, because
 * the kernel uses skb's for accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous; we do multiple allocation
- * calls.
- */
-static int ipath_create_user_egr(struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
-       size_t size;
-       int ret;
-       gfp_t gfp_flags;
-
-       /*
-        * GFP_USER, but without GFP_FS, so buffer cache can be
-        * coalesced (we hope); otherwise, even at order 4,
-        * heavy filesystem activity makes these fail.  __GFP_COMP lets
-        * us use compound pages.
-        */
-       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
-       egrcnt = dd->ipath_rcvegrcnt;
-       /* TID number offset for this port */
-       egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
-       egrsize = dd->ipath_rcvegrbufsize;
-       ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
-                  "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
-
-       chunk = pd->port_rcvegrbuf_chunks;
-       egrperchunk = pd->port_rcvegrbufs_perchunk;
-       size = pd->port_rcvegrbuf_size;
-       pd->port_rcvegrbuf = kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf[0]),
-                                          GFP_KERNEL);
-       if (!pd->port_rcvegrbuf) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-       pd->port_rcvegrbuf_phys =
-               kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf_phys[0]),
-                             GFP_KERNEL);
-       if (!pd->port_rcvegrbuf_phys) {
-               ret = -ENOMEM;
-               goto bail_rcvegrbuf;
-       }
-       for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-
-               pd->port_rcvegrbuf[e] = dma_alloc_coherent(
-                       &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
-                       gfp_flags);
-
-               if (!pd->port_rcvegrbuf[e]) {
-                       ret = -ENOMEM;
-                       goto bail_rcvegrbuf_phys;
-               }
-       }
-
-       pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];
-
-       for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
-               dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
-               unsigned i;
-
-               for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
-                       dd->ipath_f_put_tid(dd, e + egroff +
-                                           (u64 __iomem *)
-                                           ((char __iomem *)
-                                            dd->ipath_kregbase +
-                                            dd->ipath_rcvegrbase),
-                                           RCVHQ_RCV_TYPE_EAGER, pa);
-                       pa += egrsize;
-               }
-               cond_resched(); /* don't hog the cpu */
-       }
-
-       ret = 0;
-       goto bail;
-
-bail_rcvegrbuf_phys:
-       for (e = 0; e < pd->port_rcvegrbuf_chunks &&
-               pd->port_rcvegrbuf[e]; e++) {
-               dma_free_coherent(&dd->pcidev->dev, size,
-                                 pd->port_rcvegrbuf[e],
-                                 pd->port_rcvegrbuf_phys[e]);
-
-       }
-       kfree(pd->port_rcvegrbuf_phys);
-       pd->port_rcvegrbuf_phys = NULL;
-bail_rcvegrbuf:
-       kfree(pd->port_rcvegrbuf);
-       pd->port_rcvegrbuf = NULL;
-bail:
-       return ret;
-}
-
-
-/* common code for the mappings on dma_alloc_coherent mem */
-static int ipath_mmap_mem(struct vm_area_struct *vma,
-       struct ipath_portdata *pd, unsigned len, int write_ok,
-       void *kvaddr, char *what)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned long pfn;
-       int ret;
-
-       if ((vma->vm_end - vma->vm_start) > len) {
-               dev_info(&dd->pcidev->dev,
-                        "FAIL on %s: len %lx > %x\n", what,
-                        vma->vm_end - vma->vm_start, len);
-               ret = -EFAULT;
-               goto bail;
-       }
-
-       if (!write_ok) {
-               if (vma->vm_flags & VM_WRITE) {
-                       dev_info(&dd->pcidev->dev,
-                                "%s must be mapped readonly\n", what);
-                       ret = -EPERM;
-                       goto bail;
-               }
-
-               /* don't allow them to later change with mprotect */
-               vma->vm_flags &= ~VM_MAYWRITE;
-       }
-
-       pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
-       ret = remap_pfn_range(vma, vma->vm_start, pfn,
-                             len, vma->vm_page_prot);
-       if (ret)
-               dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
-                        "bytes r%c failed: %d\n", what, pd->port_port,
-                        pfn, len, write_ok?'w':'o', ret);
-       else
-               ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
-                          "r%c\n", what, pd->port_port, pfn, len,
-                          write_ok?'w':'o');
-bail:
-       return ret;
-}
-
-static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
-                    u64 ureg)
-{
-       unsigned long phys;
-       int ret;
-
-       /*
-        * This is real hardware, so use io_remap.  This is the mechanism
-        * for the user process to update the head registers for their port
-        * in the chip.
-        */
-       if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
-               dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
-                        "%lx > PAGE\n", vma->vm_end - vma->vm_start);
-               ret = -EFAULT;
-       } else {
-               phys = dd->ipath_physaddr + ureg;
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-               vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-               ret = io_remap_pfn_range(vma, vma->vm_start,
-                                        phys >> PAGE_SHIFT,
-                                        vma->vm_end - vma->vm_start,
-                                        vma->vm_page_prot);
-       }
-       return ret;
-}
-
-static int mmap_piobufs(struct vm_area_struct *vma,
-                       struct ipath_devdata *dd,
-                       struct ipath_portdata *pd,
-                       unsigned piobufs, unsigned piocnt)
-{
-       unsigned long phys;
-       int ret;
-
-       /*
-        * When we map the PIO buffers in the chip, we want to map them as
-        * writeonly, no read possible.   This prevents access to previous
-        * process data, and catches users who might try to read the i/o
-        * space due to a bug.
-        */
-       if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
-               dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
-                        "reqlen %lx > PAGE\n",
-                        vma->vm_end - vma->vm_start);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       phys = dd->ipath_physaddr + piobufs;
-
-#if defined(__powerpc__)
-       /* There isn't a generic way to specify writethrough mappings */
-       pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-       pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
-       pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
-#endif
-
-       /*
-        * don't allow them to later change to readable with mprotect (for when
-        * not initially mapped readable, as is normally the case)
-        */
-       vma->vm_flags &= ~VM_MAYREAD;
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-
-       ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
-                                vma->vm_end - vma->vm_start,
-                                vma->vm_page_prot);
-bail:
-       return ret;
-}
-
-static int mmap_rcvegrbufs(struct vm_area_struct *vma,
-                          struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned long start, size;
-       size_t total_size, i;
-       unsigned long pfn;
-       int ret;
-
-       size = pd->port_rcvegrbuf_size;
-       total_size = pd->port_rcvegrbuf_chunks * size;
-       if ((vma->vm_end - vma->vm_start) > total_size) {
-               dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
-                        "reqlen %lx > actual %lx\n",
-                        vma->vm_end - vma->vm_start,
-                        (unsigned long) total_size);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (vma->vm_flags & VM_WRITE) {
-               dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
-                        "writable (flags=%lx)\n", vma->vm_flags);
-               ret = -EPERM;
-               goto bail;
-       }
-       /* don't allow them to later change to writeable with mprotect */
-       vma->vm_flags &= ~VM_MAYWRITE;
-
-       start = vma->vm_start;
-
-       for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
-               pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
-               ret = remap_pfn_range(vma, start, pfn, size,
-                                     vma->vm_page_prot);
-               if (ret < 0)
-                       goto bail;
-       }
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/*
- * ipath_file_vma_fault - handle a VMA page fault.
- */
-static int ipath_file_vma_fault(struct vm_area_struct *vma,
-                                       struct vm_fault *vmf)
-{
-       struct page *page;
-
-       page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
-       if (!page)
-               return VM_FAULT_SIGBUS;
-       get_page(page);
-       vmf->page = page;
-
-       return 0;
-}
-
-static const struct vm_operations_struct ipath_file_vm_ops = {
-       .fault = ipath_file_vma_fault,
-};
-
-static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
-                      struct ipath_portdata *pd, unsigned subport)
-{
-       unsigned long len;
-       struct ipath_devdata *dd;
-       void *addr;
-       size_t size;
-       int ret = 0;
-
-       /* If the port is not shared, all addresses should be physical */
-       if (!pd->port_subport_cnt)
-               goto bail;
-
-       dd = pd->port_dd;
-       size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
-
-       /*
-        * Each process has all the subport uregbase, rcvhdrq, and
-        * rcvegrbufs mmapped - as an array for all the processes,
-        * and also separately for this process.
-        */
-       if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
-               addr = pd->subport_uregbase;
-               size = PAGE_SIZE * pd->port_subport_cnt;
-       } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
-               addr = pd->subport_rcvhdr_base;
-               size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
-       } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
-               addr = pd->subport_rcvegrbuf;
-               size *= pd->port_subport_cnt;
-        } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
-                                        PAGE_SIZE * subport)) {
-                addr = pd->subport_uregbase + PAGE_SIZE * subport;
-                size = PAGE_SIZE;
-        } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
-                                pd->port_rcvhdrq_size * subport)) {
-                addr = pd->subport_rcvhdr_base +
-                        pd->port_rcvhdrq_size * subport;
-                size = pd->port_rcvhdrq_size;
-        } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
-                               size * subport)) {
-                addr = pd->subport_rcvegrbuf + size * subport;
-                /* rcvegrbufs are read-only on the slave */
-                if (vma->vm_flags & VM_WRITE) {
-                        dev_info(&dd->pcidev->dev,
-                                 "Can't map eager buffers as "
-                                 "writable (flags=%lx)\n", vma->vm_flags);
-                        ret = -EPERM;
-                        goto bail;
-                }
-                /*
-                 * Don't allow permission to later change to writeable
-                 * with mprotect.
-                 */
-                vma->vm_flags &= ~VM_MAYWRITE;
-       } else {
-               goto bail;
-       }
-       len = vma->vm_end - vma->vm_start;
-       if (len > size) {
-               ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
-       vma->vm_ops = &ipath_file_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       ret = 1;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_mmap - mmap various structures into user space
- * @fp: the file pointer
- * @vma: the VM area
- *
- * We use this to have a shared buffer between the kernel and the user code
- * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
- * buffers in the chip.  We have the open and close entries so we can bump
- * the ref count and keep the driver from being unloaded while still mapped.
- */
-static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
-{
-       struct ipath_portdata *pd;
-       struct ipath_devdata *dd;
-       u64 pgaddr, ureg;
-       unsigned piobufs, piocnt;
-       int ret;
-
-       pd = port_fp(fp);
-       if (!pd) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       dd = pd->port_dd;
-
-       /*
-        * This is the ipath_do_user_init() code, mapping the shared buffers
-        * into the user process. The address referred to by vm_pgoff is the
-        * file offset passed via mmap().  For shared ports, this is the
-        * kernel vmalloc() address of the pages to share with the master.
-        * For non-shared or master ports, this is a physical address.
-        * We only do one mmap for each space mapped.
-        */
-       pgaddr = vma->vm_pgoff << PAGE_SHIFT;
-
-       /*
-        * Check for 0 in case one of the allocations failed, but user
-        * called mmap anyway.
-        */
-       if (!pgaddr)  {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
-                  (unsigned long long) pgaddr, vma->vm_start,
-                  vma->vm_end - vma->vm_start, dd->ipath_unit,
-                  pd->port_port, subport_fp(fp));
-
-       /*
-        * Physical addresses must fit in 40 bits for our hardware.
-        * Check for kernel virtual addresses first, anything else must
-        * match a HW or memory address.
-        */
-       ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
-       if (ret) {
-               if (ret > 0)
-                       ret = 0;
-               goto bail;
-       }
-
-       ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
-       if (!pd->port_subport_cnt) {
-               /* port is not shared */
-               piocnt = pd->port_piocnt;
-               piobufs = pd->port_piobufs;
-       } else if (!subport_fp(fp)) {
-               /* caller is the master */
-               piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
-                        (pd->port_piocnt % pd->port_subport_cnt);
-               piobufs = pd->port_piobufs +
-                       dd->ipath_palign * (pd->port_piocnt - piocnt);
-       } else {
-               unsigned slave = subport_fp(fp) - 1;
-
-               /* caller is a slave */
-               piocnt = pd->port_piocnt / pd->port_subport_cnt;
-               piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
-       }
-
-       if (pgaddr == ureg)
-               ret = mmap_ureg(vma, dd, ureg);
-       else if (pgaddr == piobufs)
-               ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
-       else if (pgaddr == dd->ipath_pioavailregs_phys)
-               /* in-memory copy of pioavail registers */
-               ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
-                                    (void *) dd->ipath_pioavailregs_dma,
-                                    "pioavail registers");
-       else if (pgaddr == pd->port_rcvegr_phys)
-               ret = mmap_rcvegrbufs(vma, pd);
-       else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
-               /*
-                * The rcvhdrq itself; readonly except on HT (so have
-                * to allow writable mapping), multiple pages, contiguous
-                * from an i/o perspective.
-                */
-               ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
-                                    pd->port_rcvhdrq,
-                                    "rcvhdrq");
-       else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
-               /* in-memory copy of rcvhdrq tail register */
-               ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
-                                    pd->port_rcvhdrtail_kvaddr,
-                                    "rcvhdrq tail");
-       else
-               ret = -EINVAL;
-
-       vma->vm_private_data = NULL;
-
-       if (ret < 0)
-               dev_info(&dd->pcidev->dev,
-                        "Failure %d on off %llx len %lx\n",
-                        -ret, (unsigned long long)pgaddr,
-                        vma->vm_end - vma->vm_start);
-bail:
-       return ret;
-}
-
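As the comments in ipath_mmap() explain, the values handed back earlier in the spi_* fields (physical addresses or chip offsets) come back from the process as the mmap() file offset, which the kernel then matches against pgaddr. A hedged sketch of the user-space side follows; how the file descriptor is opened and how the info structure is obtained are assumptions, and only the field name spi_pioavailaddr is taken from the code above.

	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Hedged sketch: map the in-memory pioavail copy read-only by passing
	 * the driver-supplied spi_pioavailaddr back as the mmap() offset. */
	static void *map_pioavail(int fd, unsigned long long spi_pioavailaddr)
	{
		void *p = mmap(NULL, (size_t) sysconf(_SC_PAGESIZE), PROT_READ,
			       MAP_SHARED, fd, (off_t) spi_pioavailaddr);

		if (p == MAP_FAILED)
			perror("mmap pioavail");
		return p;
	}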
-static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
-{
-       unsigned pollflag = 0;
-
-       if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
-           pd->port_hdrqfull != pd->port_hdrqfull_poll) {
-               pollflag |= POLLIN | POLLRDNORM;
-               pd->port_hdrqfull_poll = pd->port_hdrqfull;
-       }
-
-       return pollflag;
-}
-
-static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
-                                     struct file *fp,
-                                     struct poll_table_struct *pt)
-{
-       unsigned pollflag = 0;
-       struct ipath_devdata *dd;
-
-       dd = pd->port_dd;
-
-       /* variable access in ipath_poll_hdrqfull() needs this */
-       rmb();
-       pollflag = ipath_poll_hdrqfull(pd);
-
-       if (pd->port_urgent != pd->port_urgent_poll) {
-               pollflag |= POLLIN | POLLRDNORM;
-               pd->port_urgent_poll = pd->port_urgent;
-       }
-
-       if (!pollflag) {
-               /* this saves a spin_lock/unlock in interrupt handler... */
-               set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
-               /* flush waiting flag so don't miss an event... */
-               wmb();
-               poll_wait(fp, &pd->port_wait, pt);
-       }
-
-       return pollflag;
-}
-
-static unsigned int ipath_poll_next(struct ipath_portdata *pd,
-                                   struct file *fp,
-                                   struct poll_table_struct *pt)
-{
-       u32 head;
-       u32 tail;
-       unsigned pollflag = 0;
-       struct ipath_devdata *dd;
-
-       dd = pd->port_dd;
-
-       /* variable access in ipath_poll_hdrqfull() needs this */
-       rmb();
-       pollflag = ipath_poll_hdrqfull(pd);
-
-       head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
-       if (pd->port_rcvhdrtail_kvaddr)
-               tail = ipath_get_rcvhdrtail(pd);
-       else
-               tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-
-       if (head != tail)
-               pollflag |= POLLIN | POLLRDNORM;
-       else {
-               /* this saves a spin_lock/unlock in interrupt handler */
-               set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
-               /* flush waiting flag so we don't miss an event */
-               wmb();
-
-               set_bit(pd->port_port + dd->ipath_r_intravail_shift,
-                       &dd->ipath_rcvctrl);
-
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                                dd->ipath_rcvctrl);
-
-               if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
-                       ipath_write_ureg(dd, ur_rcvhdrhead,
-                                        dd->ipath_rhdrhead_intr_off | head,
-                                        pd->port_port);
-
-               poll_wait(fp, &pd->port_wait, pt);
-       }
-
-       return pollflag;
-}
-
-static unsigned int ipath_poll(struct file *fp,
-                              struct poll_table_struct *pt)
-{
-       struct ipath_portdata *pd;
-       unsigned pollflag;
-
-       pd = port_fp(fp);
-       if (!pd)
-               pollflag = 0;
-       else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
-               pollflag = ipath_poll_urgent(pd, fp, pt);
-       else
-               pollflag = ipath_poll_next(pd, fp, pt);
-
-       return pollflag;
-}
-
-static int ipath_supports_subports(int user_swmajor, int user_swminor)
-{
-       /* no subport implementation prior to software version 1.3 */
-       return (user_swmajor > 1) || (user_swminor >= 3);
-}
-
-static int ipath_compatible_subports(int user_swmajor, int user_swminor)
-{
-       /* this code is written long-hand for clarity */
-       if (IPATH_USER_SWMAJOR != user_swmajor) {
-               /* no promise of compatibility if major mismatch */
-               return 0;
-       }
-       if (IPATH_USER_SWMAJOR == 1) {
-               switch (IPATH_USER_SWMINOR) {
-               case 0:
-               case 1:
-               case 2:
-                       /* no subport implementation so cannot be compatible */
-                       return 0;
-               case 3:
-                       /* 3 is only compatible with itself */
-                       return user_swminor == 3;
-               default:
-                       /* >= 4 are compatible (or are expected to be) */
-                       return user_swminor >= 4;
-               }
-       }
-       /* make no promises yet for future major versions */
-       return 0;
-}
-
-static int init_subports(struct ipath_devdata *dd,
-                        struct ipath_portdata *pd,
-                        const struct ipath_user_info *uinfo)
-{
-       int ret = 0;
-       unsigned num_subports;
-       size_t size;
-
-       /*
-        * If the user is requesting zero subports,
-        * skip the subport allocation.
-        */
-       if (uinfo->spu_subport_cnt <= 0)
-               goto bail;
-
-       /* Self-consistency check for ipath_compatible_subports() */
-       if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
-           !ipath_compatible_subports(IPATH_USER_SWMAJOR,
-                                      IPATH_USER_SWMINOR)) {
-               dev_info(&dd->pcidev->dev,
-                        "Inconsistent ipath_compatible_subports()\n");
-               goto bail;
-       }
-
-       /* Check for subport compatibility */
-       if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
-                                      uinfo->spu_userversion & 0xffff)) {
-               dev_info(&dd->pcidev->dev,
-                        "Mismatched user version (%d.%d) and driver "
-                        "version (%d.%d) while port sharing. Ensure "
-                         "that driver and library are from the same "
-                         "release.\n",
-                        (int) (uinfo->spu_userversion >> 16),
-                         (int) (uinfo->spu_userversion & 0xffff),
-                        IPATH_USER_SWMAJOR,
-                        IPATH_USER_SWMINOR);
-               goto bail;
-       }
-       if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       num_subports = uinfo->spu_subport_cnt;
-       pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
-       if (!pd->subport_uregbase) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-       /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
-       size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-                    sizeof(u32), PAGE_SIZE) * num_subports;
-       pd->subport_rcvhdr_base = vzalloc(size);
-       if (!pd->subport_rcvhdr_base) {
-               ret = -ENOMEM;
-               goto bail_ureg;
-       }
-
-       pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
-                                       pd->port_rcvegrbuf_size *
-                                       num_subports);
-       if (!pd->subport_rcvegrbuf) {
-               ret = -ENOMEM;
-               goto bail_rhdr;
-       }
-
-       pd->port_subport_cnt = uinfo->spu_subport_cnt;
-       pd->port_subport_id = uinfo->spu_subport_id;
-       pd->active_slaves = 1;
-       set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-       goto bail;
-
-bail_rhdr:
-       vfree(pd->subport_rcvhdr_base);
-bail_ureg:
-       vfree(pd->subport_uregbase);
-       pd->subport_uregbase = NULL;
-bail:
-       return ret;
-}
-
-static int try_alloc_port(struct ipath_devdata *dd, int port,
-                         struct file *fp,
-                         const struct ipath_user_info *uinfo)
-{
-       struct ipath_portdata *pd;
-       int ret;
-
-       if (!(pd = dd->ipath_pd[port])) {
-               void *ptmp;
-
-               pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
-
-               /*
-                * Allocate memory for use in ipath_tid_update() just once
-                * at open, not per call.  Reduces cost of expected send
-                * setup.
-                */
-               ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
-                              dd->ipath_rcvtidcnt * sizeof(struct page **),
-                              GFP_KERNEL);
-               if (!pd || !ptmp) {
-                       ipath_dev_err(dd, "Unable to allocate portdata "
-                                     "memory, failing open\n");
-                       ret = -ENOMEM;
-                       kfree(pd);
-                       kfree(ptmp);
-                       goto bail;
-               }
-               dd->ipath_pd[port] = pd;
-               dd->ipath_pd[port]->port_port = port;
-               dd->ipath_pd[port]->port_dd = dd;
-               dd->ipath_pd[port]->port_tid_pg_list = ptmp;
-               init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
-       }
-       if (!pd->port_cnt) {
-               pd->userversion = uinfo->spu_userversion;
-               init_user_egr_sizes(pd);
-               if ((ret = init_subports(dd, pd, uinfo)) != 0)
-                       goto bail;
-               ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
-                          current->comm, current->pid, dd->ipath_unit,
-                          port);
-               pd->port_cnt = 1;
-               port_fp(fp) = pd;
-               pd->port_pid = get_pid(task_pid(current));
-               strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
-               ipath_stats.sps_ports++;
-               ret = 0;
-       } else
-               ret = -EBUSY;
-
-bail:
-       return ret;
-}
-
-static inline int usable(struct ipath_devdata *dd)
-{
-       return dd &&
-               (dd->ipath_flags & IPATH_PRESENT) &&
-               dd->ipath_kregbase &&
-               dd->ipath_lid &&
-               !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
-                                    | IPATH_LINKUNK));
-}
-
-static int find_free_port(int unit, struct file *fp,
-                         const struct ipath_user_info *uinfo)
-{
-       struct ipath_devdata *dd = ipath_lookup(unit);
-       int ret, i;
-
-       if (!dd) {
-               ret = -ENODEV;
-               goto bail;
-       }
-
-       if (!usable(dd)) {
-               ret = -ENETDOWN;
-               goto bail;
-       }
-
-       for (i = 1; i < dd->ipath_cfgports; i++) {
-               ret = try_alloc_port(dd, i, fp, uinfo);
-               if (ret != -EBUSY)
-                       goto bail;
-       }
-       ret = -EBUSY;
-
-bail:
-       return ret;
-}
-
-static int find_best_unit(struct file *fp,
-                         const struct ipath_user_info *uinfo)
-{
-       int ret = 0, i, prefunit = -1, devmax;
-       int maxofallports, npresent, nup;
-       int ndev;
-
-       devmax = ipath_count_units(&npresent, &nup, &maxofallports);
-
-       /*
-        * This code is present to allow a knowledgeable person to
-        * specify the layout of processes to processors before opening
-        * this driver, and then we'll assign the process to the "closest"
-        * InfiniPath chip to that processor (we assume reasonable connectivity,
-        * for now).  This code assumes that if affinity has been set
-        * before this point, at most one cpu is set; for now this
-        * is reasonable.  I check for both cpumask_empty() and cpumask_full(),
-        * in case some kernel variant sets none of the bits when no
-        * affinity is set.  2.6.11 and 12 kernels have all present
-        * cpus set.  Some day we'll have to fix it up further to handle
-        * a cpu subset.  This algorithm fails for two HT chips connected
-        * in tunnel fashion.  Eventually this needs real topology
-        * information.  There may be some issues with dual core numbering
-        * as well.  This needs more work prior to release.
-        */
-       if (!cpumask_empty(tsk_cpus_allowed(current)) &&
-           !cpumask_full(tsk_cpus_allowed(current))) {
-               int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
-               get_online_cpus();
-               for_each_online_cpu(i)
-                       if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
-                               ipath_cdbg(PROC, "%s[%u] affinity set for "
-                                          "cpu %d/%d\n", current->comm,
-                                          current->pid, i, ncpus);
-                               curcpu = i;
-                               nset++;
-                       }
-               put_online_cpus();
-               if (curcpu != -1 && nset != ncpus) {
-                       if (npresent) {
-                               prefunit = curcpu / (ncpus / npresent);
-                               ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
-                                         "%d cpus/chip, select unit %d\n",
-                                         current->comm, current->pid,
-                                         npresent, ncpus, ncpus / npresent,
-                                         prefunit);
-                       }
-               }
-       }
-
-       /*
-        * user ports start at 1, kernel port is 0
-        * For now, we do round-robin access across all chips
-        */
-
-       if (prefunit != -1)
-               devmax = prefunit + 1;
-recheck:
-       for (i = 1; i < maxofallports; i++) {
-               for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
-                    ndev++) {
-                       struct ipath_devdata *dd = ipath_lookup(ndev);
-
-                       if (!usable(dd))
-                               continue; /* can't use this unit */
-                       if (i >= dd->ipath_cfgports)
-                               /*
-                                * Maxed out on users of this unit. Try
-                                * next.
-                                */
-                               continue;
-                       ret = try_alloc_port(dd, i, fp, uinfo);
-                       if (!ret)
-                               goto done;
-               }
-       }
-
-       if (npresent) {
-               if (nup == 0) {
-                       ret = -ENETDOWN;
-                       ipath_dbg("No ports available (none initialized "
-                                 "and ready)\n");
-               } else {
-                       if (prefunit > 0) {
-                               /* if started above 0, retry from 0 */
-                               ipath_cdbg(PROC,
-                                          "%s[%u] no ports on prefunit "
-                                          "%d, clear and re-check\n",
-                                          current->comm, current->pid,
-                                          prefunit);
-                               devmax = ipath_count_units(NULL, NULL,
-                                                          NULL);
-                               prefunit = -1;
-                               goto recheck;
-                       }
-                       ret = -EBUSY;
-                       ipath_dbg("No ports available\n");
-               }
-       } else {
-               ret = -ENXIO;
-               ipath_dbg("No boards found\n");
-       }
-
-done:
-       return ret;
-}
-
-static int find_shared_port(struct file *fp,
-                           const struct ipath_user_info *uinfo)
-{
-       int devmax, ndev, i;
-       int ret = 0;
-
-       devmax = ipath_count_units(NULL, NULL, NULL);
-
-       for (ndev = 0; ndev < devmax; ndev++) {
-               struct ipath_devdata *dd = ipath_lookup(ndev);
-
-               if (!usable(dd))
-                       continue;
-               for (i = 1; i < dd->ipath_cfgports; i++) {
-                       struct ipath_portdata *pd = dd->ipath_pd[i];
-
-                       /* Skip ports which are not yet open */
-                       if (!pd || !pd->port_cnt)
-                               continue;
-                       /* Skip port if it doesn't match the requested one */
-                       if (pd->port_subport_id != uinfo->spu_subport_id)
-                               continue;
-                       /* Verify the sharing process matches the master */
-                       if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
-                           pd->userversion != uinfo->spu_userversion ||
-                           pd->port_cnt >= pd->port_subport_cnt) {
-                               ret = -EINVAL;
-                               goto done;
-                       }
-                       port_fp(fp) = pd;
-                       subport_fp(fp) = pd->port_cnt++;
-                       pd->port_subpid[subport_fp(fp)] =
-                               get_pid(task_pid(current));
-                       tidcursor_fp(fp) = 0;
-                       pd->active_slaves |= 1 << subport_fp(fp);
-                       ipath_cdbg(PROC,
-                                  "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
-                                  current->comm, current->pid,
-                                  subport_fp(fp),
-                                  pd->port_comm, pid_nr(pd->port_pid),
-                                  dd->ipath_unit, pd->port_port);
-                       ret = 1;
-                       goto done;
-               }
-       }
-
-done:
-       return ret;
-}
-
-static int ipath_open(struct inode *in, struct file *fp)
-{
-       /* The real work is performed later in ipath_assign_port() */
-       fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
-       return fp->private_data ? 0 : -ENOMEM;
-}
-
-/* Get port early, so can set affinity prior to memory allocation */
-static int ipath_assign_port(struct file *fp,
-                             const struct ipath_user_info *uinfo)
-{
-       int ret;
-       int i_minor;
-       unsigned swmajor, swminor;
-
-       /* Check to be sure we haven't already initialized this file */
-       if (port_fp(fp)) {
-               ret = -EINVAL;
-               goto done;
-       }
-
-       /* for now, if major version is different, bail */
-       swmajor = uinfo->spu_userversion >> 16;
-       if (swmajor != IPATH_USER_SWMAJOR) {
-               ipath_dbg("User major version %d not same as driver "
-                         "major %d\n", uinfo->spu_userversion >> 16,
-                         IPATH_USER_SWMAJOR);
-               ret = -ENODEV;
-               goto done;
-       }
-
-       swminor = uinfo->spu_userversion & 0xffff;
-       if (swminor != IPATH_USER_SWMINOR)
-               ipath_dbg("User minor version %d not same as driver "
-                         "minor %d\n", swminor, IPATH_USER_SWMINOR);
-
-       mutex_lock(&ipath_mutex);
-
-       if (ipath_compatible_subports(swmajor, swminor) &&
-           uinfo->spu_subport_cnt &&
-           (ret = find_shared_port(fp, uinfo))) {
-               if (ret > 0)
-                       ret = 0;
-               goto done_chk_sdma;
-       }
-
-       i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
-       ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
-                  (long)file_inode(fp)->i_rdev, i_minor);
-
-       if (i_minor)
-               ret = find_free_port(i_minor - 1, fp, uinfo);
-       else
-               ret = find_best_unit(fp, uinfo);
-
-done_chk_sdma:
-       if (!ret) {
-               struct ipath_filedata *fd = fp->private_data;
-               const struct ipath_portdata *pd = fd->pd;
-               const struct ipath_devdata *dd = pd->port_dd;
-
-               fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
-                                                     dd->ipath_unit,
-                                                     pd->port_port,
-                                                     fd->subport);
-
-               if (!fd->pq)
-                       ret = -ENOMEM;
-       }
-
-       mutex_unlock(&ipath_mutex);
-
-done:
-       return ret;
-}
-
-
-static int ipath_do_user_init(struct file *fp,
-                             const struct ipath_user_info *uinfo)
-{
-       int ret;
-       struct ipath_portdata *pd = port_fp(fp);
-       struct ipath_devdata *dd;
-       u32 head32;
-
-       /* Subports don't need to initialize anything since master did it. */
-       if (subport_fp(fp)) {
-               ret = wait_event_interruptible(pd->port_wait,
-                       !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
-               goto done;
-       }
-
-       dd = pd->port_dd;
-
-       if (uinfo->spu_rcvhdrsize) {
-               ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
-               if (ret)
-                       goto done;
-       }
-
-       /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
-
-       /* some ports may get extra buffers, calculate that here */
-       if (pd->port_port <= dd->ipath_ports_extrabuf)
-               pd->port_piocnt = dd->ipath_pbufsport + 1;
-       else
-               pd->port_piocnt = dd->ipath_pbufsport;
-
-       /* for right now, kernel piobufs are at end, so port 1 is at 0 */
-       if (pd->port_port <= dd->ipath_ports_extrabuf)
-               pd->port_pio_base = (dd->ipath_pbufsport + 1)
-                       * (pd->port_port - 1);
-       else
-               pd->port_pio_base = dd->ipath_ports_extrabuf +
-                       dd->ipath_pbufsport * (pd->port_port - 1);
-       pd->port_piobufs = dd->ipath_piobufbase +
-               pd->port_pio_base * dd->ipath_palign;
-       ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
-               " first pio %u\n", pd->port_port, pd->port_piobufs,
-               pd->port_piocnt, pd->port_pio_base);
-       ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
-
-       /*
-        * Now allocate the rcvhdr Q and eager TIDs; skip the TID
-        * array for the time being.  If pd->port_port exceeds what the
-        * chip supports, we will someday need extra handling here to
-        * route the overflow through port 0.
-        */
-       ret = ipath_create_rcvhdrq(dd, pd);
-       if (!ret)
-               ret = ipath_create_user_egr(pd);
-       if (ret)
-               goto done;
-
-       /*
-        * set the eager head register for this port to the current values
-        * of the tail pointers, since we don't know if they were
-        * updated on last use of the port.
-        */
-       head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
-       ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
-       pd->port_lastrcvhdrqtail = -1;
-       ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
-               pd->port_port, head32);
-       pd->port_tidcursor = 0; /* start at beginning after open */
-
-       /* initialize poll variables... */
-       pd->port_urgent = 0;
-       pd->port_urgent_poll = 0;
-       pd->port_hdrqfull_poll = pd->port_hdrqfull;
-
-       /*
-        * Now enable the port for receive.
-        * Some chips are set to DMA the tail register to memory when it
-        * changes (and when the update bit transitions from 0 to 1); for
-        * those chips, we turn it off and then back on.
-        * This will (very briefly) affect any other open ports, but the
-        * duration is very short, and therefore isn't an issue.  We
-        * explicitly set the in-memory tail copy to 0 beforehand, so we
-        * don't have to wait to be sure the DMA update has happened
-        * (chip resets head/tail to 0 on transition to enable).
-        */
-       set_bit(dd->ipath_r_portenable_shift + pd->port_port,
-               &dd->ipath_rcvctrl);
-       if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-               if (pd->port_rcvhdrtail_kvaddr)
-                       ipath_clear_rcvhdrtail(pd);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                       dd->ipath_rcvctrl &
-                       ~(1ULL << dd->ipath_r_tailupd_shift));
-       }
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-       /* Notify any waiting slaves */
-       if (pd->port_subport_cnt) {
-               clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-               wake_up(&pd->port_wait);
-       }
-done:
-       return ret;
-}
-
-/**
- * unlock_expected_tids - unlock any expected TID entries the port still had in use
- * @pd: port
- *
- * We don't actually update the chip here, because we do a bulk update
- * below, using ipath_f_clear_tids.
- */
-static void unlock_expected_tids(struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
-       int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;
-
-       ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
-                  pd->port_port);
-       for (i = port_tidbase; i < maxtid; i++) {
-               struct page *ps = dd->ipath_pageshadow[i];
-
-               if (!ps)
-                       continue;
-
-               dd->ipath_pageshadow[i] = NULL;
-               pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
-                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-               ipath_release_user_pages_on_close(&ps, 1);
-               cnt++;
-               ipath_stats.sps_pageunlocks++;
-       }
-       if (cnt)
-               ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
-                          pd->port_port, cnt);
-
-       if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
-               ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
-                          (unsigned long long) ipath_stats.sps_pagelocks,
-                          (unsigned long long)
-                          ipath_stats.sps_pageunlocks);
-}
-
-static int ipath_close(struct inode *in, struct file *fp)
-{
-       struct ipath_filedata *fd;
-       struct ipath_portdata *pd;
-       struct ipath_devdata *dd;
-       unsigned long flags;
-       unsigned port;
-       struct pid *pid;
-
-       ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
-                  (long)in->i_rdev, fp->private_data);
-
-       mutex_lock(&ipath_mutex);
-
-       fd = fp->private_data;
-       fp->private_data = NULL;
-       pd = fd->pd;
-       if (!pd) {
-               mutex_unlock(&ipath_mutex);
-               goto bail;
-       }
-
-       dd = pd->port_dd;
-
-       /* drain user sdma queue */
-       ipath_user_sdma_queue_drain(dd, fd->pq);
-       ipath_user_sdma_queue_destroy(fd->pq);
-
-       if (--pd->port_cnt) {
-               /*
-                * XXX If the master closes the port before the slave(s),
-                * revoke the mmap for the eager receive queue so
-                * the slave(s) don't wait for receive data forever.
-                */
-               pd->active_slaves &= ~(1 << fd->subport);
-               put_pid(pd->port_subpid[fd->subport]);
-               pd->port_subpid[fd->subport] = NULL;
-               mutex_unlock(&ipath_mutex);
-               goto bail;
-       }
-       /* early; no interrupt users after this */
-       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-       port = pd->port_port;
-       dd->ipath_pd[port] = NULL;
-       pid = pd->port_pid;
-       pd->port_pid = NULL;
-       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-
-       if (pd->port_rcvwait_to || pd->port_piowait_to
-           || pd->port_rcvnowait || pd->port_pionowait) {
-               ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
-                          "%u rcv %u, pio already\n",
-                          pd->port_port, pd->port_rcvwait_to,
-                          pd->port_piowait_to, pd->port_rcvnowait,
-                          pd->port_pionowait);
-               pd->port_rcvwait_to = pd->port_piowait_to =
-                       pd->port_rcvnowait = pd->port_pionowait = 0;
-       }
-       if (pd->port_flag) {
-               ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
-                         pd->port_port, pd->port_flag);
-               pd->port_flag = 0;
-       }
-
-       if (dd->ipath_kregbase) {
-               /* atomically clear receive enable port and intr avail. */
-               clear_bit(dd->ipath_r_portenable_shift + port,
-                         &dd->ipath_rcvctrl);
-               clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
-                         &dd->ipath_rcvctrl);
-               ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
-                       dd->ipath_rcvctrl);
-               /* and read back from chip to be sure that nothing
-                * else is in flight when we do the rest */
-               (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-               /* clean up the pkeys for this port user */
-               ipath_clean_part_key(pd, dd);
-               /*
-                * be paranoid, and never write 0's to these, just use an
-                * unused part of the port 0 tail page.  Of course,
-                * rcvhdraddr points to a large chunk of memory, so this
-                * could still trash things, but at least it won't trash
-                * page 0, and by disabling the port, it should stop "soon",
-                * even if a packet or two is already in flight after we
-                * disabled the port.
-                */
-               ipath_write_kreg_port(dd,
-                       dd->ipath_kregs->kr_rcvhdrtailaddr, port,
-                       dd->ipath_dummy_hdrq_phys);
-               ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
-                       pd->port_port, dd->ipath_dummy_hdrq_phys);
-
-               ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
-               ipath_chg_pioavailkernel(dd, pd->port_pio_base,
-                       pd->port_piocnt, 1);
-
-               dd->ipath_f_clear_tids(dd, pd->port_port);
-
-               if (dd->ipath_pageshadow)
-                       unlock_expected_tids(pd);
-               ipath_stats.sps_ports--;
-               ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-                          pd->port_comm, pid_nr(pid),
-                          dd->ipath_unit, port);
-       }
-
-       put_pid(pid);
-       mutex_unlock(&ipath_mutex);
-       ipath_free_pddata(dd, pd); /* after releasing the mutex */
-
-bail:
-       kfree(fd);
-       return 0;
-}
-
-static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
-                          struct ipath_port_info __user *uinfo)
-{
-       struct ipath_port_info info;
-       int nup;
-       int ret;
-       size_t sz;
-
-       (void) ipath_count_units(NULL, &nup, NULL);
-       info.num_active = nup;
-       info.unit = pd->port_dd->ipath_unit;
-       info.port = pd->port_port;
-       info.subport = subport;
-       /* Don't return new fields if old library opened the port. */
-       if (ipath_supports_subports(pd->userversion >> 16,
-                                   pd->userversion & 0xffff)) {
-               /* Number of user ports available for this device. */
-               info.num_ports = pd->port_dd->ipath_cfgports - 1;
-               info.num_subports = pd->port_subport_cnt;
-               sz = sizeof(info);
-       } else
-               sz = sizeof(info) - 2 * sizeof(u16);
-
-       if (copy_to_user(uinfo, &info, sz)) {
-               ret = -EFAULT;
-               goto bail;
-       }
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static int ipath_get_slave_info(struct ipath_portdata *pd,
-                               void __user *slave_mask_addr)
-{
-       int ret = 0;
-
-       if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
-               ret = -EFAULT;
-       return ret;
-}
-
-static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
-                                  u32 __user *inflightp)
-{
-       const u32 val = ipath_user_sdma_inflight_counter(pq);
-
-       if (put_user(val, inflightp))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int ipath_sdma_get_complete(struct ipath_devdata *dd,
-                                  struct ipath_user_sdma_queue *pq,
-                                  u32 __user *completep)
-{
-       u32 val;
-       int err;
-
-       err = ipath_user_sdma_make_progress(dd, pq);
-       if (err < 0)
-               return err;
-
-       val = ipath_user_sdma_complete_counter(pq);
-       if (put_user(val, completep))
-               return -EFAULT;
-
-       return 0;
-}
-
-static ssize_t ipath_write(struct file *fp, const char __user *data,
-                          size_t count, loff_t *off)
-{
-       const struct ipath_cmd __user *ucmd;
-       struct ipath_portdata *pd;
-       const void __user *src;
-       size_t consumed, copy;
-       struct ipath_cmd cmd;
-       ssize_t ret = 0;
-       void *dest;
-
-       if (count < sizeof(cmd.type)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       ucmd = (const struct ipath_cmd __user *) data;
-
-       if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
-               ret = -EFAULT;
-               goto bail;
-       }
-
-       consumed = sizeof(cmd.type);
-
-       switch (cmd.type) {
-       case IPATH_CMD_ASSIGN_PORT:
-       case __IPATH_CMD_USER_INIT:
-       case IPATH_CMD_USER_INIT:
-               copy = sizeof(cmd.cmd.user_info);
-               dest = &cmd.cmd.user_info;
-               src = &ucmd->cmd.user_info;
-               break;
-       case IPATH_CMD_RECV_CTRL:
-               copy = sizeof(cmd.cmd.recv_ctrl);
-               dest = &cmd.cmd.recv_ctrl;
-               src = &ucmd->cmd.recv_ctrl;
-               break;
-       case IPATH_CMD_PORT_INFO:
-               copy = sizeof(cmd.cmd.port_info);
-               dest = &cmd.cmd.port_info;
-               src = &ucmd->cmd.port_info;
-               break;
-       case IPATH_CMD_TID_UPDATE:
-       case IPATH_CMD_TID_FREE:
-               copy = sizeof(cmd.cmd.tid_info);
-               dest = &cmd.cmd.tid_info;
-               src = &ucmd->cmd.tid_info;
-               break;
-       case IPATH_CMD_SET_PART_KEY:
-               copy = sizeof(cmd.cmd.part_key);
-               dest = &cmd.cmd.part_key;
-               src = &ucmd->cmd.part_key;
-               break;
-       case __IPATH_CMD_SLAVE_INFO:
-               copy = sizeof(cmd.cmd.slave_mask_addr);
-               dest = &cmd.cmd.slave_mask_addr;
-               src = &ucmd->cmd.slave_mask_addr;
-               break;
-       case IPATH_CMD_PIOAVAILUPD:     // force an update of PIOAvail reg
-               copy = 0;
-               src = NULL;
-               dest = NULL;
-               break;
-       case IPATH_CMD_POLL_TYPE:
-               copy = sizeof(cmd.cmd.poll_type);
-               dest = &cmd.cmd.poll_type;
-               src = &ucmd->cmd.poll_type;
-               break;
-       case IPATH_CMD_ARMLAUNCH_CTRL:
-               copy = sizeof(cmd.cmd.armlaunch_ctrl);
-               dest = &cmd.cmd.armlaunch_ctrl;
-               src = &ucmd->cmd.armlaunch_ctrl;
-               break;
-       case IPATH_CMD_SDMA_INFLIGHT:
-               copy = sizeof(cmd.cmd.sdma_inflight);
-               dest = &cmd.cmd.sdma_inflight;
-               src = &ucmd->cmd.sdma_inflight;
-               break;
-       case IPATH_CMD_SDMA_COMPLETE:
-               copy = sizeof(cmd.cmd.sdma_complete);
-               dest = &cmd.cmd.sdma_complete;
-               src = &ucmd->cmd.sdma_complete;
-               break;
-       default:
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (copy) {
-               if ((count - consumed) < copy) {
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               if (copy_from_user(dest, src, copy)) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-
-               consumed += copy;
-       }
-
-       pd = port_fp(fp);
-       if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
-               cmd.type != IPATH_CMD_ASSIGN_PORT) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       switch (cmd.type) {
-       case IPATH_CMD_ASSIGN_PORT:
-               ret = ipath_assign_port(fp, &cmd.cmd.user_info);
-               if (ret)
-                       goto bail;
-               break;
-       case __IPATH_CMD_USER_INIT:
-               /* backwards compatibility, get port first */
-               ret = ipath_assign_port(fp, &cmd.cmd.user_info);
-               if (ret)
-                       goto bail;
-               /* and fall through to current version. */
-       case IPATH_CMD_USER_INIT:
-               ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
-               if (ret)
-                       goto bail;
-               ret = ipath_get_base_info(
-                       fp, (void __user *) (unsigned long)
-                       cmd.cmd.user_info.spu_base_info,
-                       cmd.cmd.user_info.spu_base_info_size);
-               break;
-       case IPATH_CMD_RECV_CTRL:
-               ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
-               break;
-       case IPATH_CMD_PORT_INFO:
-               ret = ipath_port_info(pd, subport_fp(fp),
-                                     (struct ipath_port_info __user *)
-                                     (unsigned long) cmd.cmd.port_info);
-               break;
-       case IPATH_CMD_TID_UPDATE:
-               ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
-               break;
-       case IPATH_CMD_TID_FREE:
-               ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
-               break;
-       case IPATH_CMD_SET_PART_KEY:
-               ret = ipath_set_part_key(pd, cmd.cmd.part_key);
-               break;
-       case __IPATH_CMD_SLAVE_INFO:
-               ret = ipath_get_slave_info(pd,
-                                          (void __user *) (unsigned long)
-                                          cmd.cmd.slave_mask_addr);
-               break;
-       case IPATH_CMD_PIOAVAILUPD:
-               ipath_force_pio_avail_update(pd->port_dd);
-               break;
-       case IPATH_CMD_POLL_TYPE:
-               pd->poll_type = cmd.cmd.poll_type;
-               break;
-       case IPATH_CMD_ARMLAUNCH_CTRL:
-               if (cmd.cmd.armlaunch_ctrl)
-                       ipath_enable_armlaunch(pd->port_dd);
-               else
-                       ipath_disable_armlaunch(pd->port_dd);
-               break;
-       case IPATH_CMD_SDMA_INFLIGHT:
-               ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
-                                             (u32 __user *) (unsigned long)
-                                             cmd.cmd.sdma_inflight);
-               break;
-       case IPATH_CMD_SDMA_COMPLETE:
-               ret = ipath_sdma_get_complete(pd->port_dd,
-                                             user_sdma_queue_fp(fp),
-                                             (u32 __user *) (unsigned long)
-                                             cmd.cmd.sdma_complete);
-               break;
-       }
-
-       if (ret >= 0)
-               ret = consumed;
-
-bail:
-       return ret;
-}
-
-static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
-       struct file *filp = iocb->ki_filp;
-       struct ipath_filedata *fp = filp->private_data;
-       struct ipath_portdata *pd = port_fp(filp);
-       struct ipath_user_sdma_queue *pq = fp->pq;
-
-       if (!iter_is_iovec(from) || !from->nr_segs)
-               return -EINVAL;
-
-       return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs);
-}
-
-static struct class *ipath_class;
-
-static int init_cdev(int minor, char *name, const struct file_operations *fops,
-                    struct cdev **cdevp, struct device **devp)
-{
-       const dev_t dev = MKDEV(IPATH_MAJOR, minor);
-       struct cdev *cdev = NULL;
-       struct device *device = NULL;
-       int ret;
-
-       cdev = cdev_alloc();
-       if (!cdev) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not allocate cdev for minor %d, %s\n",
-                      minor, name);
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       cdev->owner = THIS_MODULE;
-       cdev->ops = fops;
-       kobject_set_name(&cdev->kobj, name);
-
-       ret = cdev_add(cdev, dev, 1);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not add cdev for minor %d, %s (err %d)\n",
-                      minor, name, -ret);
-               goto err_cdev;
-       }
-
-       device = device_create(ipath_class, NULL, dev, NULL, name);
-
-       if (IS_ERR(device)) {
-               ret = PTR_ERR(device);
-               printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
-                      "device for minor %d, %s (err %d)\n",
-                      minor, name, -ret);
-               goto err_cdev;
-       }
-
-       goto done;
-
-err_cdev:
-       cdev_del(cdev);
-       cdev = NULL;
-
-done:
-       if (ret >= 0) {
-               *cdevp = cdev;
-               *devp = device;
-       } else {
-               *cdevp = NULL;
-               *devp = NULL;
-       }
-
-       return ret;
-}
-
-int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
-                   struct cdev **cdevp, struct device **devp)
-{
-       return init_cdev(minor, name, fops, cdevp, devp);
-}
-
-static void cleanup_cdev(struct cdev **cdevp,
-                        struct device **devp)
-{
-       struct device *dev = *devp;
-
-       if (dev) {
-               device_unregister(dev);
-               *devp = NULL;
-       }
-
-       if (*cdevp) {
-               cdev_del(*cdevp);
-               *cdevp = NULL;
-       }
-}
-
-void ipath_cdev_cleanup(struct cdev **cdevp,
-                       struct device **devp)
-{
-       cleanup_cdev(cdevp, devp);
-}
-
-static struct cdev *wildcard_cdev;
-static struct device *wildcard_dev;
-
-static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
-
-static int user_init(void)
-{
-       int ret;
-
-       ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
-                      "chrdev region (err %d)\n", -ret);
-               goto done;
-       }
-
-       ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);
-
-       if (IS_ERR(ipath_class)) {
-               ret = PTR_ERR(ipath_class);
-               printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
-                      "device class (err %d)\n", -ret);
-               goto bail;
-       }
-
-       goto done;
-bail:
-       unregister_chrdev_region(dev, IPATH_NMINORS);
-done:
-       return ret;
-}
-
-static void user_cleanup(void)
-{
-       if (ipath_class) {
-               class_destroy(ipath_class);
-               ipath_class = NULL;
-       }
-
-       unregister_chrdev_region(dev, IPATH_NMINORS);
-}
-
-static atomic_t user_count = ATOMIC_INIT(0);
-static atomic_t user_setup = ATOMIC_INIT(0);
-
-int ipath_user_add(struct ipath_devdata *dd)
-{
-       char name[10];
-       int ret;
-
-       if (atomic_inc_return(&user_count) == 1) {
-               ret = user_init();
-               if (ret < 0) {
-                       ipath_dev_err(dd, "Unable to set up user support: "
-                                     "error %d\n", -ret);
-                       goto bail;
-               }
-               ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
-                               &wildcard_dev);
-               if (ret < 0) {
-                       ipath_dev_err(dd, "Could not create wildcard "
-                                     "minor: error %d\n", -ret);
-                       goto bail_user;
-               }
-
-               atomic_set(&user_setup, 1);
-       }
-
-       snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
-
-       ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
-                       &dd->user_cdev, &dd->user_dev);
-       if (ret < 0)
-               ipath_dev_err(dd, "Could not create user minor %d, %s\n",
-                             dd->ipath_unit + 1, name);
-
-       goto bail;
-
-bail_user:
-       user_cleanup();
-bail:
-       return ret;
-}
-
-void ipath_user_remove(struct ipath_devdata *dd)
-{
-       cleanup_cdev(&dd->user_cdev, &dd->user_dev);
-
-       if (atomic_dec_return(&user_count) == 0) {
-               if (atomic_read(&user_setup) == 0)
-                       goto bail;
-
-               cleanup_cdev(&wildcard_cdev, &wildcard_dev);
-               user_cleanup();
-
-               atomic_set(&user_setup, 0);
-       }
-bail:
-       return;
-}
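
Note: the ipath_write() dispatcher above implements a type-tagged command protocol: user space writes a command type followed by only the union member that type selects, and the driver answers with the number of bytes it consumed. The fragment below is a minimal user-space sketch of that calling convention; the struct layout, the command value and the device path are simplified stand-ins for illustration, not the real ipath uapi definitions.

/*
 * Minimal sketch of the type-tagged write() protocol handled by
 * ipath_write() above.  The structures here are simplified stand-ins
 * for the real ipath user-API header and exist only to illustrate the
 * convention: write the command type plus just the payload that type
 * needs, and treat a return value equal to the bytes written as success.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct demo_user_info {			/* stand-in for struct ipath_user_info */
	uint32_t spu_userversion;	/* major version in the high 16 bits */
	uint32_t spu_subport_cnt;
	uint32_t spu_subport_id;
};

struct demo_cmd {			/* stand-in for struct ipath_cmd */
	uint32_t type;
	union {
		struct demo_user_info user_info;
	} cmd;
};

static int send_assign_port(int fd, uint32_t userversion)
{
	struct demo_cmd c;
	ssize_t want, got;

	memset(&c, 0, sizeof(c));
	c.type = 1;			/* hypothetical ASSIGN_PORT value */
	c.cmd.user_info.spu_userversion = userversion;

	/* Only the type plus the selected union member is written. */
	want = sizeof(c.type) + sizeof(c.cmd.user_info);
	got = write(fd, &c, want);
	return got == want ? 0 : -1;	/* the driver echoes bytes consumed */
}

int main(void)
{
	int fd = open("/dev/ipath", O_RDWR);	/* wildcard minor, if present */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (send_assign_port(fd, 1u << 16))
		fprintf(stderr, "assign-port write failed\n");
	close(fd);
	return 0;
}
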
diff --git a/drivers/staging/rdma/ipath/ipath_fs.c b/drivers/staging/rdma/ipath/ipath_fs.c
deleted file mode 100644 (file)
index 476fcdf..0000000
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-
-#include "ipath_kernel.h"
-
-#define IPATHFS_MAGIC 0x726a77
-
-static struct super_block *ipath_super;
-
-static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
-                        umode_t mode, const struct file_operations *fops,
-                        void *data)
-{
-       int error;
-       struct inode *inode = new_inode(dir->i_sb);
-
-       if (!inode) {
-               error = -EPERM;
-               goto bail;
-       }
-
-       inode->i_ino = get_next_ino();
-       inode->i_mode = mode;
-       inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-       inode->i_private = data;
-       if (S_ISDIR(mode)) {
-               inode->i_op = &simple_dir_inode_operations;
-               inc_nlink(inode);
-               inc_nlink(dir);
-       }
-
-       inode->i_fop = fops;
-
-       d_instantiate(dentry, inode);
-       error = 0;
-
-bail:
-       return error;
-}
-
-static int create_file(const char *name, umode_t mode,
-                      struct dentry *parent, struct dentry **dentry,
-                      const struct file_operations *fops, void *data)
-{
-       int error;
-
-       inode_lock(d_inode(parent));
-       *dentry = lookup_one_len(name, parent, strlen(name));
-       if (!IS_ERR(*dentry))
-               error = ipathfs_mknod(d_inode(parent), *dentry,
-                                     mode, fops, data);
-       else
-               error = PTR_ERR(*dentry);
-       inode_unlock(d_inode(parent));
-
-       return error;
-}
-
-static ssize_t atomic_stats_read(struct file *file, char __user *buf,
-                                size_t count, loff_t *ppos)
-{
-       return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
-                                      sizeof ipath_stats);
-}
-
-static const struct file_operations atomic_stats_ops = {
-       .read = atomic_stats_read,
-       .llseek = default_llseek,
-};
-
-static ssize_t atomic_counters_read(struct file *file, char __user *buf,
-                                   size_t count, loff_t *ppos)
-{
-       struct infinipath_counters counters;
-       struct ipath_devdata *dd;
-
-       dd = file_inode(file)->i_private;
-       dd->ipath_f_read_counters(dd, &counters);
-
-       return simple_read_from_buffer(buf, count, ppos, &counters,
-                                      sizeof counters);
-}
-
-static const struct file_operations atomic_counters_ops = {
-       .read = atomic_counters_read,
-       .llseek = default_llseek,
-};
-
-static ssize_t flash_read(struct file *file, char __user *buf,
-                         size_t count, loff_t *ppos)
-{
-       struct ipath_devdata *dd;
-       ssize_t ret;
-       loff_t pos;
-       char *tmp;
-
-       pos = *ppos;
-
-       if ( pos < 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (pos >= sizeof(struct ipath_flash)) {
-               ret = 0;
-               goto bail;
-       }
-
-       if (count > sizeof(struct ipath_flash) - pos)
-               count = sizeof(struct ipath_flash) - pos;
-
-       tmp = kmalloc(count, GFP_KERNEL);
-       if (!tmp) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       dd = file_inode(file)->i_private;
-       if (ipath_eeprom_read(dd, pos, tmp, count)) {
-               ipath_dev_err(dd, "failed to read from flash\n");
-               ret = -ENXIO;
-               goto bail_tmp;
-       }
-
-       if (copy_to_user(buf, tmp, count)) {
-               ret = -EFAULT;
-               goto bail_tmp;
-       }
-
-       *ppos = pos + count;
-       ret = count;
-
-bail_tmp:
-       kfree(tmp);
-
-bail:
-       return ret;
-}
-
-static ssize_t flash_write(struct file *file, const char __user *buf,
-                          size_t count, loff_t *ppos)
-{
-       struct ipath_devdata *dd;
-       ssize_t ret;
-       loff_t pos;
-       char *tmp;
-
-       pos = *ppos;
-
-       if (pos != 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (count != sizeof(struct ipath_flash)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       tmp = memdup_user(buf, count);
-       if (IS_ERR(tmp))
-               return PTR_ERR(tmp);
-
-       dd = file_inode(file)->i_private;
-       if (ipath_eeprom_write(dd, pos, tmp, count)) {
-               ret = -ENXIO;
-               ipath_dev_err(dd, "failed to write to flash\n");
-               goto bail_tmp;
-       }
-
-       *ppos = pos + count;
-       ret = count;
-
-bail_tmp:
-       kfree(tmp);
-
-bail:
-       return ret;
-}
-
-static const struct file_operations flash_ops = {
-       .read = flash_read,
-       .write = flash_write,
-       .llseek = default_llseek,
-};
-
-static int create_device_files(struct super_block *sb,
-                              struct ipath_devdata *dd)
-{
-       struct dentry *dir, *tmp;
-       char unit[10];
-       int ret;
-
-       snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
-       ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
-                         &simple_dir_operations, dd);
-       if (ret) {
-               printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
-               goto bail;
-       }
-
-       ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
-                         &atomic_counters_ops, dd);
-       if (ret) {
-               printk(KERN_ERR "create_file(%s/atomic_counters) "
-                      "failed: %d\n", unit, ret);
-               goto bail;
-       }
-
-       ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
-                         &flash_ops, dd);
-       if (ret) {
-               printk(KERN_ERR "create_file(%s/flash) "
-                      "failed: %d\n", unit, ret);
-               goto bail;
-       }
-
-bail:
-       return ret;
-}
-
-static int remove_file(struct dentry *parent, char *name)
-{
-       struct dentry *tmp;
-       int ret;
-
-       tmp = lookup_one_len(name, parent, strlen(name));
-
-       if (IS_ERR(tmp)) {
-               ret = PTR_ERR(tmp);
-               goto bail;
-       }
-
-       spin_lock(&tmp->d_lock);
-       if (simple_positive(tmp)) {
-               dget_dlock(tmp);
-               __d_drop(tmp);
-               spin_unlock(&tmp->d_lock);
-               simple_unlink(d_inode(parent), tmp);
-       } else
-               spin_unlock(&tmp->d_lock);
-
-       ret = 0;
-bail:
-       /*
-        * We don't expect clients to care about the return value, but
-        * it's there if they need it.
-        */
-       return ret;
-}
-
-static int remove_device_files(struct super_block *sb,
-                              struct ipath_devdata *dd)
-{
-       struct dentry *dir, *root;
-       char unit[10];
-       int ret;
-
-       root = dget(sb->s_root);
-       inode_lock(d_inode(root));
-       snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
-       dir = lookup_one_len(unit, root, strlen(unit));
-
-       if (IS_ERR(dir)) {
-               ret = PTR_ERR(dir);
-               printk(KERN_ERR "Lookup of %s failed\n", unit);
-               goto bail;
-       }
-
-       remove_file(dir, "flash");
-       remove_file(dir, "atomic_counters");
-       d_delete(dir);
-       ret = simple_rmdir(d_inode(root), dir);
-
-bail:
-       inode_unlock(d_inode(root));
-       dput(root);
-       return ret;
-}
-
-static int ipathfs_fill_super(struct super_block *sb, void *data,
-                             int silent)
-{
-       struct ipath_devdata *dd, *tmp;
-       unsigned long flags;
-       int ret;
-
-       static struct tree_descr files[] = {
-               [2] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
-               {""},
-       };
-
-       ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
-       if (ret) {
-               printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
-               goto bail;
-       }
-
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-
-       list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-               spin_unlock_irqrestore(&ipath_devs_lock, flags);
-               ret = create_device_files(sb, dd);
-               if (ret)
-                       goto bail;
-               spin_lock_irqsave(&ipath_devs_lock, flags);
-       }
-
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-bail:
-       return ret;
-}
-
-static struct dentry *ipathfs_mount(struct file_system_type *fs_type,
-                       int flags, const char *dev_name, void *data)
-{
-       struct dentry *ret;
-       ret = mount_single(fs_type, flags, data, ipathfs_fill_super);
-       if (!IS_ERR(ret))
-               ipath_super = ret->d_sb;
-       return ret;
-}
-
-static void ipathfs_kill_super(struct super_block *s)
-{
-       kill_litter_super(s);
-       ipath_super = NULL;
-}
-
-int ipathfs_add_device(struct ipath_devdata *dd)
-{
-       int ret;
-
-       if (ipath_super == NULL) {
-               ret = 0;
-               goto bail;
-       }
-
-       ret = create_device_files(ipath_super, dd);
-
-bail:
-       return ret;
-}
-
-int ipathfs_remove_device(struct ipath_devdata *dd)
-{
-       int ret;
-
-       if (ipath_super == NULL) {
-               ret = 0;
-               goto bail;
-       }
-
-       ret = remove_device_files(ipath_super, dd);
-
-bail:
-       return ret;
-}
-
-static struct file_system_type ipathfs_fs_type = {
-       .owner =        THIS_MODULE,
-       .name =         "ipathfs",
-       .mount =        ipathfs_mount,
-       .kill_sb =      ipathfs_kill_super,
-};
-MODULE_ALIAS_FS("ipathfs");
-
-int __init ipath_init_ipathfs(void)
-{
-       return register_filesystem(&ipathfs_fs_type);
-}
-
-void __exit ipath_exit_ipathfs(void)
-{
-       unregister_filesystem(&ipathfs_fs_type);
-}
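
Note: flash_read() above follows the usual pread()-style contract: reject a negative offset, return 0 at or past the end of the flash image, clamp the count to the bytes remaining, and advance *ppos by what was actually returned. The sketch below reproduces that clamping logic in user space with an ordinary buffer standing in for the EEPROM; the names and the 128-byte size are illustrative only.

/*
 * Minimal sketch of the offset/length clamping used by flash_read()
 * above, reproduced in user space with a plain byte array standing in
 * for the EEPROM image.  The names and the 128-byte size are
 * illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define FLASH_IMAGE_SIZE 128	/* stand-in for sizeof(struct ipath_flash) */

static unsigned char flash_image[FLASH_IMAGE_SIZE];

/* Read up to count bytes starting at *ppos; returns 0 at end of object. */
static ssize_t demo_flash_read(void *buf, size_t count, off_t *ppos)
{
	off_t pos = *ppos;

	if (pos < 0)
		return -1;				/* -EINVAL in the driver */
	if ((size_t)pos >= sizeof(flash_image))
		return 0;				/* at or past the end */
	if (count > sizeof(flash_image) - pos)
		count = sizeof(flash_image) - pos;	/* clamp to what remains */

	memcpy(buf, flash_image + pos, count);		/* copy_to_user() in the driver */
	*ppos = pos + count;				/* advance like pread() */
	return (ssize_t)count;
}

int main(void)
{
	unsigned char buf[64];
	off_t pos = 100;
	ssize_t n = demo_flash_read(buf, sizeof(buf), &pos);

	/* Only 28 bytes remain past offset 100 in a 128-byte image. */
	printf("read %zd bytes, new offset %ld\n", n, (long)pos);
	return 0;
}
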
diff --git a/drivers/staging/rdma/ipath/ipath_iba6110.c b/drivers/staging/rdma/ipath/ipath_iba6110.c
deleted file mode 100644 (file)
index 5f13572..0000000
+++ /dev/null
@@ -1,1939 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains all of the code that is specific to the InfiniPath
- * HT chip.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/htirq.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64);
-
-
-/*
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- *
- * The names are in InterCap form because they're taken straight from
- * the chip specification.  Since they're only used in this file, they
- * don't pollute the rest of the source.
-*/
-
-struct _infinipath_do_not_use_kernel_regs {
-       unsigned long long Revision;
-       unsigned long long Control;
-       unsigned long long PageAlign;
-       unsigned long long PortCnt;
-       unsigned long long DebugPortSelect;
-       unsigned long long DebugPort;
-       unsigned long long SendRegBase;
-       unsigned long long UserRegBase;
-       unsigned long long CounterRegBase;
-       unsigned long long Scratch;
-       unsigned long long ReservedMisc1;
-       unsigned long long InterruptConfig;
-       unsigned long long IntBlocked;
-       unsigned long long IntMask;
-       unsigned long long IntStatus;
-       unsigned long long IntClear;
-       unsigned long long ErrorMask;
-       unsigned long long ErrorStatus;
-       unsigned long long ErrorClear;
-       unsigned long long HwErrMask;
-       unsigned long long HwErrStatus;
-       unsigned long long HwErrClear;
-       unsigned long long HwDiagCtrl;
-       unsigned long long MDIO;
-       unsigned long long IBCStatus;
-       unsigned long long IBCCtrl;
-       unsigned long long ExtStatus;
-       unsigned long long ExtCtrl;
-       unsigned long long GPIOOut;
-       unsigned long long GPIOMask;
-       unsigned long long GPIOStatus;
-       unsigned long long GPIOClear;
-       unsigned long long RcvCtrl;
-       unsigned long long RcvBTHQP;
-       unsigned long long RcvHdrSize;
-       unsigned long long RcvHdrCnt;
-       unsigned long long RcvHdrEntSize;
-       unsigned long long RcvTIDBase;
-       unsigned long long RcvTIDCnt;
-       unsigned long long RcvEgrBase;
-       unsigned long long RcvEgrCnt;
-       unsigned long long RcvBufBase;
-       unsigned long long RcvBufSize;
-       unsigned long long RxIntMemBase;
-       unsigned long long RxIntMemSize;
-       unsigned long long RcvPartitionKey;
-       unsigned long long ReservedRcv[10];
-       unsigned long long SendCtrl;
-       unsigned long long SendPIOBufBase;
-       unsigned long long SendPIOSize;
-       unsigned long long SendPIOBufCnt;
-       unsigned long long SendPIOAvailAddr;
-       unsigned long long TxIntMemBase;
-       unsigned long long TxIntMemSize;
-       unsigned long long ReservedSend[9];
-       unsigned long long SendBufferError;
-       unsigned long long SendBufferErrorCONT1;
-       unsigned long long SendBufferErrorCONT2;
-       unsigned long long SendBufferErrorCONT3;
-       unsigned long long ReservedSBE[4];
-       unsigned long long RcvHdrAddr0;
-       unsigned long long RcvHdrAddr1;
-       unsigned long long RcvHdrAddr2;
-       unsigned long long RcvHdrAddr3;
-       unsigned long long RcvHdrAddr4;
-       unsigned long long RcvHdrAddr5;
-       unsigned long long RcvHdrAddr6;
-       unsigned long long RcvHdrAddr7;
-       unsigned long long RcvHdrAddr8;
-       unsigned long long ReservedRHA[7];
-       unsigned long long RcvHdrTailAddr0;
-       unsigned long long RcvHdrTailAddr1;
-       unsigned long long RcvHdrTailAddr2;
-       unsigned long long RcvHdrTailAddr3;
-       unsigned long long RcvHdrTailAddr4;
-       unsigned long long RcvHdrTailAddr5;
-       unsigned long long RcvHdrTailAddr6;
-       unsigned long long RcvHdrTailAddr7;
-       unsigned long long RcvHdrTailAddr8;
-       unsigned long long ReservedRHTA[7];
-       unsigned long long Sync;        /* Software only */
-       unsigned long long Dump;        /* Software only */
-       unsigned long long SimVer;      /* Software only */
-       unsigned long long ReservedSW[5];
-       unsigned long long SerdesConfig0;
-       unsigned long long SerdesConfig1;
-       unsigned long long SerdesStatus;
-       unsigned long long XGXSConfig;
-       unsigned long long ReservedSW2[4];
-};
-
-struct _infinipath_do_not_use_counters {
-       __u64 LBIntCnt;
-       __u64 LBFlowStallCnt;
-       __u64 Reserved1;
-       __u64 TxUnsupVLErrCnt;
-       __u64 TxDataPktCnt;
-       __u64 TxFlowPktCnt;
-       __u64 TxDwordCnt;
-       __u64 TxLenErrCnt;
-       __u64 TxMaxMinLenErrCnt;
-       __u64 TxUnderrunCnt;
-       __u64 TxFlowStallCnt;
-       __u64 TxDroppedPktCnt;
-       __u64 RxDroppedPktCnt;
-       __u64 RxDataPktCnt;
-       __u64 RxFlowPktCnt;
-       __u64 RxDwordCnt;
-       __u64 RxLenErrCnt;
-       __u64 RxMaxMinLenErrCnt;
-       __u64 RxICRCErrCnt;
-       __u64 RxVCRCErrCnt;
-       __u64 RxFlowCtrlErrCnt;
-       __u64 RxBadFormatCnt;
-       __u64 RxLinkProblemCnt;
-       __u64 RxEBPCnt;
-       __u64 RxLPCRCErrCnt;
-       __u64 RxBufOvflCnt;
-       __u64 RxTIDFullErrCnt;
-       __u64 RxTIDValidErrCnt;
-       __u64 RxPKeyMismatchCnt;
-       __u64 RxP0HdrEgrOvflCnt;
-       __u64 RxP1HdrEgrOvflCnt;
-       __u64 RxP2HdrEgrOvflCnt;
-       __u64 RxP3HdrEgrOvflCnt;
-       __u64 RxP4HdrEgrOvflCnt;
-       __u64 RxP5HdrEgrOvflCnt;
-       __u64 RxP6HdrEgrOvflCnt;
-       __u64 RxP7HdrEgrOvflCnt;
-       __u64 RxP8HdrEgrOvflCnt;
-       __u64 Reserved6;
-       __u64 Reserved7;
-       __u64 IBStatusChangeCnt;
-       __u64 IBLinkErrRecoveryCnt;
-       __u64 IBLinkDownedCnt;
-       __u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
-       struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
-       struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_ht_kregs = {
-       .kr_control = IPATH_KREG_OFFSET(Control),
-       .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
-       .kr_debugport = IPATH_KREG_OFFSET(DebugPort),
-       .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
-       .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
-       .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
-       .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
-       .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
-       .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
-       .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
-       .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
-       .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
-       .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
-       .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
-       .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
-       .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
-       .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
-       .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
-       .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
-       .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
-       .kr_intclear = IPATH_KREG_OFFSET(IntClear),
-       .kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
-       .kr_intmask = IPATH_KREG_OFFSET(IntMask),
-       .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
-       .kr_mdio = IPATH_KREG_OFFSET(MDIO),
-       .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
-       .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
-       .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
-       .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
-       .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
-       .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
-       .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
-       .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
-       .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
-       .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
-       .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
-       .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
-       .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
-       .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
-       .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
-       .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
-       .kr_revision = IPATH_KREG_OFFSET(Revision),
-       .kr_scratch = IPATH_KREG_OFFSET(Scratch),
-       .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
-       .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
-       .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
-       .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
-       .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
-       .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
-       .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
-       .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
-       .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
-       .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-       .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
-       .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
-       .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
-       .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-       /*
-        * These should not be used directly via ipath_write_kreg64(),
-        * use them with ipath_write_kreg64_port(),
-        */
-       .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
-       .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
-};
-
-static const struct ipath_cregs ipath_ht_cregs = {
-       .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
-       .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
-       .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
-       .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
-       .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
-       .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
-       .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
-       .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
-       .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
-       .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
-       .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
-       .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
-       /* calc from Reg_CounterRegBase + offset */
-       .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
-       .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
-       .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
-       .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
-       .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
-       .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
-       .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
-       .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
-       .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
-       .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
-       .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
-       .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
-       .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
-       .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
-       .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
-       .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
-       .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
-       .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
-       .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
-       .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
-       .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
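The (1U<<9)-1 masks cover nine receive ports (matching the RxP0..RxP8 header-egress-overflow counters above): urgent bits occupy bits 0-8 and packet-available bits occupy bits 12-20 of the interrupt registers. A small illustrative helper, assuming a port number below 9:

/* illustrative only: interrupt bit meaning "packet available on port" */
static inline u64 rcvavail_bit(unsigned port)
{
	return 1ULL << (INFINIPATH_I_RCVAVAIL_SHIFT + port);	/* port < 9 */
}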
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
-#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR   0x0000000000800000ULL
-#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR   0x0000000001000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR   0x0000000002000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR   0x0000000004000000ULL
-#define INFINIPATH_HWE_HTCMISCERR4          0x0000000008000000ULL
-#define INFINIPATH_HWE_HTCMISCERR5          0x0000000010000000ULL
-#define INFINIPATH_HWE_HTCMISCERR6          0x0000000020000000ULL
-#define INFINIPATH_HWE_HTCMISCERR7          0x0000000040000000ULL
-#define INFINIPATH_HWE_HTCBUSTREQPARITYERR  0x0000000080000000ULL
-#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
-#define INFINIPATH_HWE_HTCBUSIREQPARITYERR  0x0000000200000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_FBSLIP        0x0200000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_RFSLIP        0x0400000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_FBSLIP        0x0800000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_RFSLIP        0x1000000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED      0x2000000000000000ULL
-
-#define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6110_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_CORRECT     0x0000000000008000
-
-
-/* TID entries (memory), HT-only */
-#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL        /* 40 bits valid */
-#define INFINIPATH_RT_VALID 0x8000000000000000ULL
-#define INFINIPATH_RT_ADDR_SHIFT 0
-#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
-#define INFINIPATH_RT_BUFSIZE_SHIFT 48
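These masks describe the 64-bit TID entry format that ipath_ht_put_tid() writes further down: the low 40 bits hold the buffer's physical address, bits 48-61 hold the buffer length in 32-bit words, and bit 63 is the valid flag. A hedged worked example; the address and size are made up for illustration:

/* illustrative only: a valid expected-TID entry for a 4 KiB page at
 * physical address 0x2345678000 (page aligned, fits in 40 bits) */
static u64 example_expected_tid(void)
{
	u64 tid = 0x2345678000ULL & INFINIPATH_RT_ADDR_MASK;	/* bits 0-39: address */
	tid |= ((u64)(4096 >> 2) & INFINIPATH_RT_BUFSIZE_MASK)
		<< INFINIPATH_RT_BUFSIZE_SHIFT;			/* bits 48-61: length in words */
	return tid | INFINIPATH_RT_VALID;			/* bit 63: entry valid */
}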
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET          0x7ULL
-
-/*
- * masks and bits that are different in different chips, or present only
- * in one
- */
-static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
-    INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
-static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
-    INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
-
-static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
-    INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
-    INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
-    INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
-    INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA \
-       (1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL \
-       (1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-/* keep the code below somewhat more readable; not used elsewhere */
-#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |    \
-                               infinipath_hwe_htclnkabyte1crcerr)
-#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr |    \
-                               infinipath_hwe_htclnkbbyte1crcerr)
-#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |    \
-                               infinipath_hwe_htclnkbbyte0crcerr)
-#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr |    \
-                               infinipath_hwe_htclnkbbyte1crcerr)
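The same four CRC error bits are grouped two ways: by link (which HT link, A or B, reported the error) and by lane (byte 0 or byte 1 within a link). hwerr_crcbits() below tests both groupings to report which link and which lane failed; roughly:

/*
 * rough decode (illustrative):
 *                     lane/byte 0           lane/byte 1
 *   HT link 0 (A)  HTCLNKABYTE0CRCERR    HTCLNKABYTE1CRCERR
 *   HT link 1 (B)  HTCLNKBBYTE0CRCERR    HTCLNKBBYTE1CRCERR
 */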
-
-static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
-                         char *msg, size_t msgl)
-{
-       char bitsmsg[64];
-       ipath_err_t crcbits = hwerrs &
-               (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
-       /* don't check byte 1 CRC if HT link 0 is 8 bit */
-       if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
-               crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
-       /* don't check byte 1 CRC if HT link 1 is 8 bit */
-       if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
-               crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
-       /*
-        * we'll want to ignore link errors on link that is
-        * not in use, if any.  For now, complain about both
-        */
-       if (crcbits) {
-               u16 ctrl0, ctrl1;
-               snprintf(bitsmsg, sizeof bitsmsg,
-                        "[HT%s lane %s CRC (%llx); powercycle to completely clear]",
-                        !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
-                        "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
-                                   ? "1 (B)" : "0+1 (A+B)"),
-                        !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
-                        : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
-                           "0+1"), (unsigned long long) crcbits);
-               strlcat(msg, bitsmsg, msgl);
-
-               /*
-                * print extra info for debugging.  slave/primary
-                * config word 4, 8 (link control 0, 1)
-                */
-
-               if (pci_read_config_word(dd->pcidev,
-                                        dd->ipath_ht_slave_off + 0x4,
-                                        &ctrl0))
-                       dev_info(&dd->pcidev->dev, "Couldn't read "
-                                "linkctrl0 of slave/primary "
-                                "config block\n");
-               else if (!(ctrl0 & 1 << 6))
-                       /* not if EOC bit set */
-                       ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
-                                 ((ctrl0 >> 8) & 7) ? " CRC" : "",
-                                 ((ctrl0 >> 4) & 1) ? "linkfail" :
-                                 "");
-               if (pci_read_config_word(dd->pcidev,
-                                        dd->ipath_ht_slave_off + 0x8,
-                                        &ctrl1))
-                       dev_info(&dd->pcidev->dev, "Couldn't read "
-                                "linkctrl1 of slave/primary "
-                                "config block\n");
-               else if (!(ctrl1 & 1 << 6))
-                       /* not if EOC bit set */
-                       ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
-                                 ((ctrl1 >> 8) & 7) ? " CRC" : "",
-                                 ((ctrl1 >> 4) & 1) ? "linkfail" :
-                                 "");
-
-               /* disable until driver reloaded */
-               dd->ipath_hwerrmask &= ~crcbits;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                                dd->ipath_hwerrmask);
-               ipath_dbg("HT crc errs: %s\n", msg);
-       } else
-               ipath_dbg("ignoring HT crc errors 0x%llx, "
-                         "not in use\n", (unsigned long long)
-                         (hwerrs & (_IPATH_HTLINK0_CRCBITS |
-                                    _IPATH_HTLINK1_CRCBITS)));
-}
-
-/* 6110 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
-       INFINIPATH_HWE_MSG(HTCBUSIREQPARITYERR, "HTC Ireq Parity"),
-       INFINIPATH_HWE_MSG(HTCBUSTREQPARITYERR, "HTC Treq Parity"),
-       INFINIPATH_HWE_MSG(HTCBUSTRESPPARITYERR, "HTC Tresp Parity"),
-       INFINIPATH_HWE_MSG(HTCMISCERR5, "HT core Misc5"),
-       INFINIPATH_HWE_MSG(HTCMISCERR6, "HT core Misc6"),
-       INFINIPATH_HWE_MSG(HTCMISCERR7, "HT core Misc7"),
-       INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
-       INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
-                       INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
-                       << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
-                         << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_ht_txe_recover(struct ipath_devdata *dd)
-{
-       ++ipath_stats.sps_txeparity;
-       dev_info(&dd->pcidev->dev,
-               "Recovering from TXE PIO parity error\n");
-}
-
-
-/**
- * ipath_ht_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll print
- * them and continue.  We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
-                                    size_t msgl)
-{
-       ipath_err_t hwerrs;
-       u32 bits, ctrl;
-       int isfatal = 0;
-       char bitsmsg[64];
-       int log_idx;
-
-       hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-
-       if (!hwerrs) {
-               ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
-               /*
-                * better than printing confusing messages.  This seems to
-                * be related to clearing the crc error, or the pll error,
-                * during init.
-                */
-               goto bail;
-       } else if (hwerrs == -1LL) {
-               ipath_dev_err(dd, "Read of hardware error status failed "
-                             "(all bits set); ignoring\n");
-               goto bail;
-       }
-       ipath_stats.sps_hwerrs++;
-
-       /* Always clear the error status register, except MEMBISTFAIL,
-        * regardless of whether we continue or stop using the chip.
-        * We want that set so we know it failed, even across driver reload.
-        * We'll still ignore it in the hwerrmask.  We do this partly for
-        * diagnostics, but also for support */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                        hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
-       hwerrs &= dd->ipath_hwerrmask;
-
-       /* We log some errors to EEPROM, check if we have any of those. */
-       for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
-               if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
-                       ipath_inc_eeprom_err(dd, log_idx, 1);
-
-       /*
-        * make sure we get this much out, unless told to be quiet,
-        * or it's a parity error we may recover from,
-        * or it has occurred within the last 5 seconds
-        */
-       if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
-               RXE_EAGER_PARITY)) ||
-               (ipath_debug & __IPATH_VERBDBG))
-               dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
-                        "(cleared)\n", (unsigned long long) hwerrs);
-       dd->ipath_lasthwerror |= hwerrs;
-
-       if (hwerrs & ~dd->ipath_hwe_bitsextant)
-               ipath_dev_err(dd, "hwerror interrupt with unknown errors "
-                             "%llx set\n", (unsigned long long)
-                             (hwerrs & ~dd->ipath_hwe_bitsextant));
-
-       ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-       if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
-               /*
-                * parity errors in send memory are recoverable,
-                * just cancel the send (if indicated in sendbuffererror),
-                * count the occurrence, unfreeze (if no other handled
-                * hardware error bits are set), and continue. They can
-                * occur if a processor speculative read is done to the PIO
-                * buffer while we are sending a packet, for example.
-                */
-               if (hwerrs & TXE_PIO_PARITY) {
-                       ipath_ht_txe_recover(dd);
-                       hwerrs &= ~TXE_PIO_PARITY;
-               }
-
-               if (!hwerrs) {
-                       ipath_dbg("Clearing freezemode on ignored or "
-                                 "recovered hardware error\n");
-                       ipath_clear_freeze(dd);
-               }
-       }
-
-       *msg = '\0';
-
-       /*
-        * may someday want to decode into which bits are which
-        * functional area for parity errors, etc.
-        */
-       if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
-                     << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
-               bits = (u32) ((hwerrs >>
-                              INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
-                             INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
-                        bits);
-               strlcat(msg, bitsmsg, msgl);
-       }
-
-       ipath_format_hwerrors(hwerrs,
-                             ipath_6110_hwerror_msgs,
-                             ARRAY_SIZE(ipath_6110_hwerror_msgs),
-                             msg, msgl);
-
-       if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
-               hwerr_crcbits(dd, hwerrs, msg, msgl);
-
-       if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
-               strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
-                       msgl);
-               /* ignore from now on, so disable until driver reloaded */
-               dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                                dd->ipath_hwerrmask);
-       }
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |       \
-                        INFINIPATH_HWE_COREPLL_RFSLIP |        \
-                        INFINIPATH_HWE_HTBPLL_FBSLIP |         \
-                        INFINIPATH_HWE_HTBPLL_RFSLIP |         \
-                        INFINIPATH_HWE_HTAPLL_FBSLIP |         \
-                        INFINIPATH_HWE_HTAPLL_RFSLIP)
-
-       if (hwerrs & _IPATH_PLL_FAIL) {
-               snprintf(bitsmsg, sizeof bitsmsg,
-                        "[PLL failed (%llx), InfiniPath hardware unusable]",
-                        (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
-               strlcat(msg, bitsmsg, msgl);
-               /* ignore from now on, so disable until driver reloaded */
-               dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                                dd->ipath_hwerrmask);
-       }
-
-       if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
-               /*
-                * If it occurs, it is left masked since the external
-                * interface is unused
-                */
-               dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                                dd->ipath_hwerrmask);
-       }
-
-       if (hwerrs) {
-               /*
-                * if any set that we aren't ignoring; only
-                * make the complaint once, in case it's stuck
-                * or recurring, and we get here multiple
-                * times.
-                * force link down, so switch knows, and
-                * LEDs are turned off
-                */
-               if (dd->ipath_flags & IPATH_INITTED) {
-                       ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
-                       ipath_setup_ht_setextled(dd,
-                               INFINIPATH_IBCS_L_STATE_DOWN,
-                               INFINIPATH_IBCS_LT_STATE_DISABLED);
-                       ipath_dev_err(dd, "Fatal Hardware Error (freeze "
-                                         "mode), no longer usable, SN %.16s\n",
-                                         dd->ipath_serial);
-                       isfatal = 1;
-               }
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-               /* mark as having had error */
-               *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-               /*
-                * mark as not usable, at a minimum until driver
-                * is reloaded, probably until reboot, since no
-                * other reset is possible.
-                */
-               dd->ipath_flags &= ~IPATH_INITTED;
-       } else {
-               *msg = 0; /* recovered from all of them */
-       }
-       if (*msg)
-               ipath_dev_err(dd, "%s hardware error\n", msg);
-       if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
-               /*
-                * for status file; if no trailing brace is copied,
-                * we'll know it was truncated.
-                */
-               snprintf(dd->ipath_freezemsg,
-                        dd->ipath_freezelen, "{%s}", msg);
-
-bail:;
-}
-
-/**
- * ipath_ht_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * fill in the board name, based on the board revision register
- */
-static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
-                             size_t namelen)
-{
-       char *n = NULL;
-       u8 boardrev = dd->ipath_boardrev;
-       int ret = 0;
-
-       switch (boardrev) {
-       case 5:
-               /*
-                * original production board; two production levels, with
-                * different serial number ranges.  See ipath_ht_early_init()
-                * for the case where we enable IPATH_GPIO_INTR for the later
-                * serial number range.  The original 112* serial number range
-                * is no longer supported.
-                */
-               n = "InfiniPath_QHT7040";
-               break;
-       case 7:
-               /* small form factor production board */
-               n = "InfiniPath_QHT7140";
-               break;
-       default:                /* don't know, just print the number */
-               ipath_dev_err(dd, "Don't yet know about board "
-                             "with ID %u\n", boardrev);
-               snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
-                        boardrev);
-               break;
-       }
-       if (n)
-               snprintf(name, namelen, "%s", n);
-
-       if (ret) {
-               ipath_dev_err(dd, "Unsupported InfiniPath board %s!\n", name);
-               goto bail;
-       }
-       if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
-               dd->ipath_minrev > 4)) {
-               /*
-                * This version of the driver only supports Rev 3.2 - 3.4
-                */
-               ipath_dev_err(dd,
-                             "Unsupported InfiniPath hardware revision %u.%u!\n",
-                             dd->ipath_majrev, dd->ipath_minrev);
-               ret = 1;
-               goto bail;
-       }
-       /*
-        * pkt/word counters are 32 bit, and therefore wrap fast enough
-        * that we snapshot them from a timer, and maintain 64 bit shadow
-        * copies
-        */
-       dd->ipath_flags |= IPATH_32BITCOUNTERS;
-       dd->ipath_flags |= IPATH_GPIO_INTR;
-       if (dd->ipath_lbus_speed != 800)
-               ipath_dev_err(dd,
-                             "Incorrectly configured for HT @ %uMHz\n",
-                             dd->ipath_lbus_speed);
-
-       /*
-        * set here, not in ipath_init_*_funcs because we have to do
-        * it after we can read chip registers.
-        */
-       dd->ipath_ureg_align =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-
-bail:
-       return ret;
-}
-
-static void ipath_check_htlink(struct ipath_devdata *dd)
-{
-       u8 linkerr, link_off, i;
-
-       for (i = 0; i < 2; i++) {
-               link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
-               if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
-                       dev_info(&dd->pcidev->dev, "Couldn't read "
-                                "linkerror%d of HT slave/primary block\n",
-                                i);
-               else if (linkerr & 0xf0) {
-                       ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
-                                  "clearing\n", i, linkerr >> 4);
-                       /*
-                        * writing the linkerr bits that are set should
-                        * clear them
-                        */
-                       if (pci_write_config_byte(dd->pcidev, link_off,
-                                                 linkerr))
-                               ipath_dbg("Failed write to clear HT "
-                                         "linkerror%d\n", i);
-                       if (pci_read_config_byte(dd->pcidev, link_off,
-                                                &linkerr))
-                               dev_info(&dd->pcidev->dev,
-                                        "Couldn't reread linkerror%d of "
-                                        "HT slave/primary block\n", i);
-                       else if (linkerr & 0xf0)
-                               dev_info(&dd->pcidev->dev,
-                                        "HT linkerror%d bits 0x%x "
-                                        "couldn't be cleared\n",
-                                        i, linkerr >> 4);
-               }
-       }
-}
-
-static int ipath_setup_ht_reset(struct ipath_devdata *dd)
-{
-       ipath_dbg("No reset possible for this InfiniPath hardware\n");
-       return 0;
-}
-
-#define HT_INTR_DISC_CONFIG  0x80      /* HT interrupt and discovery cap */
-#define HT_INTR_REG_INDEX    2 /* intconfig requires indirect accesses */
-
-/*
- * Bits 13-15 of command==0 identify the slave/primary block.  Clear any
- * HT CRC errors.  We only bother to do this at load time, because it's
- * OK if it happened before we were loaded (first time after boot/reset),
- * but any time after that it's fatal anyway.  We also need to skip the
- * upper byte error checks if we are in 8 bit mode, so figure out our
- * width.  For now, at least, also complain if it's 8 bit.
- */
-static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
-                            int pos, u8 cap_type)
-{
-       u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
-       u16 linkctrl = 0;
-       int i;
-
-       dd->ipath_ht_slave_off = pos;
-       /* command word, master_host bit */
-       /* master host || slave */
-       if ((cap_type >> 2) & 1)
-               link_a_b_off = 4;
-       else
-               link_a_b_off = 0;
-       ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
-                  link_a_b_off ? 1 : 0,
-                  link_a_b_off ? 'B' : 'A');
-
-       link_a_b_off += pos;
-
-       /*
-        * check both link control registers; clear both HT CRC sets if
-        * necessary.
-        */
-       for (i = 0; i < 2; i++) {
-               link_off = pos + i * 4 + 0x4;
-               if (pci_read_config_word(pdev, link_off, &linkctrl))
-                       ipath_dev_err(dd, "Couldn't read HT link control%d "
-                                     "register\n", i);
-               else if (linkctrl & (0xf << 8)) {
-                       ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
-                                  "bits %x\n", i, linkctrl & (0xf << 8));
-                       /*
-                        * now write them back to clear the error.
-                        */
-                       pci_write_config_word(pdev, link_off,
-                                             linkctrl & (0xf << 8));
-               }
-       }
-
-       /*
-        * As with HT CRC bits, same for protocol errors that might occur
-        * during boot.
-        */
-       for (i = 0; i < 2; i++) {
-               link_off = pos + i * 4 + 0xd;
-               if (pci_read_config_byte(pdev, link_off, &linkerr))
-                       dev_info(&pdev->dev, "Couldn't read linkerror%d "
-                                "of HT slave/primary block\n", i);
-               else if (linkerr & 0xf0) {
-                       ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
-                                  "clearing\n", i, linkerr >> 4);
-                       /*
-                        * writing the linkerr bits that are set will clear
-                        * them
-                        */
-                       if (pci_write_config_byte
-                           (pdev, link_off, linkerr))
-                               ipath_dbg("Failed write to clear HT "
-                                         "linkerror%d\n", i);
-                       if (pci_read_config_byte(pdev, link_off, &linkerr))
-                               dev_info(&pdev->dev, "Couldn't reread "
-                                        "linkerror%d of HT slave/primary "
-                                        "block\n", i);
-                       else if (linkerr & 0xf0)
-                               dev_info(&pdev->dev, "HT linkerror%d bits "
-                                        "0x%x couldn't be cleared\n",
-                                        i, linkerr >> 4);
-               }
-       }
-
-       /*
-        * this is just for our link to the host, not devices connected
-        * through tunnel.
-        */
-
-       if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
-               ipath_dev_err(dd, "Couldn't read HT link width "
-                             "config register\n");
-       else {
-               u32 width;
-               switch (linkwidth & 7) {
-               case 5:
-                       width = 4;
-                       break;
-               case 4:
-                       width = 2;
-                       break;
-               case 3:
-                       width = 32;
-                       break;
-               case 1:
-                       width = 16;
-                       break;
-               case 0:
-               default:        /* if wrong, assume 8 bit */
-                       width = 8;
-                       break;
-               }
-
-               dd->ipath_lbus_width = width;
-
-               if (linkwidth != 0x11) {
-                       ipath_dev_err(dd, "Not configured for 16 bit HT "
-                                     "(%x)\n", linkwidth);
-                       if (!(linkwidth & 0xf)) {
-                               ipath_dbg("Will ignore HT lane1 errors\n");
-                               dd->ipath_flags |= IPATH_8BIT_IN_HT0;
-                       }
-               }
-       }
-
-       /*
-        * this is just for our link to the host, not devices connected
-        * through tunnel.
-        */
-       if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
-               ipath_dev_err(dd, "Couldn't read HT link frequency "
-                             "config register\n");
-       else {
-               u32 speed;
-               switch (linkwidth & 0xf) {
-               case 6:
-                       speed = 1000;
-                       break;
-               case 5:
-                       speed = 800;
-                       break;
-               case 4:
-                       speed = 600;
-                       break;
-               case 3:
-                       speed = 500;
-                       break;
-               case 2:
-                       speed = 400;
-                       break;
-               case 1:
-                       speed = 300;
-                       break;
-               default:
-                       /*
-                        * assume reserved and vendor-specific are 200...
-                        */
-               case 0:
-                       speed = 200;
-                       break;
-               }
-               dd->ipath_lbus_speed = speed;
-       }
-
-       snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
-               "HyperTransport,%uMHz,x%u\n",
-               dd->ipath_lbus_speed,
-               dd->ipath_lbus_width);
-}
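For the link configuration the driver expects, the decode above produces the values checked later (ipath_ht_boardname() warns if the speed is not 800 MHz); a hedged example of the result:

/* illustrative only: expected decode for a healthy 16 bit, 800 MHz link
 *   linkwidth 0x11 -> dd->ipath_lbus_width == 16
 *   freq code 5    -> dd->ipath_lbus_speed == 800
 *   dd->ipath_lbus_info == "HyperTransport,800MHz,x16\n"
 */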
-
-static int ipath_ht_intconfig(struct ipath_devdata *dd)
-{
-       int ret;
-
-       if (dd->ipath_intconfig) {
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
-                                dd->ipath_intconfig);  /* interrupt address */
-               ret = 0;
-       } else {
-               ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
-                             "interrupt address\n");
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
-static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
-                               struct ht_irq_msg *msg)
-{
-       struct ipath_devdata *dd = pci_get_drvdata(dev);
-       u64 prev_intconfig = dd->ipath_intconfig;
-
-       dd->ipath_intconfig = msg->address_lo;
-       dd->ipath_intconfig |= ((u64) msg->address_hi) << 32;
-
-       /*
-        * If the previous value of dd->ipath_intconfig is zero, we're
-        * getting configured for the first time, and must not program the
-        * intconfig register here (it will be programmed later, when the
-        * hardware is ready).  Otherwise, we should.
-        */
-       if (prev_intconfig)
-               ipath_ht_intconfig(dd);
-}
-
-/**
- * ipath_setup_ht_config - setup the interruptconfig register
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * setup the interruptconfig register from the HT config info.
- * Also clear CRC errors in HT linkcontrol, if necessary.
- * This is done only for the real hardware.  It is done before
- * chip address space is initted, so can't touch infinipath registers
- */
-static int ipath_setup_ht_config(struct ipath_devdata *dd,
-                                struct pci_dev *pdev)
-{
-       int pos, ret;
-
-       ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update);
-       if (ret < 0) {
-               ipath_dev_err(dd, "Couldn't create interrupt handler: "
-                             "err %d\n", ret);
-               goto bail;
-       }
-       dd->ipath_irq = ret;
-       ret = 0;
-
-       /*
-        * Handle clearing CRC errors in linkctrl register if necessary.  We
-        * do this early, before we ever enable errors or hardware errors,
-        * mostly to avoid causing the chip to enter freeze mode.
-        */
-       pos = pci_find_capability(pdev, PCI_CAP_ID_HT);
-       if (!pos) {
-               ipath_dev_err(dd, "Couldn't find HyperTransport "
-                             "capability; no interrupts\n");
-               ret = -ENODEV;
-               goto bail;
-       }
-       do {
-               u8 cap_type;
-
-               /*
-                * The HT capability type byte is 3 bytes after the
-                * capability byte.
-                */
-               if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
-                       dev_info(&pdev->dev, "Couldn't read config "
-                                "command @ %d\n", pos);
-                       continue;
-               }
-               if (!(cap_type & 0xE0))
-                       slave_or_pri_blk(dd, pdev, pos, cap_type);
-       } while ((pos = pci_find_next_capability(pdev, pos,
-                                                PCI_CAP_ID_HT)));
-
-       dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the infinipath device
- *
- * Called during driver unload.
- * This is currently a nop for the HT chip, though not for all chips.
- */
-static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
-{
-}
-
-/**
- * ipath_setup_ht_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * Set the state of the two external LEDs, to indicate physical and
- * logical state of IB link.   For this chip (at least with recommended
- * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
- * (logical state)
- *
- * Note:  We try to match the Mellanox HCA LED behavior as best
- * we can.  Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate.  That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
-                                    u64 lst, u64 ltst)
-{
-       u64 extctl;
-       unsigned long flags = 0;
-
-       /* the diags use the LED to indicate diag info, so we leave
-        * the external LED alone when the diags are running */
-       if (ipath_diag_inuse)
-               return;
-
-       /* Allow override of LED display, e.g. for locating the system in a rack */
-       if (dd->ipath_led_override) {
-               ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
-                       ? INFINIPATH_IBCS_LT_STATE_LINKUP
-                       : INFINIPATH_IBCS_LT_STATE_DISABLED;
-               lst = (dd->ipath_led_override & IPATH_LED_LOG)
-                       ? INFINIPATH_IBCS_L_STATE_ACTIVE
-                       : INFINIPATH_IBCS_L_STATE_DOWN;
-       }
-
-       spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-       /*
-        * start by setting both LED control bits to off, then turn
-        * on the appropriate bit(s).
-        */
-       if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
-               /*
-                * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
-                * is inverted,  because it is normally used to indicate
-                * a hardware fault at reset, if there were errors
-                */
-               extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
-                       | INFINIPATH_EXTC_LEDGBLERR_OFF;
-               if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-                       extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
-               if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-                       extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
-       } else {
-               extctl = dd->ipath_extctrl &
-                       ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
-                         INFINIPATH_EXTC_LED2PRIPORT_ON);
-               if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-                       extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
-               if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-                       extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
-       }
-       dd->ipath_extctrl = extctl;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
-       spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-}
-
-static void ipath_init_ht_variables(struct ipath_devdata *dd)
-{
-       /*
-        * setup the register offsets, since they are different for each
-        * chip
-        */
-       dd->ipath_kregs = &ipath_ht_kregs;
-       dd->ipath_cregs = &ipath_ht_cregs;
-
-       dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
-       dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
-       dd->ipath_gpio_sda = IPATH_GPIO_SDA;
-       dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
-       /*
-        * Fill in data for field-values that change in newer chips.
-        * We dynamically specify only the mask for LINKTRAININGSTATE
-        * and only the shift for LINKSTATE, as they are the only ones
-        * that change.  Also precalculate the 3 link states of interest
-        * and the combined mask.
-        */
-       dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT;
-       dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK;
-       dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
-               dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
-       dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-               INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-               (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
-       dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-               INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-               (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
-       dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-               INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-               (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
-       /*
-        * Fill in data for ibcc field-values that change in newer chips.
-        * We dynamically specify only the mask for LINKINITCMD
-        * and only the shift for LINKCMD and MAXPKTLEN, as they are
-        * the only ones that change.
-        */
-       dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
-       dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
-       dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-
-       /* Fill in shifts for RcvCtrl. */
-       dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
-       dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
-       dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
-       dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */
-
-       dd->ipath_i_bitsextant =
-               (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
-               (INFINIPATH_I_RCVAVAIL_MASK <<
-                INFINIPATH_I_RCVAVAIL_SHIFT) |
-               INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
-               INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
-
-       dd->ipath_e_bitsextant =
-               INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
-               INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
-               INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
-               INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
-               INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
-               INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
-               INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-               INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
-               INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
-               INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
-               INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
-               INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
-               INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
-               INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
-               INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
-               INFINIPATH_E_HARDWARE;
-
-       dd->ipath_hwe_bitsextant =
-               (INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
-                INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
-               (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-                INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
-               (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-                INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
-               INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
-               INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
-               INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
-               INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
-               INFINIPATH_HWE_HTCMISCERR4 |
-               INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
-               INFINIPATH_HWE_HTCMISCERR7 |
-               INFINIPATH_HWE_HTCBUSTREQPARITYERR |
-               INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
-               INFINIPATH_HWE_HTCBUSIREQPARITYERR |
-               INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
-               INFINIPATH_HWE_MEMBISTFAILED |
-               INFINIPATH_HWE_COREPLL_FBSLIP |
-               INFINIPATH_HWE_COREPLL_RFSLIP |
-               INFINIPATH_HWE_HTBPLL_FBSLIP |
-               INFINIPATH_HWE_HTBPLL_RFSLIP |
-               INFINIPATH_HWE_HTAPLL_FBSLIP |
-               INFINIPATH_HWE_HTAPLL_RFSLIP |
-               INFINIPATH_HWE_SERDESPLLFAILED |
-               INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
-               INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
-
-       dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
-       dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-       dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
-       dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-
-       /*
-        * EEPROM error log 0 is TXE parity errors, 1 is RXE parity,
-        * 2 is some miscellaneous errors, and 3 is reserved for future use.
-        */
-       dd->ipath_eep_st_masks[0].hwerrs_to_log =
-               INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-               INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
-       dd->ipath_eep_st_masks[1].hwerrs_to_log =
-               INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-               INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
-       dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-
-       dd->delay_mult = 2; /* SDR, 4X, can't change */
-
-       dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
-       dd->ipath_link_speed_supported = IPATH_IB_SDR;
-       dd->ipath_link_width_enabled = IB_WIDTH_4X;
-       dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
-       /* these can't change for this chip, so set once */
-       dd->ipath_link_width_active = dd->ipath_link_width_enabled;
-       dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
-}
-
-/**
- * ipath_ht_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
-{
-       ipath_err_t val;
-       u64 extsval;
-
-       extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
-       if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
-               ipath_dev_err(dd, "MemBIST did not complete!\n");
-       if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT)
-               ipath_dbg("MemBIST corrected\n");
-
-       ipath_check_htlink(dd);
-
-       /* barring bugs, all hwerrors become interrupts, which can */
-       val = -1LL;
-       /* don't look at crc lane1 if HT link 0 is 8 bit */
-       if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
-               val &= ~infinipath_hwe_htclnkabyte1crcerr;
-       /* don't look at crc lane1 if HT link 1 is 8 bit */
-       if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
-               val &= ~infinipath_hwe_htclnkbbyte1crcerr;
-
-       /*
-        * disable RXDSYNCMEMPARITY because external serdes is unused,
-        * and therefore the logic will never be used or initialized,
-        * and uninitialized state will normally result in this error
-        * being asserted.  Similarly for the external serdes pll
-        * lock signal.
-        */
-       val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
-                INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
-
-       /*
-        * Disable MISCERR4 because of an inversion in the HT core
-        * logic checking for errors that cause this bit to be set.
-        * The erratum can also cause the protocol error bit to be set
-        * in the HT config space linkerror register(s).
-        */
-       val &= ~INFINIPATH_HWE_HTCMISCERR4;
-
-       /*
-        * PLL ignored because unused MDIO interface has a logic problem
-        */
-       if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
-               val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-       dd->ipath_hwerrmask = val;
-}
-
-
-
-
-/**
- * ipath_ht_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
-{
-       u64 val, config1;
-       int ret = 0, change = 0;
-
-       ipath_dbg("Trying to bringup serdes\n");
-
-       if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
-           INFINIPATH_HWE_SERDESPLLFAILED)
-       {
-               ipath_dbg("At start, serdes PLL failed bit set in "
-                         "hwerrstatus, clearing and continuing\n");
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                                INFINIPATH_HWE_SERDESPLLFAILED);
-       }
-
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-       config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
-       ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
-                  "config1=%llx, sstatus=%llx xgxs %llx\n",
-                  (unsigned long long) val, (unsigned long long) config1,
-                  (unsigned long long)
-                  ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-                  (unsigned long long)
-                  ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-       /* force reset on */
-       val |= INFINIPATH_SERDC0_RESET_PLL
-               /* | INFINIPATH_SERDC0_RESET_MASK */
-               ;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-       udelay(15);             /* need pll reset set at least for a bit */
-
-       if (val & INFINIPATH_SERDC0_RESET_PLL) {
-               u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL;
-               /* set lane resets, and tx idle, during pll reset */
-               val2 |= INFINIPATH_SERDC0_RESET_MASK |
-                       INFINIPATH_SERDC0_TXIDLE;
-               ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
-                          "%llx)\n", (unsigned long long) val2);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
-                                val2);
-               /*
-                * be sure chip saw it
-                */
-               val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               /*
-                * the pll reset must be cleared at least 11 usec before
-                * the lane resets are cleared; give it a few more
-                */
-               udelay(15);
-               val = val2;     /* for check below */
-       }
-
-       if (val & (INFINIPATH_SERDC0_RESET_PLL |
-                  INFINIPATH_SERDC0_RESET_MASK |
-                  INFINIPATH_SERDC0_TXIDLE)) {
-               val &= ~(INFINIPATH_SERDC0_RESET_PLL |
-                        INFINIPATH_SERDC0_RESET_MASK |
-                        INFINIPATH_SERDC0_TXIDLE);
-               /* clear them */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
-                                val);
-       }
-
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-       if (val & INFINIPATH_XGXS_RESET) {
-               /* normally true after boot */
-               val &= ~INFINIPATH_XGXS_RESET;
-               change = 1;
-       }
-       if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
-            INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
-               /* need to compensate for Tx inversion in partner */
-               val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-                        INFINIPATH_XGXS_RX_POL_SHIFT);
-               val |= dd->ipath_rx_pol_inv <<
-                       INFINIPATH_XGXS_RX_POL_SHIFT;
-               change = 1;
-       }
-       if (change)
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-       /* clear current and de-emphasis bits */
-       config1 &= ~0x0ffffffff00ULL;
-       /* set current to 20ma */
-       config1 |= 0x00000000000ULL;
-       /* set de-emphasis to -5.68dB */
-       config1 |= 0x0cccc000000ULL;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
-       ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
-                  "config1=%llx, sstatus=%llx xgxs %llx\n",
-                  (unsigned long long) val, (unsigned long long) config1,
-                  (unsigned long long)
-                  ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-                  (unsigned long long)
-                  ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-       return ret;             /* for now, say we always succeeded */
-}
-
-/**
- * ipath_ht_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- *
- * Called when the driver is being unloaded.
- */
-static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
-{
-       u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-       val |= INFINIPATH_SERDC0_TXIDLE;
-       ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
-                 (unsigned long long) val);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-/**
- * ipath_ht_put_tid - write a TID to the chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void ipath_ht_put_tid(struct ipath_devdata *dd,
-                            u64 __iomem *tidptr, u32 type,
-                            unsigned long pa)
-{
-       if (!dd->ipath_kregbase)
-               return;
-
-       if (pa != dd->ipath_tidinvalid) {
-               if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
-                       dev_info(&dd->pcidev->dev,
-                                "physaddr %lx has more than "
-                                "40 bits, using only 40!!!\n", pa);
-                       pa &= INFINIPATH_RT_ADDR_MASK;
-               }
-               if (type == RCVHQ_RCV_TYPE_EAGER)
-                       pa |= dd->ipath_tidtemplate;
-               else {
-                       /* in words (fixed, full page).  */
-                       u64 lenvalid = PAGE_SIZE >> 2;
-                       lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
-                       pa |= lenvalid | INFINIPATH_RT_VALID;
-               }
-       }
-
-       writeq(pa, tidptr);
-}
-
-
-/**
- * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * Used from ipath_close(), and at chip initialization.
- */
-static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
-       u64 __iomem *tidbase;
-       int i;
-
-       if (!dd->ipath_kregbase)
-               return;
-
-       ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
-       /*
-        * need to invalidate all of the expected TID entries for this
-        * port, so we don't have valid entries that might somehow get
-        * used (early in next use of this port, or through some bug)
-        */
-       tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-                                  dd->ipath_rcvtidbase +
-                                  port * dd->ipath_rcvtidcnt *
-                                  sizeof(*tidbase));
-       for (i = 0; i < dd->ipath_rcvtidcnt; i++)
-               ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
-                                dd->ipath_tidinvalid);
-
-       tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-                                  dd->ipath_rcvegrbase +
-                                  port * dd->ipath_rcvegrcnt *
-                                  sizeof(*tidbase));
-
-       for (i = 0; i < dd->ipath_rcvegrcnt; i++)
-               ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
-                                dd->ipath_tidinvalid);
-}
-
-/**
- * ipath_ht_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We setup stuff that we use a lot, to avoid calculating each time
- */
-static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
-{
-       dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
-       dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
-       dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
-
-       /*
-        * work around chip errata bug 7358, by marking invalid tids
-        * as having max length
-        */
-       dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
-               INFINIPATH_RT_BUFSIZE_SHIFT;
-}
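With the TID field definitions earlier in the file, the two templates work out roughly as follows for a typical 2048 byte ibmaxlen; the values are illustrative, not read from hardware:

/* illustrative only:
 *   ipath_tidinvalid  == 0x3fff000000000000  (maximum BUFSIZE field, valid
 *                        bit clear; the errata 7358 workaround)
 *   ipath_tidtemplate == 0x8200000000000000  (2048 bytes -> 512 words in
 *                        BUFSIZE, plus INFINIPATH_RT_VALID)
 */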
-
-static int ipath_ht_early_init(struct ipath_devdata *dd)
-{
-       u32 __iomem *piobuf;
-       u32 pioincr, val32;
-       int i;
-
-       /*
-        * one cache line; long IB headers will spill over into received
-        * buffer
-        */
-       dd->ipath_rcvhdrentsize = 16;
-       dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-
-       /*
-        * For HT, we allocate a somewhat overly large eager buffer,
-        * such that we can guarantee that we can receive the largest
-        * packet that we can send out.  To truly support a 4KB MTU,
-        * we need to bump this to a large value.  To date, other than
-        * testing, we have never encountered an HCA that can really
-        * send 4KB MTU packets, so we do not handle that (we'll get
- * error interrupts if we ever see one).
-        */
-       dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
-
-       /*
-        * the min() check here is currently a nop, but it may not
-        * always be, depending on just how we do ipath_rcvegrbufsize
-        */
-       dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
-                                dd->ipath_rcvegrbufsize);
-       dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-       ipath_ht_tidtemplate(dd);
-
-       /*
-        * zero all the TID entries at startup.  We do this for sanity,
-        * in case of a previous driver crash of some kind, and also
-        * because the chip powers up with these memories in an unknown
-        * state.  Use portcnt, not cfgports, since this is for the
-        * full chip, not for current (possibly different) configuration
-        * value.
-        * Chip Errata bug 6447
-        */
-       for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
-               ipath_ht_clear_tids(dd, val32);
-
-       /*
-        * write the pbc of each buffer, to be sure it's initialized, then
-        * cancel all the buffers, and also abort any packets that might
-        * have been in flight for some reason (the latter is for driver
-        * unload/reload, but isn't a bad idea at first init).  PIO send
-        * isn't enabled at this point, so there is no danger of sending
-        * these out on the wire.
-        * Chip Errata bug 6610
-        */
-       piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
-                                 dd->ipath_piobufbase);
-       pioincr = dd->ipath_palign / sizeof(*piobuf);
-       for (i = 0; i < dd->ipath_piobcnt2k; i++) {
-               /*
-                * reasonable word count, just to init pbc
-                */
-               writel(16, piobuf);
-               piobuf += pioincr;
-       }
-
-       ipath_get_eeprom_info(dd);
-       if (dd->ipath_boardrev == 5) {
-               /*
-                * Later production QHT7040 has same changes as QHT7140, so
-                * can use GPIO interrupts.  They have serial #'s starting
-                * with 128, rather than 112.
-                */
-               if (dd->ipath_serial[0] == '1' &&
-                   dd->ipath_serial[1] == '2' &&
-                   dd->ipath_serial[2] == '8')
-                       dd->ipath_flags |= IPATH_GPIO_INTR;
-               else {
-                       ipath_dev_err(dd, "Unsupported InfiniPath board "
-                               "(serial number %.16s)!\n",
-                               dd->ipath_serial);
-                       return 1;
-               }
-       }
-
-       if (dd->ipath_minrev >= 4) {
-               /* Rev4+ reports extra errors via internal GPIO pins */
-               dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
-               dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-                                dd->ipath_gpio_mask);
-       }
-
-       return 0;
-}
-
-
-/**
- * ipath_ht_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port data
- * @kbase: ipath_base_info pointer
- *
- * We set the HT flag so user code can tell HyperTransport from PCIe
- * devices, since the lower bandwidth of PCIe can affect some user
- * packet algorithms.
- */
-static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
-       struct ipath_base_info *kinfo = kbase;
-
-       kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
-               IPATH_RUNTIME_PIO_REGSWAPPED;
-
-       if (pd->port_dd->ipath_minrev < 4)
-               kinfo->spi_runtime_flags |= IPATH_RUNTIME_RCVHDR_COPY;
-
-       return 0;
-}
-
-static void ipath_ht_free_irq(struct ipath_devdata *dd)
-{
-       free_irq(dd->ipath_irq, dd);
-       ht_destroy_irq(dd->ipath_irq);
-       dd->ipath_irq = 0;
-       dd->ipath_intconfig = 0;
-}
-
-static struct ipath_message_header *
-ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
-       return (struct ipath_message_header *)
-               &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
-static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
-       dd->ipath_portcnt =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-       dd->ipath_p0_rcvegrcnt =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-}
-
-static void ipath_ht_read_counters(struct ipath_devdata *dd,
-                                  struct infinipath_counters *cntrs)
-{
-       cntrs->LBIntCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
-       cntrs->LBFlowStallCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
-       cntrs->TxSDmaDescCnt = 0;
-       cntrs->TxUnsupVLErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
-       cntrs->TxDataPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
-       cntrs->TxFlowPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
-       cntrs->TxDwordCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
-       cntrs->TxLenErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
-       cntrs->TxMaxMinLenErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
-       cntrs->TxUnderrunCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
-       cntrs->TxFlowStallCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
-       cntrs->TxDroppedPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
-       cntrs->RxDroppedPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
-       cntrs->RxDataPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
-       cntrs->RxFlowPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
-       cntrs->RxDwordCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
-       cntrs->RxLenErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
-       cntrs->RxMaxMinLenErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
-       cntrs->RxICRCErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
-       cntrs->RxVCRCErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
-       cntrs->RxFlowCtrlErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
-       cntrs->RxBadFormatCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
-       cntrs->RxLinkProblemCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
-       cntrs->RxEBPCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
-       cntrs->RxLPCRCErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
-       cntrs->RxBufOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
-       cntrs->RxTIDFullErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
-       cntrs->RxTIDValidErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
-       cntrs->RxPKeyMismatchCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
-       cntrs->RxP0HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
-       cntrs->RxP1HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
-       cntrs->RxP2HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
-       cntrs->RxP3HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
-       cntrs->RxP4HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
-       cntrs->RxP5HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt));
-       cntrs->RxP6HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt));
-       cntrs->RxP7HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt));
-       cntrs->RxP8HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt));
-       cntrs->RxP9HdrEgrOvflCnt = 0;
-       cntrs->RxP10HdrEgrOvflCnt = 0;
-       cntrs->RxP11HdrEgrOvflCnt = 0;
-       cntrs->RxP12HdrEgrOvflCnt = 0;
-       cntrs->RxP13HdrEgrOvflCnt = 0;
-       cntrs->RxP14HdrEgrOvflCnt = 0;
-       cntrs->RxP15HdrEgrOvflCnt = 0;
-       cntrs->RxP16HdrEgrOvflCnt = 0;
-       cntrs->IBStatusChangeCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
-       cntrs->IBLinkErrRecoveryCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
-       cntrs->IBLinkDownedCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
-       cntrs->IBSymbolErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
-       cntrs->RxVL15DroppedPktCnt = 0;
-       cntrs->RxOtherLocalPhyErrCnt = 0;
-       cntrs->PcieRetryBufDiagQwordCnt = 0;
-       cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
-       cntrs->LocalLinkIntegrityErrCnt =
-               (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-               dd->ipath_lli_errs : dd->ipath_lli_errors;
-       cntrs->RxVlErrCnt = 0;
-       cntrs->RxDlidFltrCnt = 0;
-}
-
-
-/* no interrupt fallback for these chips */
-static int ipath_ht_nointr_fallback(struct ipath_devdata *dd)
-{
-       return 0;
-}
-
-
-/*
- * reset the XGXS (between serdes and IBC).  Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining.  To do this right, we reset IBC
- * as well.
- */
-static void ipath_ht_xgxs_reset(struct ipath_devdata *dd)
-{
-       u64 val, prev_val;
-
-       prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-       val = prev_val | INFINIPATH_XGXS_RESET;
-       prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-       ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control);
-}
-
-
-static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
-       int ret;
-
-       switch (which) {
-       case IPATH_IB_CFG_LWID:
-               ret = dd->ipath_link_width_active;
-               break;
-       case IPATH_IB_CFG_SPD:
-               ret = dd->ipath_link_speed_active;
-               break;
-       case IPATH_IB_CFG_LWID_ENB:
-               ret = dd->ipath_link_width_enabled;
-               break;
-       case IPATH_IB_CFG_SPD_ENB:
-               ret = dd->ipath_link_speed_enabled;
-               break;
-       default:
-               ret =  -ENOTSUPP;
-               break;
-       }
-       return ret;
-}
-
-
-/* we assume range checking is already done, if needed */
-static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
-       int ret = 0;
-
-       if (which == IPATH_IB_CFG_LWID_ENB)
-               dd->ipath_link_width_enabled = val;
-       else if (which == IPATH_IB_CFG_SPD_ENB)
-               dd->ipath_link_speed_enabled = val;
-       else
-               ret = -ENOTSUPP;
-       return ret;
-}
-
-
-static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-{
-}
-
-
-static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
-       ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs),
-               ipath_ib_linktrstate(dd, ibcs));
-       return 0;
-}
-
-
-/**
- * ipath_init_iba6110_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
-{
-       dd->ipath_f_intrsetup = ipath_ht_intconfig;
-       dd->ipath_f_bus = ipath_setup_ht_config;
-       dd->ipath_f_reset = ipath_setup_ht_reset;
-       dd->ipath_f_get_boardname = ipath_ht_boardname;
-       dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
-       dd->ipath_f_early_init = ipath_ht_early_init;
-       dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
-       dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
-       dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
-       dd->ipath_f_clear_tids = ipath_ht_clear_tids;
-       dd->ipath_f_put_tid = ipath_ht_put_tid;
-       dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
-       dd->ipath_f_setextled = ipath_setup_ht_setextled;
-       dd->ipath_f_get_base_info = ipath_ht_get_base_info;
-       dd->ipath_f_free_irq = ipath_ht_free_irq;
-       dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
-       dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback;
-       dd->ipath_f_get_msgheader = ipath_ht_get_msgheader;
-       dd->ipath_f_config_ports = ipath_ht_config_ports;
-       dd->ipath_f_read_counters = ipath_ht_read_counters;
-       dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset;
-       dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg;
-       dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg;
-       dd->ipath_f_config_jint = ipath_ht_config_jint;
-       dd->ipath_f_ib_updown = ipath_ht_ib_updown;
-
-       /*
-        * initialize chip-specific variables
-        */
-       ipath_init_ht_variables(dd);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_init_chip.c b/drivers/staging/rdma/ipath/ipath_init_chip.c
deleted file mode 100644 (file)
index a5eea19..0000000
+++ /dev/null
@@ -1,1062 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-
-/*
- * minimum buffers we want to have per user port, after the driver takes its share
- */
-#define IPATH_MIN_USER_PORT_BUFCNT 7
-
-/*
- * Number of ports we are configured to use (to allow for more pio
- * buffers per port, etc.)  Zero means use chip value.
- */
-static ushort ipath_cfgports;
-
-module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
-MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
-
-/*
- * Number of buffers reserved for driver (verbs and layered drivers.)
- * Initialized based on number of PIO buffers if not set via module interface.
- * The problem with this is that it's global, but we'll use different
- * numbers for different chip types.
- */
-static ushort ipath_kpiobufs;
-
-static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
-
-module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
-                 &ipath_kpiobufs, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
-
-/**
- * create_port0_egr - allocate the eager TID buffers
- * @dd: the infinipath device
- *
- * This code is now quite different for user and kernel, because
- * the kernel uses skbs for accelerated network performance.
- * This is the kernel (port0) version.
- *
- * Allocate the eager TID buffers and program them into infinipath.
- * We use the network layer alloc_skb() allocator to allocate the
- * memory, and either use the buffers as is for things like verbs
- * packets, or pass the buffers up to the ipath layered driver and
- * thence the network layer, replacing them as we do so (see
- * ipath_rcv_layer()).
- */
-static int create_port0_egr(struct ipath_devdata *dd)
-{
-       unsigned e, egrcnt;
-       struct ipath_skbinfo *skbinfo;
-       int ret;
-
-       egrcnt = dd->ipath_p0_rcvegrcnt;
-
-       skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
-       if (skbinfo == NULL) {
-               ipath_dev_err(dd, "allocation error for eager TID "
-                             "skb array\n");
-               ret = -ENOMEM;
-               goto bail;
-       }
-       for (e = 0; e < egrcnt; e++) {
-               /*
-                * This is a bit tricky in that we allocate extra
-                * space for 2 bytes of the 14 byte ethernet header.
-                * These two bytes are passed in the ipath header so
-                * the rest of the data is word aligned.  We allocate
-                * 4 bytes so that the data buffer stays word aligned.
-                * See ipath_kreceive() for more details.
-                */
-               skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL);
-               if (!skbinfo[e].skb) {
-                       ipath_dev_err(dd, "SKB allocation error for "
-                                     "eager TID %u\n", e);
-                       while (e != 0)
-                               dev_kfree_skb(skbinfo[--e].skb);
-                       vfree(skbinfo);
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-       }
-       /*
-        * After loop above, so we can test non-NULL to see if ready
-        * to use at receive, etc.
-        */
-       dd->ipath_port0_skbinfo = skbinfo;
-
-       for (e = 0; e < egrcnt; e++) {
-               dd->ipath_port0_skbinfo[e].phys =
-                 ipath_map_single(dd->pcidev,
-                                  dd->ipath_port0_skbinfo[e].skb->data,
-                                  dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
-               dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
-                                   ((char __iomem *) dd->ipath_kregbase +
-                                    dd->ipath_rcvegrbase),
-                                   RCVHQ_RCV_TYPE_EAGER,
-                                   dd->ipath_port0_skbinfo[e].phys);
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
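
The eager-buffer setup above follows a common unwind-on-failure pattern: if allocating buffer e fails, everything allocated before it is freed before returning. A stand-alone sketch of that pattern, using plain malloc instead of skbs and made-up names for illustration:

#include <stdlib.h>

/* Allocate n buffers of sz bytes each; on any failure, free whatever
 * was already allocated and return NULL, as create_port0_egr() does
 * with its skb array above. */
static void **alloc_all_or_nothing(unsigned n, size_t sz)
{
	void **arr = calloc(n, sizeof(*arr));
	unsigned e;

	if (!arr)
		return NULL;
	for (e = 0; e < n; e++) {
		arr[e] = malloc(sz);
		if (!arr[e]) {
			while (e != 0)        /* walk back over the successes */
				free(arr[--e]);
			free(arr);
			return NULL;
		}
	}
	return arr;
}

int main(void)
{
	void **bufs = alloc_all_or_nothing(16, 2048);

	if (bufs) {
		for (unsigned e = 0; e < 16; e++)
			free(bufs[e]);
		free(bufs);
	}
	return 0;
}
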
-
-static int bringup_link(struct ipath_devdata *dd)
-{
-       u64 val, ibc;
-       int ret = 0;
-
-       /* hold IBC in reset */
-       dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control);
-
-       /*
-        * set the initial maximum packet size the IBC will send, including
-        * ICRC; it's the PIO buffer size in dwords, less 1; also see
-        * ipath_set_mtu()
-        */
-       val = (dd->ipath_ibmaxlen >> 2) + 1;
-       ibc = val << dd->ibcc_mpl_shift;
-
-       /* flowcontrolwatermark is in units of KBytes */
-       ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
-       /*
-        * How often flow control is sent.  More or less in usecs; balance against
-        * watermark value, so that in theory senders always get a flow
-        * control update in time to not let the IB link go idle.
-        */
-       ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;
-       /* max error tolerance */
-       ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-       /* use "real" buffer space for */
-       ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;
-       /* IB credit flow control. */
-       ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-       /* initially come up waiting for TS1, without sending anything. */
-       dd->ipath_ibcctrl = ibc;
-       /*
-        * Want to start out with both LINKCMD and LINKINITCMD in NOP
-        * (0 and 0).  Don't put linkinitcmd in ipath_ibcctrl, want that
-        * to stay a NOP. Flag that we are disabled, for the (unlikely)
-        * case that some recovery path is trying to bring the link up
-        * before we are ready.
-        */
-       ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
-               INFINIPATH_IBCC_LINKINITCMD_SHIFT;
-       dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
-       ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
-                  (unsigned long long) ibc);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
-
-       /* be sure chip saw it */
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-       ret = dd->ipath_f_bringup_serdes(dd);
-
-       if (ret)
-               dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "
-                        "not usable\n");
-       else {
-               /* enable IBC */
-               dd->ipath_control |= INFINIPATH_C_LINKENABLE;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                                dd->ipath_control);
-       }
-
-       return ret;
-}
-
-static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
-{
-       struct ipath_portdata *pd;
-
-       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-       if (pd) {
-               pd->port_dd = dd;
-               pd->port_cnt = 1;
-               /* The port 0 pkey table is used by the layer interface. */
-               pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
-               pd->port_seq_cnt = 1;
-       }
-       return pd;
-}
-
-static int init_chip_first(struct ipath_devdata *dd)
-{
-       struct ipath_portdata *pd;
-       int ret = 0;
-       u64 val;
-
-       spin_lock_init(&dd->ipath_kernel_tid_lock);
-       spin_lock_init(&dd->ipath_user_tid_lock);
-       spin_lock_init(&dd->ipath_sendctrl_lock);
-       spin_lock_init(&dd->ipath_uctxt_lock);
-       spin_lock_init(&dd->ipath_sdma_lock);
-       spin_lock_init(&dd->ipath_gpio_lock);
-       spin_lock_init(&dd->ipath_eep_st_lock);
-       spin_lock_init(&dd->ipath_sdepb_lock);
-       mutex_init(&dd->ipath_eep_lock);
-
-       /*
-        * skip cfgports stuff because we are not allocating memory,
-        * and we don't want problems if the portcnt changed due to
-        * cfgports.  We do still check and report a difference, if
-        * they are not the same (which should be impossible).
-        */
-       dd->ipath_f_config_ports(dd, ipath_cfgports);
-       if (!ipath_cfgports)
-               dd->ipath_cfgports = dd->ipath_portcnt;
-       else if (ipath_cfgports <= dd->ipath_portcnt) {
-               dd->ipath_cfgports = ipath_cfgports;
-               ipath_dbg("Configured to use %u ports out of %u in chip\n",
-                         dd->ipath_cfgports, ipath_read_kreg32(dd,
-                         dd->ipath_kregs->kr_portcnt));
-       } else {
-               dd->ipath_cfgports = dd->ipath_portcnt;
-               ipath_dbg("Tried to configure to use %u ports; chip "
-                         "only supports %u\n", ipath_cfgports,
-                         ipath_read_kreg32(dd,
-                                 dd->ipath_kregs->kr_portcnt));
-       }
-       /*
-        * Allocate full portcnt array, rather than just cfgports, because
-        * cleanup iterates across all possible ports.
-        */
-       dd->ipath_pd = kcalloc(dd->ipath_portcnt, sizeof(*dd->ipath_pd),
-                              GFP_KERNEL);
-
-       if (!dd->ipath_pd) {
-               ipath_dev_err(dd, "Unable to allocate portdata array, "
-                             "failing\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       pd = create_portdata0(dd);
-       if (!pd) {
-               ipath_dev_err(dd, "Unable to allocate portdata for port "
-                             "0, failing\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-       dd->ipath_pd[0] = pd;
-
-       dd->ipath_rcvtidcnt =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
-       dd->ipath_rcvtidbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
-       dd->ipath_rcvegrcnt =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-       dd->ipath_rcvegrbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
-       dd->ipath_palign =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-       dd->ipath_piobufbase =
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
-       dd->ipath_piosize2k = val & ~0U;
-       dd->ipath_piosize4k = val >> 32;
-       if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
-               ipath_mtu4096 = 0; /* 4KB not supported by this chip */
-       dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
-       dd->ipath_piobcnt2k = val & ~0U;
-       dd->ipath_piobcnt4k = val >> 32;
-       dd->ipath_pio2kbase =
-               (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
-                                (dd->ipath_piobufbase & 0xffffffff));
-       if (dd->ipath_piobcnt4k) {
-               dd->ipath_pio4kbase = (u32 __iomem *)
-                       (((char __iomem *) dd->ipath_kregbase) +
-                        (dd->ipath_piobufbase >> 32));
-               /*
-                * 4K buffers take 2 pages; we use roundup just to be
-                * paranoid; we calculate it once here, rather than on
-                * every buffer allocation
-                */
-               dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,
-                                         dd->ipath_palign);
-               ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "
-                         "(%x aligned)\n",
-                         dd->ipath_piobcnt2k, dd->ipath_piosize2k,
-                         dd->ipath_pio2kbase, dd->ipath_piobcnt4k,
-                         dd->ipath_piosize4k, dd->ipath_pio4kbase,
-                         dd->ipath_4kalign);
-       } else {
-               ipath_dbg("%u 2k piobufs @ %p\n",
-                         dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
-       }
-done:
-       return ret;
-}
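
init_chip_first() above reads kr_sendpiosize and kr_sendpiobufcnt once and splits each 64-bit value into a 2k half (low 32 bits) and a 4k half (high 32 bits). A minimal sketch of that split, with a made-up register value rather than real chip contents:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = (4096ULL << 32) | 2048ULL;            /* illustrative only */
	uint32_t size2k = (uint32_t)(val & 0xffffffffULL);   /* low half          */
	uint32_t size4k = (uint32_t)(val >> 32);             /* high half         */

	printf("2k buffer size %u, 4k buffer size %u\n", size2k, size4k);
	return 0;
}
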
-
-/**
- * init_chip_reset - re-initialize after a reset, or enable
- * @dd: the infinipath device
- *
- * sanity check at least some of the values after reset, and
- * ensure no receives or transmits are enabled (explicitly, in
- * case the reset failed)
- */
-static int init_chip_reset(struct ipath_devdata *dd)
-{
-       u32 rtmp;
-       int i;
-       unsigned long flags;
-
-       /*
-        * ensure chip does no sends or receives, tail updates, or
-        * pioavail updates while we re-initialize
-        */
-       dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
-       for (i = 0; i < dd->ipath_portcnt; i++) {
-               clear_bit(dd->ipath_r_portenable_shift + i,
-                         &dd->ipath_rcvctrl);
-               clear_bit(dd->ipath_r_intravail_shift + i,
-                         &dd->ipath_rcvctrl);
-       }
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-               dd->ipath_rcvctrl);
-
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl = 0U; /* no sdma, etc */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-
-       rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
-       if (rtmp != dd->ipath_rcvtidcnt)
-               dev_info(&dd->pcidev->dev, "tidcnt was %u before "
-                        "reset, now %u, using original\n",
-                        dd->ipath_rcvtidcnt, rtmp);
-       rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
-       if (rtmp != dd->ipath_rcvtidbase)
-               dev_info(&dd->pcidev->dev, "tidbase was %u before "
-                        "reset, now %u, using original\n",
-                        dd->ipath_rcvtidbase, rtmp);
-       rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-       if (rtmp != dd->ipath_rcvegrcnt)
-               dev_info(&dd->pcidev->dev, "egrcnt was %u before "
-                        "reset, now %u, using original\n",
-                        dd->ipath_rcvegrcnt, rtmp);
-       rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
-       if (rtmp != dd->ipath_rcvegrbase)
-               dev_info(&dd->pcidev->dev, "egrbase was %u before "
-                        "reset, now %u, using original\n",
-                        dd->ipath_rcvegrbase, rtmp);
-
-       return 0;
-}
-
-static int init_pioavailregs(struct ipath_devdata *dd)
-{
-       int ret;
-
-       dd->ipath_pioavailregs_dma = dma_alloc_coherent(
-               &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,
-               GFP_KERNEL);
-       if (!dd->ipath_pioavailregs_dma) {
-               ipath_dev_err(dd, "failed to allocate PIOavail reg area "
-                             "in memory\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       /*
-        * we really want L2 cache aligned, but for current CPUs of
-        * interest, they are the same.
-        */
-       dd->ipath_statusp = (u64 *)
-               ((char *)dd->ipath_pioavailregs_dma +
-                ((2 * L1_CACHE_BYTES +
-                  dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
-       /* copy the current value now that it's really allocated */
-       *dd->ipath_statusp = dd->_ipath_status;
-       /*
-        * setup buffer to hold freeze msg, accessible to apps,
-        * following statusp
-        */
-       dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];
-       /* and its length */
-       dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
-
-       ret = 0;
-
-done:
-       return ret;
-}
-
-/**
- * init_shadow_tids - allocate the shadow TID array
- * @dd: the infinipath device
- *
- * allocate the shadow TID array, so we can ipath_munlock previous
- * entries.  It may make more sense to move the pageshadow to the
- * port data structure, so we only allocate memory for ports actually
- * in use, since we are at 8k per port now.
- */
-static void init_shadow_tids(struct ipath_devdata *dd)
-{
-       struct page **pages;
-       dma_addr_t *addrs;
-
-       pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-                       sizeof(struct page *));
-       if (!pages) {
-               ipath_dev_err(dd, "failed to allocate shadow page * "
-                             "array, no expected sends!\n");
-               dd->ipath_pageshadow = NULL;
-               return;
-       }
-
-       addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-                       sizeof(dma_addr_t));
-       if (!addrs) {
-               ipath_dev_err(dd, "failed to allocate shadow dma handle "
-                             "array, no expected sends!\n");
-               vfree(pages);
-               dd->ipath_pageshadow = NULL;
-               return;
-       }
-
-       dd->ipath_pageshadow = pages;
-       dd->ipath_physshadow = addrs;
-}
-
-static void enable_chip(struct ipath_devdata *dd, int reinit)
-{
-       u32 val;
-       u64 rcvmask;
-       unsigned long flags;
-       int i;
-
-       if (!reinit)
-               init_waitqueue_head(&ipath_state_wait);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       /* Enable PIO send, and update of PIOavail regs to memory. */
-       dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
-               INFINIPATH_S_PIOBUFAVAILUPD;
-
-       /*
-        * Set the PIO avail update threshold to host memory
-        * on chips that support it.
-        */
-       if (dd->ipath_pioupd_thresh)
-               dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
-                       << INFINIPATH_S_UPDTHRESH_SHIFT;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       /*
-        * Enable kernel ports' receive and receive interrupt.
-        * Other ports done as user opens and inits them.
-        */
-       rcvmask = 1ULL;
-       dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
-               (rcvmask << dd->ipath_r_intravail_shift);
-       if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
-               dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-
-       /*
-        * now ready for use.  this should be cleared whenever we
-        * detect a reset, or initiate one.
-        */
-       dd->ipath_flags |= IPATH_INITTED;
-
-       /*
-        * Init our shadow copies of head from tail values,
-        * and write head values to match.
-        */
-       val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
-       ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
-
-       /* Initialize so we interrupt on next packet received */
-       ipath_write_ureg(dd, ur_rcvhdrhead,
-                        dd->ipath_rhdrhead_intr_off |
-                        dd->ipath_pd[0]->port_head, 0);
-
-       /*
-        * by now pioavail updates to memory should have occurred, so
-        * copy them into our working/shadow registers; this is in
-        * case something went wrong with abort, but mostly to get the
-        * initial values of the generation bit correct.
-        */
-       for (i = 0; i < dd->ipath_pioavregs; i++) {
-               __le64 pioavail;
-
-               /*
-                * Chip Errata bug 6641; even and odd qwords>3 are swapped.
-                */
-               if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-                       pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
-               else
-                       pioavail = dd->ipath_pioavailregs_dma[i];
-               /*
-                * don't need to worry about ipath_pioavailkernel here
-                * because we will call ipath_chg_pioavailkernel() later
-                * in initialization, to busy out buffers as needed
-                */
-               dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
-       }
-       /* can get counters, stats, etc. */
-       dd->ipath_flags |= IPATH_PRESENT;
-}
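
The loop at the end of enable_chip() applies the errata 6641 workaround: when IPATH_SWAP_PIOBUFS is set, even and odd qwords above index 3 are read swapped from the DMA'ed copy. A small stand-alone sketch of that index mapping:

#include <stdio.h>

/* Mirror of the index selection used above: qwords 0..3 are taken
 * as-is, everything above swaps even/odd (i ^ 1) when the chip has
 * the swapped-PIO-buffer erratum. */
static unsigned dma_index(unsigned i, int swap_piobufs)
{
	return (i > 3 && swap_piobufs) ? (i ^ 1) : i;
}

int main(void)
{
	unsigned i;

	for (i = 0; i < 8; i++)
		printf("shadow[%u] <- dma[%u]\n", i, dma_index(i, 1));
	return 0;
}
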
-
-static int init_housekeeping(struct ipath_devdata *dd, int reinit)
-{
-       char boardn[40];
-       int ret = 0;
-
-       /*
-        * have to clear shadow copies of registers at init that are
-        * not otherwise set here, or all kinds of bizarre things
-        * happen with the driver on a chip reset
-        */
-       dd->ipath_rcvhdrsize = 0;
-
-       /*
-        * Don't clear ipath_flags as 8bit mode was set before
-        * entering this func. However, we do set the linkstate to
-        * unknown, so we can watch for a transition.
-        * PRESENT is set because we want register reads to work,
-        * and the kernel infrastructure saw it in config space;
-        * We clear it if we have failures.
-        */
-       dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
-       dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
-                            IPATH_LINKDOWN | IPATH_LINKINIT);
-
-       ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");
-       dd->ipath_revision =
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
-
-       /*
-        * set up fundamental info we need to use the chip; we assume
-        * if the revision reg and these regs are OK, we don't need to
-        * special case the rest
-        */
-       dd->ipath_sregbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);
-       dd->ipath_cregbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);
-       dd->ipath_uregbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);
-       ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "
-                  "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,
-                  dd->ipath_uregbase, dd->ipath_cregbase);
-       if ((dd->ipath_revision & 0xffffffff) == 0xffffffff
-           || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff
-           || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff
-           || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
-               ipath_dev_err(dd, "Register read failures from chip, "
-                             "giving up initialization\n");
-               dd->ipath_flags &= ~IPATH_PRESENT;
-               ret = -ENODEV;
-               goto done;
-       }
-
-
-       /* clear diagctrl register, in case diags were running and crashed */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, 0);
-
-       /* clear the initial reset flag, in case first driver load */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-                        INFINIPATH_E_RESET);
-
-       ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
-                  (unsigned long long) dd->ipath_revision,
-                  dd->ipath_pcirev);
-
-       if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
-            INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
-               ipath_dev_err(dd, "Driver only handles version %d, "
-                             "chip swversion is %d (%llx), failing\n",
-                             IPATH_CHIP_SWVERSION,
-                             (int)(dd->ipath_revision >>
-                                   INFINIPATH_R_SOFTWARE_SHIFT) &
-                             INFINIPATH_R_SOFTWARE_MASK,
-                             (unsigned long long) dd->ipath_revision);
-               ret = -ENOSYS;
-               goto done;
-       }
-       dd->ipath_majrev = (u8) ((dd->ipath_revision >>
-                                 INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
-                                INFINIPATH_R_CHIPREVMAJOR_MASK);
-       dd->ipath_minrev = (u8) ((dd->ipath_revision >>
-                                 INFINIPATH_R_CHIPREVMINOR_SHIFT) &
-                                INFINIPATH_R_CHIPREVMINOR_MASK);
-       dd->ipath_boardrev = (u8) ((dd->ipath_revision >>
-                                   INFINIPATH_R_BOARDID_SHIFT) &
-                                  INFINIPATH_R_BOARDID_MASK);
-
-       ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
-
-       snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
-                "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
-                "SW Compat %u\n",
-                IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
-                (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
-                INFINIPATH_R_ARCH_MASK,
-                dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,
-                (unsigned)(dd->ipath_revision >>
-                           INFINIPATH_R_SOFTWARE_SHIFT) &
-                INFINIPATH_R_SOFTWARE_MASK);
-
-       ipath_dbg("%s", dd->ipath_boardversion);
-
-       if (ret)
-               goto done;
-
-       if (reinit)
-               ret = init_chip_reset(dd);
-       else
-               ret = init_chip_first(dd);
-
-done:
-       return ret;
-}
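
init_housekeeping() above derives majrev, minrev, and boardrev by shifting and masking fields out of the 64-bit revision register. A sketch of that unpacking, using placeholder shift/mask values rather than the real INFINIPATH_R_* constants:

#include <stdint.h>
#include <stdio.h>

#define R_CHIPREVMAJOR_SHIFT 8     /* placeholder, not the real value */
#define R_CHIPREVMAJOR_MASK  0xff
#define R_CHIPREVMINOR_SHIFT 0     /* placeholder, not the real value */
#define R_CHIPREVMINOR_MASK  0xff

int main(void)
{
	uint64_t revision = 0x0403;  /* illustrative register contents */
	uint8_t majrev = (revision >> R_CHIPREVMAJOR_SHIFT) & R_CHIPREVMAJOR_MASK;
	uint8_t minrev = (revision >> R_CHIPREVMINOR_SHIFT) & R_CHIPREVMINOR_MASK;

	printf("chip rev %u.%u\n", majrev, minrev);
	return 0;
}
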
-
-static void verify_interrupt(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-
-       if (!dd)
-               return; /* being torn down */
-
-       /*
-        * If we don't have any interrupts, let the user know and
-        * don't bother checking again.
-        */
-       if (dd->ipath_int_counter == 0) {
-               if (!dd->ipath_f_intr_fallback(dd))
-                       dev_err(&dd->pcidev->dev, "No interrupts detected, "
-                               "not usable.\n");
-               else /* re-arm the timer to see if fallback works */
-                       mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
-       } else
-               ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
-                       dd->ipath_int_counter);
-}
-
-/**
- * ipath_init_chip - do the actual initialization sequence on the chip
- * @dd: the infinipath device
- * @reinit: reinitializing, so don't allocate new memory
- *
- * Do the actual initialization sequence on the chip.  This is done
- * both from the init routine called from the PCI infrastructure, and
- * when we reset the chip, or detect that it was reset internally,
- * or it's administratively re-enabled.
- *
- * Memory allocation here and in called routines is only done in
- * the first case (reinit == 0).  We have to be careful, because even
- * without memory allocation, we need to re-write all the chip registers
- * TIDs, etc. after the reset or enable has completed.
- */
-int ipath_init_chip(struct ipath_devdata *dd, int reinit)
-{
-       int ret = 0;
-       u32 kpiobufs, defkbufs;
-       u32 piobufs, uports;
-       u64 val;
-       struct ipath_portdata *pd;
-       gfp_t gfp_flags = GFP_USER | __GFP_COMP;
-
-       ret = init_housekeeping(dd, reinit);
-       if (ret)
-               goto done;
-
-       /*
-        * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
-        * but then it no longer nicely fits a power of two, and since
-        * we now use routines that backend onto __get_free_pages, the
-        * rest would be wasted.
-        */
-       dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
-                        dd->ipath_rcvhdrcnt);
-
-       /*
-        * Set up the shadow copies of the piobufavail registers,
-        * which we compare against the chip registers for now, and
-        * the in memory DMA'ed copies of the registers.  This has to
-        * be done early, before we calculate lastport, etc.
-        */
-       piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-       /*
-        * calc number of pioavail registers, and save it; we have 2
-        * bits per buffer.
-        */
-       dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
-               / (sizeof(u64) * BITS_PER_BYTE / 2);
-       uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
-       if (piobufs > 144)
-               defkbufs = 32 + dd->ipath_pioreserved;
-       else
-               defkbufs = 16 + dd->ipath_pioreserved;
-
-       if (ipath_kpiobufs && (ipath_kpiobufs +
-               (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {
-               int i = (int) piobufs -
-                       (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
-               if (i < 1)
-                       i = 1;
-               dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
-                        "%d for kernel leaves too few for %d user ports "
-                        "(%d each); using %u\n", ipath_kpiobufs,
-                        piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
-               /*
-                * shouldn't change ipath_kpiobufs, because it could be
-                * different for different devices...
-                */
-               kpiobufs = i;
-       } else if (ipath_kpiobufs)
-               kpiobufs = ipath_kpiobufs;
-       else
-               kpiobufs = defkbufs;
-       dd->ipath_lastport_piobuf = piobufs - kpiobufs;
-       dd->ipath_pbufsport =
-               uports ? dd->ipath_lastport_piobuf / uports : 0;
-       /* if not an even divisor, some user ports get extra buffers */
-       dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -
-               (dd->ipath_pbufsport * uports);
-       if (dd->ipath_ports_extrabuf)
-               ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "
-                       "ports <= %u\n", dd->ipath_pbufsport,
-                       dd->ipath_ports_extrabuf);
-       dd->ipath_lastpioindex = 0;
-       dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
-       /* ipath_pioavailshadow initialized earlier */
-       ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
-                  "each for %u user ports\n", kpiobufs,
-                  piobufs, dd->ipath_pbufsport, uports);
-       ret = dd->ipath_f_early_init(dd);
-       if (ret) {
-               ipath_dev_err(dd, "Early initialization failure\n");
-               goto done;
-       }
-
-       /*
-        * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
-        * done after early_init.
-        */
-       dd->ipath_hdrqlast =
-               dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
-                        dd->ipath_rcvhdrentsize);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
-                        dd->ipath_rcvhdrsize);
-
-       if (!reinit) {
-               ret = init_pioavailregs(dd);
-               init_shadow_tids(dd);
-               if (ret)
-                       goto done;
-       }
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
-                        dd->ipath_pioavailregs_phys);
-
-       /*
-        * this is to detect s/w errors, which the h/w works around by
-        * ignoring the low 6 bits of address, if it wasn't aligned.
-        */
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);
-       if (val != dd->ipath_pioavailregs_phys) {
-               ipath_dev_err(dd, "Catastrophic software error, "
-                             "SendPIOAvailAddr written as %lx, "
-                             "read back as %llx\n",
-                             (unsigned long) dd->ipath_pioavailregs_phys,
-                             (unsigned long long) val);
-               ret = -EINVAL;
-               goto done;
-       }
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
-
-       /*
-        * make sure we are not in freeze, and PIO send enabled, so
-        * writes to pbc happen
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                        ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-
-       /*
-        * before error clears, since we expect serdes pll errors during
-        * this, the first time after reset
-        */
-       if (bringup_link(dd)) {
-               dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");
-               ret = -ENETDOWN;
-               goto done;
-       }
-
-       /*
-        * clear any "expected" hwerrs from reset and/or initialization
-        * clear any that aren't enabled (at least this once), and then
-        * set the enable mask
-        */
-       dd->ipath_f_init_hwerrors(dd);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                        ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                        dd->ipath_hwerrmask);
-
-       /* clear all */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
-       /* enable errors that are masked, at least this first time. */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-                        ~dd->ipath_maskederrs);
-       dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
-       dd->ipath_errormask =
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
-       /* clear any interrupts up to this point (ints still not enabled) */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
-
-       dd->ipath_f_tidtemplate(dd);
-
-       /*
-        * Set up the port 0 (kernel) rcvhdr q and egr TIDs.  If doing
-        * re-init, the simplest way to handle this is to free
-        * existing, and re-allocate.
-        * Need to re-create rest of port 0 portdata as well.
-        */
-       pd = dd->ipath_pd[0];
-       if (reinit) {
-               struct ipath_portdata *npd;
-
-               /*
-                * Alloc and init new ipath_portdata for port0,
-                * Then free old pd. Could lead to fragmentation, but also
-                * makes later support for hot-swap easier.
-                */
-               npd = create_portdata0(dd);
-               if (npd) {
-                       ipath_free_pddata(dd, pd);
-                       dd->ipath_pd[0] = npd;
-                       pd = npd;
-               } else {
-                       ipath_dev_err(dd, "Unable to allocate portdata"
-                                     " for port 0, failing\n");
-                       ret = -ENOMEM;
-                       goto done;
-               }
-       }
-       ret = ipath_create_rcvhdrq(dd, pd);
-       if (!ret)
-               ret = create_port0_egr(dd);
-       if (ret) {
-               ipath_dev_err(dd, "failed to allocate kernel port's "
-                             "rcvhdrq and/or egr bufs\n");
-               goto done;
-       } else {
-               enable_chip(dd, reinit);
-       }
-
-       /* after enable_chip, so pioavailshadow setup */
-       ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
-
-       /*
-        * Cancel any possible active sends from early driver load.
-        * Follows early_init because some chips have to initialize
-        * PIO buffers in early_init to avoid false parity errors.
-        * After enable and ipath_chg_pioavailkernel so we can safely
-        * enable pioavail updates and PIOENABLE; packets are now
-        * ready to go out.
-        */
-       ipath_cancel_sends(dd, 1);
-
-       if (!reinit) {
-               /*
-                * Used when we close a port, for DMA already in flight
-                * at close.
-                */
-               dd->ipath_dummy_hdrq = dma_alloc_coherent(
-                       &dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
-                       &dd->ipath_dummy_hdrq_phys,
-                       gfp_flags);
-               if (!dd->ipath_dummy_hdrq) {
-                       dev_info(&dd->pcidev->dev,
-                               "Couldn't allocate 0x%lx bytes for dummy hdrq\n",
-                               dd->ipath_pd[0]->port_rcvhdrq_size);
-                       /* fallback to just 0'ing */
-                       dd->ipath_dummy_hdrq_phys = 0UL;
-               }
-       }
-
-       /*
-        * cause retrigger of pending interrupts ignored during init,
-        * even if we had errors
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-
-       if (!dd->ipath_stats_timer_active) {
-               /*
-                * First init, or after an admin disable/enable: set up the
-                * stats retrieval timer, even if we had errors in the last
-                * portion of setup.
-                */
-               setup_timer(&dd->ipath_stats_timer, ipath_get_faststats,
-                               (unsigned long)dd);
-               /* every 5 seconds; */
-               dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
-               /* takes ~16 seconds to overflow at full IB 4x bandwidth */
-               add_timer(&dd->ipath_stats_timer);
-               dd->ipath_stats_timer_active = 1;
-       }
-
-       /* Set up SendDMA if chip supports it */
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-               ret = setup_sdma(dd);
-
-       /* Set up HoL state */
-       setup_timer(&dd->ipath_hol_timer, ipath_hol_event, (unsigned long)dd);
-
-       dd->ipath_hol_state = IPATH_HOL_UP;
-
-done:
-       if (!ret) {
-               *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
-               if (!dd->ipath_f_intrsetup(dd)) {
-                       /* now we can enable all interrupts from the chip */
-                       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
-                                        -1LL);
-                       /* force re-interrupt of any pending interrupts. */
-                       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
-                                        0ULL);
-                       /* chip is usable; mark it as initialized */
-                       *dd->ipath_statusp |= IPATH_STATUS_INITTED;
-
-                       /*
-                        * setup to verify we get an interrupt, and fallback
-                        * to an alternate if necessary and possible
-                        */
-                       if (!reinit) {
-                               setup_timer(&dd->ipath_intrchk_timer,
-                                               verify_interrupt,
-                                               (unsigned long)dd);
-                       }
-                       dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
-                       add_timer(&dd->ipath_intrchk_timer);
-               } else
-                       ipath_dev_err(dd, "No interrupts enabled, couldn't "
-                                     "setup interrupt address\n");
-
-               if (dd->ipath_cfgports > ipath_stats.sps_nports)
-                       /*
-                        * sps_nports is a global, so, we set it to
-                        * the highest number of ports of any of the
-                        * chips we find; we never decrement it, at
-                        * least for now.  Since this might have changed
-                        * over disable/enable or prior to reset, always
-                        * do the check and potentially adjust.
-                        */
-                       ipath_stats.sps_nports = dd->ipath_cfgports;
-       } else
-               ipath_dbg("Failed (%d) to initialize chip\n", ret);
-
-       /*
-        * if ret is non-zero, we probably should do some cleanup here...
-        */
-       return ret;
-}
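
ipath_init_chip() above budgets the PIO buffers: a requested kernel share is honoured only if every user port still gets at least IPATH_MIN_USER_PORT_BUFCNT buffers; otherwise the kernel share is clamped. A stand-alone sketch of that rule, with illustrative numbers:

#include <stdio.h>

#define MIN_USER_PORT_BUFCNT 7   /* matches IPATH_MIN_USER_PORT_BUFCNT above */

/* Return the kernel PIO-buffer count actually used, clamped so that
 * uports user ports keep MIN_USER_PORT_BUFCNT buffers each. */
static int clamp_kpiobufs(int requested, int piobufs, int uports)
{
	if (requested &&
	    requested + uports * MIN_USER_PORT_BUFCNT > piobufs) {
		int i = piobufs - uports * MIN_USER_PORT_BUFCNT;

		return i < 1 ? 1 : i;
	}
	return requested;
}

int main(void)
{
	/* 144 buffers and 4 user ports: asking for 120 kernel buffers is
	 * clamped to 116 so each port keeps its 7. */
	printf("kernel buffers used: %d\n", clamp_kpiobufs(120, 144, 4));
	return 0;
}
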
-
-static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
-{
-       struct ipath_devdata *dd;
-       unsigned long flags;
-       unsigned short val;
-       int ret;
-
-       ret = ipath_parse_ushort(str, &val);
-
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-
-       if (ret < 0)
-               goto bail;
-
-       if (val == 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
-               if (dd->ipath_kregbase)
-                       continue;
-               if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
-                          (dd->ipath_cfgports *
-                           IPATH_MIN_USER_PORT_BUFCNT)))
-               {
-                       ipath_dev_err(
-                               dd,
-                               "Allocating %d PIO bufs for kernel leaves "
-                               "too few for %d user ports (%d each)\n",
-                               val, dd->ipath_cfgports - 1,
-                               IPATH_MIN_USER_PORT_BUFCNT);
-                       ret = -EINVAL;
-                       goto bail;
-               }
-               dd->ipath_lastport_piobuf =
-                       dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
-       }
-
-       ipath_kpiobufs = val;
-       ret = 0;
-bail:
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-       return ret;
-}
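
For reference, ipath_set_kpiobufs() above is the "set" hook for the driver's kpiobufs module parameter: it validates the requested kernel PIO buffer count against the per-port minimum before applying it to each device. A minimal sketch of how such a hook is typically registered with module_param_call() follows; the example_* names are illustrative assumptions, not part of this patch.

/* Sketch only: a custom module-parameter set hook, registered in the
 * same style as ipath_set_kpiobufs() (example_* names are assumptions).
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int example_kpiobufs;

static int example_set_kpiobufs(const char *str, struct kernel_param *kp)
{
	unsigned long val;
	int ret = kstrtoul(str, 0, &val);

	if (ret)
		return ret;
	if (!val)		/* zero kernel PIO buffers is never valid */
		return -EINVAL;
	example_kpiobufs = val;
	return 0;
}

module_param_call(kpiobufs, example_set_kpiobufs, param_get_uint,
		  &example_kpiobufs, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(kpiobufs, "PIO buffers reserved for kernel use");
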
diff --git a/drivers/staging/rdma/ipath/ipath_intr.c b/drivers/staging/rdma/ipath/ipath_intr.c
deleted file mode 100644 (file)
index 0403fa2..0000000
+++ /dev/null
@@ -1,1271 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer, so it can be re-used.
- */
-void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
-{
-       u32 piobcnt;
-       unsigned long sbuf[4];
-       /*
-        * it's possible that sendbuffererror could have bits set; might
-        * have already done this as a result of hardware error handling
-        */
-       piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-       /* read these before writing errorclear */
-       sbuf[0] = ipath_read_kreg64(
-               dd, dd->ipath_kregs->kr_sendbuffererror);
-       sbuf[1] = ipath_read_kreg64(
-               dd, dd->ipath_kregs->kr_sendbuffererror + 1);
-       if (piobcnt > 128)
-               sbuf[2] = ipath_read_kreg64(
-                       dd, dd->ipath_kregs->kr_sendbuffererror + 2);
-       if (piobcnt > 192)
-               sbuf[3] = ipath_read_kreg64(
-                       dd, dd->ipath_kregs->kr_sendbuffererror + 3);
-       else
-               sbuf[3] = 0;
-
-       if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
-               int i;
-               if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
-                       time_after(dd->ipath_lastcancel, jiffies)) {
-                       __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
-                                         "SendbufErrs %lx %lx", sbuf[0],
-                                         sbuf[1]);
-                       if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
-                               printk(" %lx %lx ", sbuf[2], sbuf[3]);
-                       printk("\n");
-               }
-
-               for (i = 0; i < piobcnt; i++)
-                       if (test_bit(i, sbuf))
-                               ipath_disarm_piobufs(dd, i, 1);
-               /* ignore armlaunch errs for a bit */
-               dd->ipath_lastcancel = jiffies+3;
-       }
-}
-
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
-       (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
-        INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
-        INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
-        INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
-        INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
-        INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
-       (INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
-        INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-        INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
-        INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
-        INFINIPATH_E_INVALIDADDR)
-
-/*
- * This is similar to E_SUM_ERRS, but we can't ignore armlaunch, and we
- * don't ignore errors unrelated to freeze and cancelling buffers.  We
- * can't ignore armlaunch because more could arrive while we are still
- * cleaning up, and those need to be cancelled as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
-        (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-        INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
-        INFINIPATH_E_SPKTLEN)
-
-/*
- * these are errors that can occur when the link changes state while
- * a packet is being sent or received.  This doesn't cover things
- * like EBP or VCRC errors, which can be the result of a send occurring
- * while the link changes state, so that we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
-       (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-        INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
-        INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
-        INFINIPATH_E_RUNEXPCHAR)
-
-static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
-{
-       u64 ignore_this_time = 0;
-
-       ipath_disarm_senderrbufs(dd);
-       if ((errs & E_SUM_LINK_PKTERRS) &&
-           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-               /*
-                * This can happen when SMA is trying to bring the link
-                * up, but the IB link changes state at the "wrong" time.
-                * The IB logic then complains that the packet isn't
-                * valid.  We don't want to confuse people, so we just
-                * don't print them, except at debug
-                */
-               ipath_dbg("Ignoring packet errors %llx, because link not "
-                         "ACTIVE\n", (unsigned long long) errs);
-               ignore_this_time = errs & E_SUM_LINK_PKTERRS;
-       }
-
-       return ignore_this_time;
-}
-
-/* generic hw error messages... */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \
-       { \
-               .mask = ( INFINIPATH_HWE_TXEMEMPARITYERR_##a <<    \
-                         INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT ),   \
-               .msg = "TXE " #a " Memory Parity"            \
-       }
-#define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \
-       { \
-               .mask = ( INFINIPATH_HWE_RXEMEMPARITYERR_##a <<    \
-                         INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT ),   \
-               .msg = "RXE " #a " Memory Parity"            \
-       }
-
-static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = {
-       INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"),
-       INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"),
-
-       INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF),
-       INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC),
-       INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO),
-
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO),
-};
-
-/**
- * ipath_format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
-       strlcat(msg, "[", msgl);
-       strlcat(msg, hwmsg, msgl);
-       strlcat(msg, "]", msgl);
-}
-
-/**
- * ipath_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void ipath_format_hwerrors(u64 hwerrs,
-                          const struct ipath_hwerror_msgs *hwerrmsgs,
-                          size_t nhwerrmsgs,
-                          char *msg, size_t msgl)
-{
-       int i;
-       const int glen =
-           ARRAY_SIZE(ipath_generic_hwerror_msgs);
-
-       for (i=0; i<glen; i++) {
-               if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
-                       ipath_format_hwmsg(msg, msgl,
-                                          ipath_generic_hwerror_msgs[i].msg);
-               }
-       }
-
-       for (i=0; i<nhwerrmsgs; i++) {
-               if (hwerrs & hwerrmsgs[i].mask) {
-                       ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-               }
-       }
-}
-
-/* return the strings for the most common link states */
-static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-{
-       char *ret;
-       u32 state;
-
-       state = ipath_ib_state(dd, ibcs);
-       if (state == dd->ib_init)
-               ret = "Init";
-       else if (state == dd->ib_arm)
-               ret = "Arm";
-       else if (state == dd->ib_active)
-               ret = "Active";
-       else
-               ret = "Down";
-       return ret;
-}
-
-void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
-{
-       struct ib_event event;
-
-       event.device = &dd->verbs_dev->ibdev;
-       event.element.port_num = 1;
-       event.event = ev;
-       ib_dispatch_event(&event);
-}
-
-static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
-                                    ipath_err_t errs)
-{
-       u32 ltstate, lstate, ibstate, lastlstate;
-       u32 init = dd->ib_init;
-       u32 arm = dd->ib_arm;
-       u32 active = dd->ib_active;
-       const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-
-       lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
-       ibstate = ipath_ib_state(dd, ibcs);
-       /* linkstate at last interrupt */
-       lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
-       ltstate = ipath_ib_linktrstate(dd, ibcs); /* link training state */
-
-       /*
-        * Since going into a recovery state causes the link state to go
-        * down and since recovery is transitory, it is better if we "miss"
-        * ever seeing the link training state go into recovery (i.e.,
-        * ignore this transition for link state special handling purposes)
-        * without even updating ipath_lastibcstat.
-        */
-       if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
-           (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
-           (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
-               goto done;
-
-       /*
-        * if linkstate transitions into INIT from any of the various down
-        * states, or if it transitions from any of the up (INIT or better)
-        * states into any of the down states (except link recovery), then
-        * call the chip-specific code to take appropriate actions.
-        */
-       if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
-               lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
-               /* transitioned to UP */
-               if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
-                       /* link came up, so we must no longer be disabled */
-                       dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
-                       ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
-                       goto skip_ibchange; /* chip-code handled */
-               }
-       } else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
-               (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
-               ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
-               ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
-               int handled;
-               handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
-               dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
-               if (handled) {
-                       ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
-                       goto skip_ibchange; /* chip-code handled */
-               }
-       }
-
-       /*
-        * Significant enough to always print and get into logs, if it was
-        * unexpected.  If it was a requested state change, we'll have
-        * already cleared the flags, so we won't print this warning
-        */
-       if ((ibstate != arm && ibstate != active) &&
-           (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
-               dev_info(&dd->pcidev->dev, "Link state changed from %s "
-                        "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
-                        "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
-       }
-
-       if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
-           ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-               u32 lastlts;
-               lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
-               /*
-                * Ignore cycling back and forth from Polling.Active to
-                * Polling.Quiet while waiting for the other end of the link
-                * to come up, except to try and decide if we are connected
-                * to a live IB device or not.  We will cycle back and
-                * forth between them if no cable is plugged in, the other
-                * device is powered off or disabled, etc.
-                */
-               if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
-                   lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-                       if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
-                            (++dd->ipath_ibpollcnt == 40)) {
-                               dd->ipath_flags |= IPATH_NOCABLE;
-                               *dd->ipath_statusp |=
-                                       IPATH_STATUS_IB_NOCABLE;
-                               ipath_cdbg(LINKVERB, "Set NOCABLE\n");
-                       }
-                       ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
-                               ipath_ibcstatus_str[ltstate], ibstate);
-                       goto skip_ibchange;
-               }
-       }
-
-       dd->ipath_ibpollcnt = 0; /* no longer in a Poll* state */
-       ipath_stats.sps_iblink++;
-
-       if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
-               u64 linkrecov;
-               linkrecov = ipath_snap_cntr(dd,
-                       dd->ipath_cregs->cr_iblinkerrrecovcnt);
-               if (linkrecov != dd->ipath_lastlinkrecov) {
-                       ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
-                               (unsigned long long) ibcs,
-                               ib_linkstate(dd, ibcs),
-                               ipath_ibcstatus_str[ltstate],
-                               (unsigned long long) linkrecov);
-                       /* and no more until active again */
-                       dd->ipath_lastlinkrecov = 0;
-                       ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
-                       goto skip_ibchange;
-               }
-       }
-
-       if (ibstate == init || ibstate == arm || ibstate == active) {
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
-               if (ibstate == init || ibstate == arm) {
-                       *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-                       if (dd->ipath_flags & IPATH_LINKACTIVE)
-                               signal_ib_event(dd, IB_EVENT_PORT_ERR);
-               }
-               if (ibstate == arm) {
-                       dd->ipath_flags |= IPATH_LINKARMED;
-                       dd->ipath_flags &= ~(IPATH_LINKUNK |
-                               IPATH_LINKINIT | IPATH_LINKDOWN |
-                               IPATH_LINKACTIVE | IPATH_NOCABLE);
-                       ipath_hol_down(dd);
-               } else  if (ibstate == init) {
-                       /*
-                        * set INIT and DOWN.  Down is checked by
-                        * most of the other code, but INIT is
-                        * useful to know in a few places.
-                        */
-                       dd->ipath_flags |= IPATH_LINKINIT |
-                               IPATH_LINKDOWN;
-                       dd->ipath_flags &= ~(IPATH_LINKUNK |
-                               IPATH_LINKARMED | IPATH_LINKACTIVE |
-                               IPATH_NOCABLE);
-                       ipath_hol_down(dd);
-               } else {  /* active */
-                       dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
-                               dd->ipath_cregs->cr_iblinkerrrecovcnt);
-                       *dd->ipath_statusp |=
-                               IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
-                       dd->ipath_flags |= IPATH_LINKACTIVE;
-                       dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-                               | IPATH_LINKDOWN | IPATH_LINKARMED |
-                               IPATH_NOCABLE);
-                       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-                               ipath_restart_sdma(dd);
-                       signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
-                       /* LED active not handled in chip _f_updown */
-                       dd->ipath_f_setextled(dd, lstate, ltstate);
-                       ipath_hol_up(dd);
-               }
-
-               /*
-                * print after we've already done the work, so as not to
-                * delay the state changes and notifications, for debugging
-                */
-               if (lstate == lastlstate)
-                       ipath_cdbg(LINKVERB, "Unchanged from last: %s "
-                               "(%x)\n", ib_linkstate(dd, ibcs), ibstate);
-               else
-                       ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
-                                 dd->ipath_unit, ib_linkstate(dd, ibcs),
-                                 ipath_ibcstatus_str[ltstate],  ibstate);
-       } else { /* down */
-               if (dd->ipath_flags & IPATH_LINKACTIVE)
-                       signal_ib_event(dd, IB_EVENT_PORT_ERR);
-               dd->ipath_flags |= IPATH_LINKDOWN;
-               dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-                                    | IPATH_LINKACTIVE |
-                                    IPATH_LINKARMED);
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-               dd->ipath_lli_counter = 0;
-
-               if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
-                       ipath_cdbg(VERBOSE, "Unit %u link state down "
-                                  "(state 0x%x), from %s\n",
-                                  dd->ipath_unit, lstate,
-                                  ib_linkstate(dd, dd->ipath_lastibcstat));
-               else
-                       ipath_cdbg(LINKVERB, "Unit %u link state changed "
-                                  "to %s (0x%x) from down (%x)\n",
-                                  dd->ipath_unit,
-                                  ipath_ibcstatus_str[ltstate],
-                                  ibstate, lastlstate);
-       }
-
-skip_ibchange:
-       dd->ipath_lastibcstat = ibcs;
-done:
-       return;
-}
-
-static void handle_supp_msgs(struct ipath_devdata *dd,
-                            unsigned supp_msgs, char *msg, u32 msgsz)
-{
-       /*
-        * Print the message unless it's ibc status change only, which
-        * happens so often we never want to count it.
-        */
-       if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
-               int iserr;
-               ipath_err_t mask;
-               iserr = ipath_decode_err(dd, msg, msgsz,
-                                        dd->ipath_lasterror &
-                                        ~INFINIPATH_E_IBSTATUSCHANGED);
-
-               mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-                       INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
-
-               /* if we're in debug, then don't mask SDMADISABLED msgs */
-               if (ipath_debug & __IPATH_DBG)
-                       mask &= ~INFINIPATH_E_SDMADISABLED;
-
-               if (dd->ipath_lasterror & ~mask)
-                       ipath_dev_err(dd, "Suppressed %u messages for "
-                                     "fast-repeating errors (%s) (%llx)\n",
-                                     supp_msgs, msg,
-                                     (unsigned long long)
-                                     dd->ipath_lasterror);
-               else {
-                       /*
-                        * rcvegrfull and rcvhdrqfull are "normal", for some
-                        * types of processes (mostly benchmarks) that send
-                        * huge numbers of messages, while not processing
-                        * them. So only complain about these at debug
-                        * level.
-                        */
-                       if (iserr)
-                               ipath_dbg("Suppressed %u messages for %s\n",
-                                         supp_msgs, msg);
-                       else
-                               ipath_cdbg(ERRPKT,
-                                       "Suppressed %u messages for %s\n",
-                                         supp_msgs, msg);
-               }
-       }
-}
-
-static unsigned handle_frequent_errors(struct ipath_devdata *dd,
-                                      ipath_err_t errs, char *msg,
-                                      u32 msgsz, int *noprint)
-{
-       unsigned long nc;
-       static unsigned long nextmsg_time;
-       static unsigned nmsgs, supp_msgs;
-
-       /*
-        * Throttle back "fast" messages to no more than 10 per 5 seconds.
-        * This isn't perfect, but it's a reasonable heuristic. If we get
-        * more than 10, give a 6x longer delay.
-        */
-       nc = jiffies;
-       if (nmsgs > 10) {
-               if (time_before(nc, nextmsg_time)) {
-                       *noprint = 1;
-                       if (!supp_msgs++)
-                               nextmsg_time = nc + HZ * 3;
-               } else if (supp_msgs) {
-                       handle_supp_msgs(dd, supp_msgs, msg, msgsz);
-                       supp_msgs = 0;
-                       nmsgs = 0;
-               }
-       } else if (!nmsgs++ || time_after(nc, nextmsg_time)) {
-               nextmsg_time = nc + HZ / 2;
-       }
-
-       return supp_msgs;
-}
-
-static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
-{
-       unsigned long flags;
-       int expected;
-
-       if (ipath_debug & __IPATH_DBG) {
-               char msg[128];
-               ipath_decode_err(dd, msg, sizeof msg, errs &
-                       INFINIPATH_E_SDMAERRS);
-               ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
-       }
-       if (ipath_debug & __IPATH_VERBDBG) {
-               unsigned long tl, hd, status, lengen;
-               tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
-               hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
-               status = ipath_read_kreg64(dd
-                       , dd->ipath_kregs->kr_senddmastatus);
-               lengen = ipath_read_kreg64(dd,
-                       dd->ipath_kregs->kr_senddmalengen);
-               ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
-                       "lengen 0x%lx\n", tl, hd, status, lengen);
-       }
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-       expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-       if (!expected)
-               ipath_cancel_sends(dd, 1);
-}
-
-static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
-{
-       unsigned long flags;
-       int expected;
-
-       if ((istat & INFINIPATH_I_SDMAINT) &&
-           !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-               ipath_sdma_intr(dd);
-
-       if (istat & INFINIPATH_I_SDMADISABLED) {
-               expected = test_bit(IPATH_SDMA_ABORTING,
-                       &dd->ipath_sdma_status);
-               ipath_dbg("%s SDmaDisabled intr\n",
-                       expected ? "expected" : "unexpected");
-               spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-               __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-               if (!expected)
-                       ipath_cancel_sends(dd, 1);
-               if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-                       tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-       }
-}
-
-static int handle_hdrq_full(struct ipath_devdata *dd)
-{
-       int chkerrpkts = 0;
-       u32 hd, tl;
-       u32 i;
-
-       ipath_stats.sps_hdrqfull++;
-       for (i = 0; i < dd->ipath_cfgports; i++) {
-               struct ipath_portdata *pd = dd->ipath_pd[i];
-
-               if (i == 0) {
-                       /*
-                        * For kernel receive queues, we just want to know
-                        * if there are packets in the queue that we can
-                        * process.
-                        */
-                       if (pd->port_head != ipath_get_hdrqtail(pd))
-                               chkerrpkts |= 1 << i;
-                       continue;
-               }
-
-               /* Skip if user context is not open */
-               if (!pd || !pd->port_cnt)
-                       continue;
-
-               /* Don't report the same point multiple times. */
-               if (dd->ipath_flags & IPATH_NODMA_RTAIL)
-                       tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
-               else
-                       tl = ipath_get_rcvhdrtail(pd);
-               if (tl == pd->port_lastrcvhdrqtail)
-                       continue;
-
-               hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
-               if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
-                       pd->port_lastrcvhdrqtail = tl;
-                       pd->port_hdrqfull++;
-                       /* flush hdrqfull so that poll() sees it */
-                       wmb();
-                       wake_up_interruptible(&pd->port_wait);
-               }
-       }
-
-       return chkerrpkts;
-}
-
-static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
-{
-       char msg[128];
-       u64 ignore_this_time = 0;
-       u64 iserr = 0;
-       int chkerrpkts = 0, noprint = 0;
-       unsigned supp_msgs;
-       int log_idx;
-
-       /*
-        * don't report errors that are masked, either at init
-        * (not set in ipath_errormask), or temporarily (set in
-        * ipath_maskederrs)
-        */
-       errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
-
-       supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
-               &noprint);
-
-       /* do these first, they are most important */
-       if (errs & INFINIPATH_E_HARDWARE) {
-               /* reuse same msg buf */
-               dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
-       } else {
-               u64 mask;
-               for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
-                       mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
-                       if (errs & mask)
-                               ipath_inc_eeprom_err(dd, log_idx, 1);
-               }
-       }
-
-       if (errs & INFINIPATH_E_SDMAERRS)
-               handle_sdma_errors(dd, errs);
-
-       if (!noprint && (errs & ~dd->ipath_e_bitsextant))
-               ipath_dev_err(dd, "error interrupt with unknown errors "
-                             "%llx set\n", (unsigned long long)
-                             (errs & ~dd->ipath_e_bitsextant));
-
-       if (errs & E_SUM_ERRS)
-               ignore_this_time = handle_e_sum_errs(dd, errs);
-       else if ((errs & E_SUM_LINK_PKTERRS) &&
-           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-               /*
-                * This can happen when SMA is trying to bring the link
-                * up, but the IB link changes state at the "wrong" time.
-                * The IB logic then complains that the packet isn't
-                * valid.  We don't want to confuse people, so we just
-                * don't print them, except at debug
-                */
-               ipath_dbg("Ignoring packet errors %llx, because link not "
-                         "ACTIVE\n", (unsigned long long) errs);
-               ignore_this_time = errs & E_SUM_LINK_PKTERRS;
-       }
-
-       if (supp_msgs == 250000) {
-               int s_iserr;
-               /*
-                * It isn't entirely reasonable to assume that the errors set
-                * in the last clear period are all responsible for the
-                * problem, but the alternative is to assume that only the
-                * ones on this particular interrupt are, which also isn't great
-                */
-               dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
-
-               dd->ipath_errormask &= ~dd->ipath_maskederrs;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-                                dd->ipath_errormask);
-               s_iserr = ipath_decode_err(dd, msg, sizeof msg,
-                                          dd->ipath_maskederrs);
-
-               if (dd->ipath_maskederrs &
-                   ~(INFINIPATH_E_RRCVEGRFULL |
-                     INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
-                       ipath_dev_err(dd, "Temporarily disabling "
-                           "error(s) %llx reporting; too frequent (%s)\n",
-                               (unsigned long long) dd->ipath_maskederrs,
-                               msg);
-               else {
-                       /*
-                        * rcvegrfull and rcvhdrqfull are "normal",
-                        * for some types of processes (mostly benchmarks)
-                        * that send huge numbers of messages, while not
-                        * processing them.  So only complain about
-                        * these at debug level.
-                        */
-                       if (s_iserr)
-                               ipath_dbg("Temporarily disabling reporting "
-                                   "too frequent queue full errors (%s)\n",
-                                   msg);
-                       else
-                               ipath_cdbg(ERRPKT,
-                                   "Temporarily disabling reporting too"
-                                   " frequent packet errors (%s)\n",
-                                   msg);
-               }
-
-               /*
-                * Re-enable the masked errors after around 3 minutes, in
-                * ipath_get_faststats().  If we have a series of fast-
-                * repeating but different errors, the interval will keep
-                * stretching out, but that's OK, since that situation is
-                * already pretty catastrophic.
-                */
-               dd->ipath_unmasktime = jiffies + HZ * 180;
-       }
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
-       if (ignore_this_time)
-               errs &= ~ignore_this_time;
-       if (errs & ~dd->ipath_lasterror) {
-               errs &= ~dd->ipath_lasterror;
-               /* never suppress duplicate hwerrors or ibstatuschange */
-               dd->ipath_lasterror |= errs &
-                       ~(INFINIPATH_E_HARDWARE |
-                         INFINIPATH_E_IBSTATUSCHANGED);
-       }
-
-       if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
-               dd->ipath_spectriggerhit++;
-               ipath_dbg("%lu special trigger hits\n",
-                       dd->ipath_spectriggerhit);
-       }
-
-       /* likely due to cancel; so suppress message unless verbose */
-       if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
-               time_after(dd->ipath_lastcancel, jiffies)) {
-               /* armlaunch takes precedence; it often causes both. */
-               ipath_cdbg(VERBOSE,
-                       "Suppressed %s error (%llx) after sendbuf cancel\n",
-                       (errs &  INFINIPATH_E_SPIOARMLAUNCH) ?
-                       "armlaunch" : "sendpktlen", (unsigned long long)errs);
-               errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
-       }
-
-       if (!errs)
-               return 0;
-
-       if (!noprint) {
-               ipath_err_t mask;
-               /*
-                * The ones we mask off are handled specially below
-                * or above.  Also mask SDMADISABLED by default as it
-                * is too chatty.
-                */
-               mask = INFINIPATH_E_IBSTATUSCHANGED |
-                       INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-                       INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
-
-               /* if we're in debug, then don't mask SDMADISABLED msgs */
-               if (ipath_debug & __IPATH_DBG)
-                       mask &= ~INFINIPATH_E_SDMADISABLED;
-
-               ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
-       } else
-               /* so we don't need if (!noprint) at strlcat's below */
-               *msg = 0;
-
-       if (errs & E_SUM_PKTERRS) {
-               ipath_stats.sps_pkterrs++;
-               chkerrpkts = 1;
-       }
-       if (errs & E_SUM_ERRS)
-               ipath_stats.sps_errs++;
-
-       if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
-               ipath_stats.sps_crcerrs++;
-               chkerrpkts = 1;
-       }
-       iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
-
-
-       /*
-        * We don't want to print these two as they happen, or we can make
-        * the situation even worse, because it takes so long to print
-        * messages to serial consoles.  Kernel ports get printed from
-        * fast_stats, no more than every 5 seconds, user ports get printed
-        * on close
-        */
-       if (errs & INFINIPATH_E_RRCVHDRFULL)
-               chkerrpkts |= handle_hdrq_full(dd);
-       if (errs & INFINIPATH_E_RRCVEGRFULL) {
-               struct ipath_portdata *pd = dd->ipath_pd[0];
-
-               /*
-                * since this is of less importance and not likely to
-                * happen without also getting hdrfull, only count
-                * occurrences; don't check each port (or even the kernel
-                * vs user)
-                */
-               ipath_stats.sps_etidfull++;
-               if (pd->port_head != ipath_get_hdrqtail(pd))
-                       chkerrpkts |= 1;
-       }
-
-       /*
-        * do this before IBSTATUSCHANGED, in case both bits are set in a
-        * single interrupt; we want the STATUSCHANGE to "win", so that we
-        * update our internal copy of the state machine correctly
-        */
-       if (errs & INFINIPATH_E_RIBLOSTLINK) {
-               /*
-                * force through block below
-                */
-               errs |= INFINIPATH_E_IBSTATUSCHANGED;
-               ipath_stats.sps_iblink++;
-               dd->ipath_flags |= IPATH_LINKDOWN;
-               dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-                                    | IPATH_LINKARMED | IPATH_LINKACTIVE);
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-
-               ipath_dbg("Lost link, link now down (%s)\n",
-                       ipath_ibcstatus_str[ipath_read_kreg64(dd,
-                       dd->ipath_kregs->kr_ibcstatus) & 0xf]);
-       }
-       if (errs & INFINIPATH_E_IBSTATUSCHANGED)
-               handle_e_ibstatuschanged(dd, errs);
-
-       if (errs & INFINIPATH_E_RESET) {
-               if (!noprint)
-                       ipath_dev_err(dd, "Got reset, requires re-init "
-                                     "(unload and reload driver)\n");
-               dd->ipath_flags &= ~IPATH_INITTED;      /* needs re-init */
-               /* mark as having had error */
-               *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
-       }
-
-       if (!noprint && *msg) {
-               if (iserr)
-                       ipath_dev_err(dd, "%s error\n", msg);
-       }
-       if (dd->ipath_state_wanted & dd->ipath_flags) {
-               ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
-                          "waking\n", dd->ipath_state_wanted,
-                          dd->ipath_flags);
-               wake_up_interruptible(&ipath_state_wait);
-       }
-
-       return chkerrpkts;
-}
-
-/*
- * try to clean up as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- */
-void ipath_clear_freeze(struct ipath_devdata *dd)
-{
-       /* disable error interrupts, to avoid confusion */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
-
-       /* also disable interrupts; errormask is sometimes overwritten */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-
-       ipath_cancel_sends(dd, 1);
-
-       /* clear the freeze, and be sure chip saw it */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-       /* force in-memory update now we are out of freeze */
-       ipath_force_pio_avail_update(dd);
-
-       /*
-        * force new interrupt if any hwerr, error or interrupt bits are
-        * still set, and clear "safe" send packet errors related to freeze
-        * and cancelling sends.  Re-enable error interrupts before possible
-        * force of re-interrupt on pending interrupts.
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-               E_SPKT_ERRS_IGNORE);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-               dd->ipath_errormask);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-}
-
-                        * set up to verify we get an interrupt, and fall back
-                        * to an alternate if necessary and possible
-
-static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
-{
-       /*
-        * these sometimes happen during driver init and unload; we don't
-        * want to process any interrupts at that point
-        */
-
-       /* this is just a bandaid, not a fix, if something goes badly
-        * wrong */
-       if (++*unexpectp > 100) {
-               if (++*unexpectp > 105) {
-                       /*
-                        * ok, we must be taking somebody else's interrupts,
-                        * due to a messed up mptable and/or PIRQ table, so
-                        * unregister the interrupt.  We've seen this during
-                        * LinuxBIOS development work, and it may happen
-                        * again in the future.
-                        */
-                       if (dd->pcidev && dd->ipath_irq) {
-                               ipath_dev_err(dd, "Now %u unexpected "
-                                             "interrupts, unregistering "
-                                             "interrupt handler\n",
-                                             *unexpectp);
-                               ipath_dbg("free_irq of irq %d\n",
-                                         dd->ipath_irq);
-                               dd->ipath_f_free_irq(dd);
-                       }
-               }
-               if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
-                       ipath_dev_err(dd, "%u unexpected interrupts, "
-                                     "disabling interrupts completely\n",
-                                     *unexpectp);
-                       /*
-                        * disable all interrupts, something is very wrong
-                        */
-                       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
-                                        0ULL);
-               }
-       } else if (*unexpectp > 1)
-               ipath_dbg("Interrupt when not ready, should not happen, "
-                         "ignoring\n");
-}
-
-static noinline void ipath_bad_regread(struct ipath_devdata *dd)
-{
-       static int allbits;
-
-       /* separate routine, for better optimization of ipath_intr() */
-
-       /*
-        * We print the message and disable interrupts, in hope of
-        * having a better chance of debugging the problem.
-        */
-       ipath_dev_err(dd,
-                     "Read of interrupt status failed (all bits set)\n");
-       if (allbits++) {
-               /* disable all interrupts, something is very wrong */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-               if (allbits == 2) {
-                       ipath_dev_err(dd, "Still bad interrupt status, "
-                                     "unregistering interrupt\n");
-                       dd->ipath_f_free_irq(dd);
-               } else if (allbits > 2) {
-                       if ((allbits % 10000) == 0)
-                               printk(".");
-               } else
-                       ipath_dev_err(dd, "Disabling interrupts, "
-                                     "multiple errors\n");
-       }
-}
-
-static void handle_layer_pioavail(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-       int ret;
-
-       ret = ipath_ib_piobufavail(dd->verbs_dev);
-       if (ret > 0)
-               goto set;
-
-       return;
-set:
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-}
-
-/*
- * Handle receive interrupts for user ports; this means a user
- * process was waiting for a packet to arrive, and didn't want
- * to poll
- */
-static void handle_urcv(struct ipath_devdata *dd, u64 istat)
-{
-       u64 portr;
-       int i;
-       int rcvdint = 0;
-
-       /*
-        * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
-        * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
-        * would both like timely updates of the bits so that
-        * we don't pass them by unnecessarily.  the rmb()
-        * here ensures that we see them promptly -- the
-        * corresponding wmb()'s are in ipath_poll_urgent()
-        * and ipath_poll_next()...
-        */
-       rmb();
-       portr = ((istat >> dd->ipath_i_rcvavail_shift) &
-                dd->ipath_i_rcvavail_mask) |
-               ((istat >> dd->ipath_i_rcvurg_shift) &
-                dd->ipath_i_rcvurg_mask);
-       for (i = 1; i < dd->ipath_cfgports; i++) {
-               struct ipath_portdata *pd = dd->ipath_pd[i];
-
-               if (portr & (1 << i) && pd && pd->port_cnt) {
-                       if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
-                                              &pd->port_flag)) {
-                               clear_bit(i + dd->ipath_r_intravail_shift,
-                                         &dd->ipath_rcvctrl);
-                               wake_up_interruptible(&pd->port_wait);
-                               rcvdint = 1;
-                       } else if (test_and_clear_bit(IPATH_PORT_WAITING_URG,
-                                                     &pd->port_flag)) {
-                               pd->port_urgent++;
-                               wake_up_interruptible(&pd->port_wait);
-                       }
-               }
-       }
-       if (rcvdint) {
-               /* only want to take one interrupt, so turn off the rcv
-                * interrupt for all the ports for which we set rcv_waiting
-                * (but never for the kernel port)
-                */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                                dd->ipath_rcvctrl);
-       }
-}
-
-irqreturn_t ipath_intr(int irq, void *data)
-{
-       struct ipath_devdata *dd = data;
-       u64 istat, chk0rcv = 0;
-       ipath_err_t estat = 0;
-       irqreturn_t ret;
-       static unsigned unexpected = 0;
-       u64 kportrbits;
-
-       ipath_stats.sps_ints++;
-
-       if (dd->ipath_int_counter != (u32) -1)
-               dd->ipath_int_counter++;
-
-       if (!(dd->ipath_flags & IPATH_PRESENT)) {
-               /*
-                * This return value is not great, but we do not want the
-                * interrupt core code to remove our interrupt handler
-                * because we don't appear to be handling an interrupt
-                * during a chip reset.
-                */
-               return IRQ_HANDLED;
-       }
-
-       /*
-        * this needs to be flags&initted, not statusp, so we keep
-        * taking interrupts even after link goes down, etc.
-        * Also, we *must* clear the interrupt at some point, or we won't
-        * take it again, which can be really bad for errors, etc.
-        */
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               ipath_bad_intr(dd, &unexpected);
-               ret = IRQ_NONE;
-               goto bail;
-       }
-
-       istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
-
-       if (unlikely(!istat)) {
-               ipath_stats.sps_nullintr++;
-               ret = IRQ_NONE; /* not our interrupt, or already handled */
-               goto bail;
-       }
-       if (unlikely(istat == -1)) {
-               ipath_bad_regread(dd);
-               /* don't know if it was our interrupt or not */
-               ret = IRQ_NONE;
-               goto bail;
-       }
-
-       if (unexpected)
-               unexpected = 0;
-
-       if (unlikely(istat & ~dd->ipath_i_bitsextant))
-               ipath_dev_err(dd,
-                             "interrupt with unknown interrupts %Lx set\n",
-                             (unsigned long long)
-                             istat & ~dd->ipath_i_bitsextant);
-       else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
-               ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
-                       (unsigned long long) istat);
-
-       if (istat & INFINIPATH_I_ERROR) {
-               ipath_stats.sps_errints++;
-               estat = ipath_read_kreg64(dd,
-                                         dd->ipath_kregs->kr_errorstatus);
-               if (!estat)
-                       dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
-                                "but no error bits set!\n",
-                                (unsigned long long) istat);
-               else if (estat == -1LL)
-                       /*
-                        * should we try clearing all, or hope next read
-                        * works?
-                        */
-                       ipath_dev_err(dd, "Read of error status failed "
-                                     "(all bits set); ignoring\n");
-               else
-                       chk0rcv |= handle_errors(dd, estat);
-       }
-
-       if (istat & INFINIPATH_I_GPIO) {
-               /*
-                * GPIO interrupts fall in two broad classes:
-                * GPIO_2 indicates (on some HT4xx boards) that a packet
-                *        has arrived for Port 0. Checking for this
-                *        is controlled by flag IPATH_GPIO_INTR.
-                * GPIO_3..5 on IBA6120 Rev2 and IBA6110 Rev4 chips indicate
-                *        errors that we need to count. Checking for this
-                *        is controlled by flag IPATH_GPIO_ERRINTRS.
-                */
-               u32 gpiostatus;
-               u32 to_clear = 0;
-
-               gpiostatus = ipath_read_kreg32(
-                       dd, dd->ipath_kregs->kr_gpio_status);
-               /* First the error-counter case. */
-               if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
-                   (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
-                       /* want to clear the bits we see asserted. */
-                       to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK);
-
-                       /*
-                        * Count appropriately, clear bits out of our copy,
-                        * as they have been "handled".
-                        */
-                       if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) {
-                               ipath_dbg("FlowCtl on UnsupVL\n");
-                               dd->ipath_rxfc_unsupvl_errs++;
-                       }
-                       if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) {
-                               ipath_dbg("Overrun Threshold exceeded\n");
-                               dd->ipath_overrun_thresh_errs++;
-                       }
-                       if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) {
-                               ipath_dbg("Local Link Integrity error\n");
-                               dd->ipath_lli_errs++;
-                       }
-                       gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK;
-               }
-               /* Now the Port0 Receive case */
-               if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) &&
-                   (dd->ipath_flags & IPATH_GPIO_INTR)) {
-                       /*
-                        * GPIO status bit 2 is set, and we expected it.
-                        * Clear it and note that a Port0 receive is needed.
-                        * This probably only happens if a Port0 pkt
-                        * arrives at _just_ the wrong time, and we
-                        * handle that by setting chk0rcv.
-                        */
-                       to_clear |= (1 << IPATH_GPIO_PORT0_BIT);
-                       gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
-                       chk0rcv = 1;
-               }
-               if (gpiostatus) {
-                       /*
-                        * Some unexpected bits remain. If they could have
-                        * caused the interrupt, complain and clear.
-                        * To avoid repetition of this condition, also clear
-                        * the mask. It is almost certainly due to error.
-                        */
-                       const u32 mask = (u32) dd->ipath_gpio_mask;
-
-                       if (mask & gpiostatus) {
-                               ipath_dbg("Unexpected GPIO IRQ bits %x\n",
-                                 gpiostatus & mask);
-                               to_clear |= (gpiostatus & mask);
-                               dd->ipath_gpio_mask &= ~(gpiostatus & mask);
-                               ipath_write_kreg(dd,
-                                       dd->ipath_kregs->kr_gpio_mask,
-                                       dd->ipath_gpio_mask);
-                       }
-               }
-               if (to_clear) {
-                       ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
-                                       (u64) to_clear);
-               }
-       }
-
-       /*
-        * Clear the interrupt bits we found set, unless they are receive
-        * related, in which case we already cleared them above, and don't
-        * want to clear them again, because we might lose an interrupt.
-        * Clear it early, so we "know" the chip will have seen this by
-        * the time we process the queue, and will re-interrupt if necessary.
-        * The processor itself won't take the interrupt again until we return.
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
-
-       /*
-        * Handle kernel receive queues before checking for pio buffers
-        * available since receives can overflow; piobuf waiters can afford
-        * a few extra cycles, since they were waiting anyway, and users
-        * waiting for receive are at the bottom.
-        */
-       kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
-               (1ULL << dd->ipath_i_rcvurg_shift);
-       if (chk0rcv || (istat & kportrbits)) {
-               istat &= ~kportrbits;
-               ipath_kreceive(dd->ipath_pd[0]);
-       }
-
-       if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
-                    (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
-               handle_urcv(dd, istat);
-
-       if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
-               handle_sdma_intr(dd, istat);
-
-       if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                dd->ipath_sendctrl);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-               /* always process; sdma verbs uses PIO for acks and VL15  */
-               handle_layer_pioavail(dd);
-       }
-
-       ret = IRQ_HANDLED;
-
-bail:
-       return ret;
-}
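
The hardware-error reporting in this file is table driven: ipath_format_hwerrors() walks arrays of { mask, msg } pairs (the generic table above plus a chip-specific one) and appends a bracketed string for every bit that is set. Below is a stripped-down sketch of that pattern; the example_* names and mask values are assumptions for illustration, not the driver's actual tables.

/* Sketch only (assumed names and masks): table-driven formatting of a
 * hardware-error bit vector, in the style of ipath_format_hwerrors().
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_hwerr_msg {
	u64 mask;		/* bit(s) this entry describes */
	const char *msg;	/* human-readable description */
};

static const struct example_hwerr_msg example_hwerr_msgs[] = {
	{ 0x1ULL, "PIO buffer memory parity" },
	{ 0x2ULL, "Receive buffer memory parity" },
};

static void example_format_hwerrors(u64 hwerrs, char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(example_hwerr_msgs); i++) {
		if (hwerrs & example_hwerr_msgs[i].mask) {
			/* append "[<msg>]" for each error bit that is set */
			strlcat(buf, "[", len);
			strlcat(buf, example_hwerr_msgs[i].msg, len);
			strlcat(buf, "]", len);
		}
	}
}
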
diff --git a/drivers/staging/rdma/ipath/ipath_kernel.h b/drivers/staging/rdma/ipath/ipath_kernel.h
deleted file mode 100644 (file)
index 66c934a..0000000
+++ /dev/null
@@ -1,1374 +0,0 @@
-#ifndef _IPATH_KERNEL_H
-#define _IPATH_KERNEL_H
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This header file is the base header file for infinipath kernel code;
- * ipath_user.h serves a similar purpose for user code.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <asm/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_common.h"
-#include "ipath_debug.h"
-#include "ipath_registers.h"
-
-/* only s/w major version of InfiniPath we can handle */
-#define IPATH_CHIP_VERS_MAJ 2U
-
-/* don't care about this except printing */
-#define IPATH_CHIP_VERS_MIN 0U
-
-/* temporary, maybe always */
-extern struct infinipath_stats ipath_stats;
-
-#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
-/*
- * First-cut criterion for "device is active" is
- * two thousand dwords combined Tx, Rx traffic per
- * 5-second interval. SMA packets are 64 dwords,
- * and occur "a few per second", presumably each way.
- */
-#define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
-/*
- * Struct used to indicate which errors are logged in each of the
- * error-counters that are logged to EEPROM. A counter is incremented
- * _once_ (saturating at 255) for each event with any bits set in
- * the error or hwerror register masks below.
- */
-#define IPATH_EEP_LOG_CNT (4)
-struct ipath_eep_log_mask {
-       u64 errs_to_log;
-       u64 hwerrs_to_log;
-};
-
-struct ipath_portdata {
-       void **port_rcvegrbuf;
-       dma_addr_t *port_rcvegrbuf_phys;
-       /* rcvhdrq base, needs mmap before useful */
-       void *port_rcvhdrq;
-       /* kernel virtual address where hdrqtail is updated */
-       void *port_rcvhdrtail_kvaddr;
-       /*
-        * temp buffer for expected send setup, allocated at open, instead
-        * of each setup call
-        */
-       void *port_tid_pg_list;
-       /* when waiting for rcv or pioavail */
-       wait_queue_head_t port_wait;
-       /*
-        * rcvegr bufs base, physical; must fit in 44 bits so that a
-        * 32-bit program's mmap64 of the 44-bit address still works
-        */
-       dma_addr_t port_rcvegr_phys;
-       /* mmap of hdrq, must fit in 44 bits */
-       dma_addr_t port_rcvhdrq_phys;
-       dma_addr_t port_rcvhdrqtailaddr_phys;
-       /*
-        * number of opens (including slave subports) on this instance
-        * (ignoring forks, dup, etc. for now)
-        */
-       int port_cnt;
-       /*
-        * how much space to leave at start of eager TID entries for
-        * protocol use, on each TID
-        */
-       /* this port's number, stored here instead of recalculating it */
-       unsigned port_port;
-       /* non-zero if port is being shared. */
-       u16 port_subport_cnt;
-       /* subport id of this instance when the port is being shared */
-       u16 port_subport_id;
-       /* number of pio bufs for this port (all procs, if shared) */
-       u32 port_piocnt;
-       /* first pio buffer for this port */
-       u32 port_pio_base;
-       /* chip offset of PIO buffers for this port */
-       u32 port_piobufs;
-       /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
-       u32 port_rcvegrbuf_chunks;
-       /* how many egrbufs per chunk */
-       u32 port_rcvegrbufs_perchunk;
-       /* order for port_rcvegrbuf_pages */
-       size_t port_rcvegrbuf_size;
-       /* rcvhdrq size (for freeing) */
-       size_t port_rcvhdrq_size;
-       /* next expected TID to check when looking for free */
-       u32 port_tidcursor;
-       /* portdata flag bits (see the IPATH_PORT_* bit offsets below) */
-       unsigned long port_flag;
-       /* what happened */
-       unsigned long int_flag;
-       /* WAIT_RCV that timed out, no interrupt */
-       u32 port_rcvwait_to;
-       /* WAIT_PIO that timed out, no interrupt */
-       u32 port_piowait_to;
-       /* WAIT_RCV already happened, no wait */
-       u32 port_rcvnowait;
-       /* WAIT_PIO already happened, no wait */
-       u32 port_pionowait;
-       /* total number of rcvhdrqfull errors */
-       u32 port_hdrqfull;
-       /*
-        * Used to suppress multiple instances of same
-        * port staying stuck at same point.
-        */
-       u32 port_lastrcvhdrqtail;
-       /* saved total number of rcvhdrqfull errors for poll edge trigger */
-       u32 port_hdrqfull_poll;
-       /* total number of polled urgent packets */
-       u32 port_urgent;
-       /* saved total number of polled urgent packets for poll edge trigger */
-       u32 port_urgent_poll;
-       /* pid of process using this port */
-       struct pid *port_pid;
-       struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
-       /* same size as task_struct .comm[] */
-       char port_comm[TASK_COMM_LEN];
-       /* pkeys set by this use of this port */
-       u16 port_pkeys[4];
-       /* so file ops can get at unit */
-       struct ipath_devdata *port_dd;
-       /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
-       void *subport_uregbase;
-       /* An array of pages for the eager receive buffers * N */
-       void *subport_rcvegrbuf;
-       /* An array of pages for the eager header queue entries * N */
-       void *subport_rcvhdr_base;
-       /* The version of the library which opened this port */
-       u32 userversion;
-       /* Bitmask of active slaves */
-       u32 active_slaves;
-       /* Type of packets or conditions we want to poll for */
-       u16 poll_type;
-       /* port rcvhdrq head offset */
-       u32 port_head;
-       /* receive packet sequence counter */
-       u32 port_seq_cnt;
-};
-
-struct sk_buff;
-struct ipath_sge_state;
-struct ipath_verbs_txreq;
-
-/*
- * control information for layered drivers
- */
-struct _ipath_layer {
-       void *l_arg;
-};
-
-struct ipath_skbinfo {
-       struct sk_buff *skb;
-       dma_addr_t phys;
-};
-
-struct ipath_sdma_txreq {
-       int                 flags;
-       int                 sg_count;
-       union {
-               struct scatterlist *sg;
-               void *map_addr;
-       };
-       void              (*callback)(void *, int);
-       void               *callback_cookie;
-       int                 callback_status;
-       u16                 start_idx;  /* sdma private */
-       u16                 next_descq_idx;  /* sdma private */
-       struct list_head    list;       /* sdma private */
-};
-
-struct ipath_sdma_desc {
-       __le64 qw[2];
-};
-
-#define IPATH_SDMA_TXREQ_F_USELARGEBUF  0x1
-#define IPATH_SDMA_TXREQ_F_HEADTOHOST   0x2
-#define IPATH_SDMA_TXREQ_F_INTREQ       0x4
-#define IPATH_SDMA_TXREQ_F_FREEBUF      0x8
-#define IPATH_SDMA_TXREQ_F_FREEDESC     0x10
-#define IPATH_SDMA_TXREQ_F_VL15         0x20
-
-#define IPATH_SDMA_TXREQ_S_OK        0
-#define IPATH_SDMA_TXREQ_S_SENDERROR 1
-#define IPATH_SDMA_TXREQ_S_ABORTED   2
-#define IPATH_SDMA_TXREQ_S_SHUTDOWN  3
-
-#define IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG    (1ull << 63)
-#define IPATH_SDMA_STATUS_ABORT_IN_PROG                        (1ull << 62)
-#define IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE         (1ull << 61)
-#define IPATH_SDMA_STATUS_SCB_EMPTY                    (1ull << 30)
-
-/* max dwords in small buffer packet */
-#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
-
-/*
- * Possible IB config parameters for ipath_f_get/set_ib_cfg()
- */
-#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
-#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
-#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
-#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
-#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
-#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
-#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
-#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
-#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
-#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
-#define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
-
-
-struct ipath_devdata {
-       struct list_head ipath_list;
-
-       struct ipath_kregs const *ipath_kregs;
-       struct ipath_cregs const *ipath_cregs;
-
-       /* mem-mapped pointer to base of chip regs */
-       u64 __iomem *ipath_kregbase;
-       /* end of mem-mapped chip space; range checking */
-       u64 __iomem *ipath_kregend;
-       /* physical address of chip for io_remap, etc. */
-       unsigned long ipath_physaddr;
-       /* base of memory alloced for ipath_kregbase, for free */
-       u64 *ipath_kregalloc;
-       /* ipath_cfgports pointers */
-       struct ipath_portdata **ipath_pd;
-       /* sk_buffs used by port 0 eager receive queue */
-       struct ipath_skbinfo *ipath_port0_skbinfo;
-       /* kvirt address of 1st 2k pio buffer */
-       void __iomem *ipath_pio2kbase;
-       /* kvirt address of 1st 4k pio buffer */
-       void __iomem *ipath_pio4kbase;
-       /*
-        * points to area where PIOavail registers will be DMA'ed.
-        * Has to be on a page of its own, because the page will be
-        * mapped into user program space.  This copy is *ONLY* ever
-        * written by DMA, not by the driver!  Need a copy per device
-        * when we get to multiple devices
-        */
-       volatile __le64 *ipath_pioavailregs_dma;
-       /* physical address where updates occur */
-       dma_addr_t ipath_pioavailregs_phys;
-       struct _ipath_layer ipath_layer;
-       /* setup intr */
-       int (*ipath_f_intrsetup)(struct ipath_devdata *);
-       /* fallback to alternate interrupt type if possible */
-       int (*ipath_f_intr_fallback)(struct ipath_devdata *);
-       /* setup on-chip bus config */
-       int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
-       /* hard reset chip */
-       int (*ipath_f_reset)(struct ipath_devdata *);
-       int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
-                                    size_t);
-       void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
-       void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
-                                       size_t);
-       void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
-       int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
-       int (*ipath_f_early_init)(struct ipath_devdata *);
-       void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
-       void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
-                               u32, unsigned long);
-       void (*ipath_f_tidtemplate)(struct ipath_devdata *);
-       void (*ipath_f_cleanup)(struct ipath_devdata *);
-       void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
-       /* fill out chip-specific fields */
-       int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
-       /* free irq */
-       void (*ipath_f_free_irq)(struct ipath_devdata *);
-       struct ipath_message_header *(*ipath_f_get_msgheader)
-                                       (struct ipath_devdata *, __le32 *);
-       void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
-       int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
-       int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
-       void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
-       void (*ipath_f_read_counters)(struct ipath_devdata *,
-                                       struct infinipath_counters *);
-       void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
-       /* per chip actions needed for IB Link up/down changes */
-       int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
-
-       unsigned ipath_lastegr_idx;
-       struct ipath_ibdev *verbs_dev;
-       struct timer_list verbs_timer;
-       /* total dwords sent (summed from counter) */
-       u64 ipath_sword;
-       /* total dwords rcvd (summed from counter) */
-       u64 ipath_rword;
-       /* total packets sent (summed from counter) */
-       u64 ipath_spkts;
-       /* total packets rcvd (summed from counter) */
-       u64 ipath_rpkts;
-       /* ipath_statusp initially points to this. */
-       u64 _ipath_status;
-       /* GUID for this interface, in network order */
-       __be64 ipath_guid;
-       /*
-        * aggregate of error bits reported since last cleared, for
-        * limiting of error reporting
-        */
-       ipath_err_t ipath_lasterror;
-       /*
-        * aggregate of error bits reported since last cleared, for
-        * limiting of hwerror reporting
-        */
-       ipath_err_t ipath_lasthwerror;
-       /* errors masked because they occur too fast */
-       ipath_err_t ipath_maskederrs;
-       u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
-       /* these 5 fields are used to establish deltas for IB Symbol
-        * errors and linkrecovery errors. They can be reported on
-        * some chips during link negotiation prior to INIT, and with
-        * DDR when faking DDR negotiations with non-IBTA switches.
-        * The chip counters are adjusted at driver unload if there is
-        * a non-zero delta.
-        */
-       u64 ibdeltainprog;
-       u64 ibsymdelta;
-       u64 ibsymsnap;
-       u64 iblnkerrdelta;
-       u64 iblnkerrsnap;
-
-       /* time in jiffies at which to re-enable maskederrs */
-       unsigned long ipath_unmasktime;
-       /* count of egrfull errors, combined for all ports */
-       u64 ipath_last_tidfull;
-       /* for ipath_qcheck() */
-       u64 ipath_lastport0rcv_cnt;
-       /* template for writing TIDs  */
-       u64 ipath_tidtemplate;
-       /* value to write to free TIDs */
-       u64 ipath_tidinvalid;
-       /* IBA6120 rcv interrupt setup */
-       u64 ipath_rhdrhead_intr_off;
-
-       /* size of memory at ipath_kregbase */
-       u32 ipath_kregsize;
-       /* number of registers used for pioavail */
-       u32 ipath_pioavregs;
-       /* IPATH_POLL, etc. */
-       u32 ipath_flags;
-       /* ipath_flags driver is waiting for */
-       u32 ipath_state_wanted;
-       /* index of the first pio buffer for kernel use; all lower-numbered
-        * buffers are for user use. */
-       u32 ipath_lastport_piobuf;
-       /* is a stats timer active */
-       u32 ipath_stats_timer_active;
-       /* number of interrupts for this device -- saturates... */
-       u32 ipath_int_counter;
-       /* dwords sent read from counter */
-       u32 ipath_lastsword;
-       /* dwords received read from counter */
-       u32 ipath_lastrword;
-       /* sent packets read from counter */
-       u32 ipath_lastspkts;
-       /* received packets read from counter */
-       u32 ipath_lastrpkts;
-       /* pio bufs allocated per port */
-       u32 ipath_pbufsport;
-       /* if remainder on bufs/port, ports < extrabuf get 1 extra */
-       u32 ipath_ports_extrabuf;
-       u32 ipath_pioupd_thresh; /* update threshold, some chips */
-       /*
-        * maximum number of ports configured; zero means use the number the
-        * chip supports; configuring fewer gives more pio bufs/port, etc.
-        */
-       u32 ipath_cfgports;
-       /* count of port 0 hdrqfull errors */
-       u32 ipath_p0_hdrqfull;
-       /* port 0 number of receive eager buffers */
-       u32 ipath_p0_rcvegrcnt;
-
-       /*
-        * index of last piobuffer we used.  Speeds up searching, by
-        * starting at this point.  It doesn't matter if multiple CPUs use and
-        * update it; the last updater's write is all that matters.  Whenever it
-        * wraps, we update shadow copies.  Need a copy per device when we
-        * get to multiple devices
-        */
-       u32 ipath_lastpioindex;
-       u32 ipath_lastpioindexl;
-       /* max length of freezemsg */
-       u32 ipath_freezelen;
-       /*
-        * consecutive times we wanted a PIO buffer but were unable to
-        * get one
-        */
-       u32 ipath_consec_nopiobuf;
-       /*
-        * hint that we should update ipath_pioavailshadow before
-        * looking for a PIO buffer
-        */
-       u32 ipath_upd_pio_shadow;
-       /* so we can rewrite it after a chip reset */
-       u32 ipath_pcibar0;
-       /* so we can rewrite it after a chip reset */
-       u32 ipath_pcibar1;
-       u32 ipath_x1_fix_tries;
-       u32 ipath_autoneg_tries;
-       u32 serdes_first_init_done;
-
-       struct ipath_relock {
-               atomic_t ipath_relock_timer_active;
-               struct timer_list ipath_relock_timer;
-               unsigned int ipath_relock_interval; /* in jiffies */
-       } ipath_relock_singleton;
-
-       /* interrupt number */
-       int ipath_irq;
-       /* HT/PCI Vendor ID (here for NodeInfo) */
-       u16 ipath_vendorid;
-       /* HT/PCI Device ID (here for NodeInfo) */
-       u16 ipath_deviceid;
-       /* offset in HT config space of slave/primary interface block */
-       u8 ipath_ht_slave_off;
-       /* for write combining settings */
-       int wc_cookie;
-       /* ref count for each pkey */
-       atomic_t ipath_pkeyrefs[4];
-       /* shadow copy of struct page *'s for exp tid pages */
-       struct page **ipath_pageshadow;
-       /* shadow copy of dma handles for exp tid pages */
-       dma_addr_t *ipath_physshadow;
-       u64 __iomem *ipath_egrtidbase;
-       /* lock to work around chip bug 9437 and others */
-       spinlock_t ipath_kernel_tid_lock;
-       spinlock_t ipath_user_tid_lock;
-       spinlock_t ipath_sendctrl_lock;
-       /* around ipath_pd and (user ports) port_cnt use (intr vs free) */
-       spinlock_t ipath_uctxt_lock;
-
-       /*
-        * IPATH_STATUS_*,
-        * this address is mapped readonly into user processes so they can
-        * get status cheaply, whenever they want.
-        */
-       u64 *ipath_statusp;
-       /* freeze msg if hw error put chip in freeze */
-       char *ipath_freezemsg;
-       /* pci access data structure */
-       struct pci_dev *pcidev;
-       struct cdev *user_cdev;
-       struct cdev *diag_cdev;
-       struct device *user_dev;
-       struct device *diag_dev;
-       /* timer used to prevent stats overflow, error throttling, etc. */
-       struct timer_list ipath_stats_timer;
-       /* timer to verify interrupts work, and fallback if possible */
-       struct timer_list ipath_intrchk_timer;
-       void *ipath_dummy_hdrq; /* used after port close */
-       dma_addr_t ipath_dummy_hdrq_phys;
-
-       /* SendDMA related entries */
-       spinlock_t            ipath_sdma_lock;
-       unsigned long         ipath_sdma_status;
-       unsigned long         ipath_sdma_abort_jiffies;
-       unsigned long         ipath_sdma_abort_intr_timeout;
-       unsigned long         ipath_sdma_buf_jiffies;
-       struct ipath_sdma_desc *ipath_sdma_descq;
-       u64                   ipath_sdma_descq_added;
-       u64                   ipath_sdma_descq_removed;
-       int                   ipath_sdma_desc_nreserved;
-       u16                   ipath_sdma_descq_cnt;
-       u16                   ipath_sdma_descq_tail;
-       u16                   ipath_sdma_descq_head;
-       u16                   ipath_sdma_next_intr;
-       u16                   ipath_sdma_reset_wait;
-       u8                    ipath_sdma_generation;
-       struct tasklet_struct ipath_sdma_abort_task;
-       struct tasklet_struct ipath_sdma_notify_task;
-       struct list_head      ipath_sdma_activelist;
-       struct list_head      ipath_sdma_notifylist;
-       atomic_t              ipath_sdma_vl15_count;
-       struct timer_list     ipath_sdma_vl15_timer;
-
-       dma_addr_t       ipath_sdma_descq_phys;
-       volatile __le64 *ipath_sdma_head_dma;
-       dma_addr_t       ipath_sdma_head_phys;
-
-       unsigned long ipath_ureg_align; /* user register alignment */
-
-       struct delayed_work ipath_autoneg_work;
-       wait_queue_head_t ipath_autoneg_wait;
-
-       /* HoL blocking / user app forward-progress state */
-       unsigned          ipath_hol_state;
-       unsigned          ipath_hol_next;
-       struct timer_list ipath_hol_timer;
-
-       /*
-        * Shadow copies of registers; size indicates read access size.
-        * Most of them are readonly, but some are write-only registers,
-        * where we manipulate the bits in the shadow copy, and then write
-        * the shadow copy to infinipath.
-        *
-        * We deliberately make most of these 32 bits, since they have
-        * restricted range.  For any that we read, we want to generate 32
-        * bit accesses, since Opteron will generate 2 separate 32 bit HT
-        * transactions for a 64 bit read, and we want to avoid unnecessary
-        * HT transactions.
-        */
-
-       /* This is the 64 bit group */
-
-       /*
-        * shadow of pioavail, check to be sure it's large enough at
-        * init time.
-        */
-       unsigned long ipath_pioavailshadow[8];
-       /* bitmap of send buffers available for the kernel to use with PIO. */
-       unsigned long ipath_pioavailkernel[8];
-       /* shadow of kr_gpio_out, for rmw ops */
-       u64 ipath_gpio_out;
-       /* shadow the gpio mask register */
-       u64 ipath_gpio_mask;
-       /* shadow the gpio output enable, etc... */
-       u64 ipath_extctrl;
-       /* kr_revision shadow */
-       u64 ipath_revision;
-       /*
-        * shadow of ibcctrl, for interrupt handling of link changes,
-        * etc.
-        */
-       u64 ipath_ibcctrl;
-       /*
-        * last ibcstatus, to suppress "duplicate" status change messages,
-        * mostly from 2 to 3
-        */
-       u64 ipath_lastibcstat;
-       /* hwerrmask shadow */
-       ipath_err_t ipath_hwerrmask;
-       ipath_err_t ipath_errormask; /* errormask shadow */
-       /* interrupt config reg shadow */
-       u64 ipath_intconfig;
-       /* kr_sendpiobufbase value */
-       u64 ipath_piobufbase;
-       /* kr_ibcddrctrl shadow */
-       u64 ipath_ibcddrctrl;
-
-       /* these are the "32 bit" regs */
-
-       /*
-        * number of GUIDs in the flash for this interface; may need some
-        * rethinking for setting on other ifaces
-        */
-       u32 ipath_nguid;
-       /*
-        * the following two are 32-bit bitmasks, but {test,clear,set}_bit
-        * all expect bit fields to be "unsigned long"
-        */
-       /* shadow kr_rcvctrl */
-       unsigned long ipath_rcvctrl;
-       /* shadow kr_sendctrl */
-       unsigned long ipath_sendctrl;
-       /* to not count armlaunch after cancel */
-       unsigned long ipath_lastcancel;
-       /* count cases where special trigger was needed (double write) */
-       unsigned long ipath_spectriggerhit;
-
-       /* value we put in kr_rcvhdrcnt */
-       u32 ipath_rcvhdrcnt;
-       /* value we put in kr_rcvhdrsize */
-       u32 ipath_rcvhdrsize;
-       /* value we put in kr_rcvhdrentsize */
-       u32 ipath_rcvhdrentsize;
-       /* offset of last entry in rcvhdrq */
-       u32 ipath_hdrqlast;
-       /* kr_portcnt value */
-       u32 ipath_portcnt;
-       /* kr_pagealign value */
-       u32 ipath_palign;
-       /* number of "2KB" PIO buffers */
-       u32 ipath_piobcnt2k;
-       /* size in bytes of "2KB" PIO buffers */
-       u32 ipath_piosize2k;
-       /* number of "4KB" PIO buffers */
-       u32 ipath_piobcnt4k;
-       /* size in bytes of "4KB" PIO buffers */
-       u32 ipath_piosize4k;
-       u32 ipath_pioreserved; /* reserved special-inkernel; */
-       /* kr_rcvegrbase value */
-       u32 ipath_rcvegrbase;
-       /* kr_rcvegrcnt value */
-       u32 ipath_rcvegrcnt;
-       /* kr_rcvtidbase value */
-       u32 ipath_rcvtidbase;
-       /* kr_rcvtidcnt value */
-       u32 ipath_rcvtidcnt;
-       /* kr_sendregbase */
-       u32 ipath_sregbase;
-       /* kr_userregbase */
-       u32 ipath_uregbase;
-       /* kr_counterregbase */
-       u32 ipath_cregbase;
-       /* shadow the control register contents */
-       u32 ipath_control;
-       /* PCI revision register (HTC rev on FPGA) */
-       u32 ipath_pcirev;
-
-       /* chip address space used by 4k pio buffers */
-       u32 ipath_4kalign;
-       /* The MTU programmed for this unit */
-       u32 ipath_ibmtu;
-       /*
-        * The max size IB packet, including IB headers, that we can send.
-        * Starts same as ipath_piosize, but is affected when ibmtu is
-        * changed, or by size of eager buffers
-        */
-       u32 ipath_ibmaxlen;
-       /*
-        * ibmaxlen at init time, limited by chip and by receive buffer
-        * size.  Not changed after init.
-        */
-       u32 ipath_init_ibmaxlen;
-       /* size of each rcvegrbuffer */
-       u32 ipath_rcvegrbufsize;
-       /* localbus width (1, 2, 4, 8, 16, 32) from config space */
-       u32 ipath_lbus_width;
-       /* localbus speed (HT: 200,400,800,1000; PCIe 2500) */
-       u32 ipath_lbus_speed;
-       /*
-        * number of sequential ibcstatus changes for polling active/quiet
-        * (i.e., link not coming up).
-        */
-       u32 ipath_ibpollcnt;
-       /* low and high portions of MSI capability/vector */
-       u32 ipath_msi_lo;
-       /* saved after PCIe init for restore after reset */
-       u32 ipath_msi_hi;
-       /* MSI data (vector) saved for restore */
-       u16 ipath_msi_data;
-       /* MLID programmed for this instance */
-       u16 ipath_mlid;
-       /* LID programmed for this instance */
-       u16 ipath_lid;
-       /* list of pkeys programmed; 0 if not set */
-       u16 ipath_pkeys[4];
-       /*
-        * ASCII serial number, from flash; large enough for the original
-        * all-digit strings and the longer QLogic serial number format
-        */
-       u8 ipath_serial[16];
-       /* human readable board version */
-       u8 ipath_boardversion[96];
-       u8 ipath_lbus_info[32]; /* human readable localbus info */
-       /* chip major rev, from ipath_revision */
-       u8 ipath_majrev;
-       /* chip minor rev, from ipath_revision */
-       u8 ipath_minrev;
-       /* board rev, from ipath_revision */
-       u8 ipath_boardrev;
-       /* saved for restore after reset */
-       u8 ipath_pci_cacheline;
-       /* LID mask control */
-       u8 ipath_lmc;
-       /* link width supported */
-       u8 ipath_link_width_supported;
-       /* link speed supported */
-       u8 ipath_link_speed_supported;
-       u8 ipath_link_width_enabled;
-       u8 ipath_link_speed_enabled;
-       u8 ipath_link_width_active;
-       u8 ipath_link_speed_active;
-       /* Rx Polarity inversion (compensate for ~tx on partner) */
-       u8 ipath_rx_pol_inv;
-
-       u8 ipath_r_portenable_shift;
-       u8 ipath_r_intravail_shift;
-       u8 ipath_r_tailupd_shift;
-       u8 ipath_r_portcfg_shift;
-
-       /* unit # of this chip, if present */
-       int ipath_unit;
-
-       /* local link integrity counter */
-       u32 ipath_lli_counter;
-       /* local link integrity errors */
-       u32 ipath_lli_errors;
-       /*
-        * Above counts only cases where _successive_ LocalLinkIntegrity
-        * errors were seen in the receive headers of kern-packets.
-        * Below are the three (monotonically increasing) counters
-        * maintained via GPIO interrupts on iba6120-rev2.
-        */
-       u32 ipath_rxfc_unsupvl_errs;
-       u32 ipath_overrun_thresh_errs;
-       u32 ipath_lli_errs;
-
-       /*
-        * Not all devices managed by a driver instance are the same
-        * type, so these fields must be per-device.
-        */
-       u64 ipath_i_bitsextant;
-       ipath_err_t ipath_e_bitsextant;
-       ipath_err_t ipath_hwe_bitsextant;
-
-       /*
-        * Below should be computable from number of ports,
-        * since they are never modified.
-        */
-       u64 ipath_i_rcvavail_mask;
-       u64 ipath_i_rcvurg_mask;
-       u16 ipath_i_rcvurg_shift;
-       u16 ipath_i_rcvavail_shift;
-
-       /*
-        * Register bits for selecting i2c direction and values, used for
-        * I2C serial flash.
-        */
-       u8 ipath_gpio_sda_num;
-       u8 ipath_gpio_scl_num;
-       u8 ipath_i2c_chain_type;
-       u64 ipath_gpio_sda;
-       u64 ipath_gpio_scl;
-
-       /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
-       spinlock_t ipath_gpio_lock;
-
-       /*
-        * IB link and linktraining states and masks that vary per chip in
-        * some way.  Set at init, to avoid recomputing them on each IB status change interrupt
-        */
-       u8 ibcs_ls_shift;
-       u8 ibcs_lts_mask;
-       u32 ibcs_mask;
-       u32 ib_init;
-       u32 ib_arm;
-       u32 ib_active;
-
-       u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
-
-       /*
-        * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontrol
-        * reg. Changes for IBA7220
-        */
-       u8 ibcc_lic_mask; /* LinkInitCmd */
-       u8 ibcc_lc_shift; /* LinkCmd */
-       u8 ibcc_mpl_shift; /* Maxpktlen */
-
-       u8 delay_mult;
-
-       /* used to override LED behavior */
-       u8 ipath_led_override;  /* Substituted for normal value, if non-zero */
-       u16 ipath_led_override_timeoff; /* delta to next timer event */
-       u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
-       u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
-       atomic_t ipath_led_override_timer_active;
-       /* Used to flash LEDs in override mode */
-       struct timer_list ipath_led_override_timer;
-
-       /* Support (including locks) for EEPROM logging of errors and time */
-       /* control access to actual counters, timer */
-       spinlock_t ipath_eep_st_lock;
-       /* control high-level access to EEPROM */
-       struct mutex ipath_eep_lock;
-       /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
-       uint64_t ipath_traffic_wds;
-       /* active time is kept in seconds, but logged in hours */
-       atomic_t ipath_active_time;
-       /* Below are nominal shadow of EEPROM, new since last EEPROM update */
-       uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
-       uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
-       uint16_t ipath_eep_hrs;
-       /*
-        * masks for which bits of errs, hwerrs that cause
-        * each of the counters to increment.
-        */
-       struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
-
-       /* interrupt mitigation reload register info */
-       u16 ipath_jint_idle_ticks;      /* idle clock ticks */
-       u16 ipath_jint_max_packets;     /* max packets across all ports */
-
-       /*
-        * lock for access to SerDes, and flags to sequence preset
-        * versus steady-state. 7220-only at the moment.
-        */
-       spinlock_t ipath_sdepb_lock;
-       u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
-};
-
-/* ipath_hol_state values (stopping/starting user proc, send flushing) */
-#define IPATH_HOL_UP       0
-#define IPATH_HOL_DOWN     1
-/* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
-#define IPATH_HOL_DOWNSTOP 0
-#define IPATH_HOL_DOWNCONT 1
-
-/* bit positions for sdma_status */
-#define IPATH_SDMA_ABORTING  0
-#define IPATH_SDMA_DISARMED  1
-#define IPATH_SDMA_DISABLED  2
-#define IPATH_SDMA_LAYERBUF  3
-#define IPATH_SDMA_RUNNING  30
-#define IPATH_SDMA_SHUTDOWN 31
-
-/* bit combinations that correspond to abort states */
-#define IPATH_SDMA_ABORT_NONE 0
-#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
-#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
-       (1UL << IPATH_SDMA_DISARMED))
-#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
-       (1UL << IPATH_SDMA_DISABLED))
-#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
-       (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
-#define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
-       (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
-
-#define IPATH_SDMA_BUF_NONE 0
-#define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
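The sdma_status definitions above are single bit positions that get OR-ed into the named IPATH_SDMA_ABORT_* states and tested as a masked combination. The following standalone sketch shows how such a masked comparison typically works; the bit numbers mirror the ones above, but the DEMO_* names and the surrounding program are illustrative assumptions, not driver code.

/* Illustrative sketch only: testing a multi-bit abort state built from
 * single bit positions, in the style of the IPATH_SDMA_* definitions. */
#include <stdio.h>

#define DEMO_ABORTING  0
#define DEMO_DISARMED  1
#define DEMO_DISABLED  2
#define DEMO_RUNNING  30

#define DEMO_ABORT_MASK    ((1UL << DEMO_ABORTING) | \
			    (1UL << DEMO_DISARMED) | (1UL << DEMO_DISABLED))
#define DEMO_ABORT_ABORTED ((1UL << DEMO_ABORTING) | \
			    (1UL << DEMO_DISARMED) | (1UL << DEMO_DISABLED))

int main(void)
{
	unsigned long status = 1UL << DEMO_RUNNING;  /* unrelated bit stays set */

	/* An abort sequence sets one bit at a time ... */
	status |= 1UL << DEMO_ABORTING;
	status |= 1UL << DEMO_DISARMED;
	status |= 1UL << DEMO_DISABLED;

	/* ... and the consumer masks first, so unrelated bits such as
	 * "running" do not disturb the comparison against the named state. */
	if ((status & DEMO_ABORT_MASK) == DEMO_ABORT_ABORTED)
		printf("abort sequence complete\n");
	return 0;
}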
-
-/* Private data for file operations */
-struct ipath_filedata {
-       struct ipath_portdata *pd;
-       unsigned subport;
-       unsigned tidcursor;
-       struct ipath_user_sdma_queue *pq;
-};
-extern struct list_head ipath_dev_list;
-extern spinlock_t ipath_devs_lock;
-extern struct ipath_devdata *ipath_lookup(int unit);
-
-int ipath_init_chip(struct ipath_devdata *, int);
-int ipath_enable_wc(struct ipath_devdata *dd);
-void ipath_disable_wc(struct ipath_devdata *dd);
-int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
-void ipath_shutdown_device(struct ipath_devdata *);
-void ipath_clear_freeze(struct ipath_devdata *);
-
-struct file_operations;
-int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
-                   struct cdev **cdevp, struct device **devp);
-void ipath_cdev_cleanup(struct cdev **cdevp,
-                       struct device **devp);
-
-int ipath_diag_add(struct ipath_devdata *);
-void ipath_diag_remove(struct ipath_devdata *);
-
-extern wait_queue_head_t ipath_state_wait;
-
-int ipath_user_add(struct ipath_devdata *dd);
-void ipath_user_remove(struct ipath_devdata *dd);
-
-struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
-
-extern int ipath_diag_inuse;
-
-irqreturn_t ipath_intr(int irq, void *devid);
-int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
-                    ipath_err_t err);
-#if __IPATH_INFO || __IPATH_DBG
-extern const char *ipath_ibcstatus_str[];
-#endif
-
-/* clean up any per-chip chip-specific stuff */
-void ipath_chip_cleanup(struct ipath_devdata *);
-/* clean up any chip type-specific stuff */
-void ipath_chip_done(void);
-
-void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
-                         unsigned cnt);
-void ipath_cancel_sends(struct ipath_devdata *, int);
-
-int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
-void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
-
-int ipath_parse_ushort(const char *str, unsigned short *valp);
-
-void ipath_kreceive(struct ipath_portdata *);
-int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
-int ipath_reset_device(int);
-void ipath_get_faststats(unsigned long);
-int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
-int ipath_set_linkstate(struct ipath_devdata *, u8);
-int ipath_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_lid(struct ipath_devdata *, u32, u8);
-int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
-void ipath_enable_armlaunch(struct ipath_devdata *);
-void ipath_disable_armlaunch(struct ipath_devdata *);
-void ipath_hol_down(struct ipath_devdata *);
-void ipath_hol_up(struct ipath_devdata *);
-void ipath_hol_event(unsigned long);
-void ipath_toggle_rclkrls(struct ipath_devdata *);
-void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
-void ipath_set_relock_poll(struct ipath_devdata *, int);
-void ipath_shutdown_relock_poll(struct ipath_devdata *);
-
-/* for use in system calls, where we want to know device type, etc. */
-#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
-#define subport_fp(fp) \
-       ((struct ipath_filedata *)(fp)->private_data)->subport
-#define tidcursor_fp(fp) \
-       ((struct ipath_filedata *)(fp)->private_data)->tidcursor
-#define user_sdma_queue_fp(fp) \
-       ((struct ipath_filedata *)(fp)->private_data)->pq
-
-/*
- * values for ipath_flags
- */
-               /* chip can report link latency (IB 1.2) */
-#define IPATH_HAS_LINK_LATENCY 0x1
-               /* The chip is up and initted */
-#define IPATH_INITTED       0x2
-               /* set if any user code has set kr_rcvhdrsize */
-#define IPATH_RCVHDRSZ_SET  0x4
-               /* The chip is present and valid for accesses */
-#define IPATH_PRESENT       0x8
-               /* HT link0 is only 8 bits wide, ignore upper byte crc
-                * errors, etc. */
-#define IPATH_8BIT_IN_HT0   0x10
-               /* HT link1 is only 8 bits wide, ignore upper byte crc
-                * errors, etc. */
-#define IPATH_8BIT_IN_HT1   0x20
-               /* The link is down */
-#define IPATH_LINKDOWN      0x40
-               /* The link level is up (0x11) */
-#define IPATH_LINKINIT      0x80
-               /* The link is in the armed (0x21) state */
-#define IPATH_LINKARMED     0x100
-               /* The link is in the active (0x31) state */
-#define IPATH_LINKACTIVE    0x200
-               /* link current state is unknown */
-#define IPATH_LINKUNK       0x400
-               /* Write combining flush needed for PIO */
-#define IPATH_PIO_FLUSH_WC  0x1000
-               /* no DMA'ed receive tail pointer; detect new packets via RHF sequence numbers */
-#define IPATH_NODMA_RTAIL   0x2000
-               /* no IB cable, or no device on IB cable */
-#define IPATH_NOCABLE       0x4000
-               /* Supports port zero per packet receive interrupts via
-                * GPIO */
-#define IPATH_GPIO_INTR     0x8000
-               /* uses the coded 4byte TID, not 8 byte */
-#define IPATH_4BYTE_TID     0x10000
-               /* packet/word counters are 32 bit, else those 4 counters
-                * are 64 bit */
-#define IPATH_32BITCOUNTERS 0x20000
-               /* Interrupt register is 64 bits */
-#define IPATH_INTREG_64     0x40000
-               /* can miss port0 rx interrupts */
-#define IPATH_DISABLED      0x80000 /* administratively disabled */
-               /* Use GPIO interrupts for new counters */
-#define IPATH_GPIO_ERRINTRS 0x100000
-#define IPATH_SWAP_PIOBUFS  0x200000
-               /* Supports Send DMA */
-#define IPATH_HAS_SEND_DMA  0x400000
-               /* Supports Send Count (not just word count) in PBC */
-#define IPATH_HAS_PBC_CNT   0x800000
-               /* Suppress heartbeat, even if turning off loopback */
-#define IPATH_NO_HRTBT      0x1000000
-#define IPATH_HAS_THRESH_UPDATE 0x4000000
-#define IPATH_HAS_MULT_IB_SPEED 0x8000000
-#define IPATH_IB_AUTONEG_INPROG 0x10000000
-#define IPATH_IB_AUTONEG_FAILED 0x20000000
-               /* Link intentionally disabled; do not attempt to bring it up */
-#define IPATH_IB_LINK_DISABLED 0x40000000
-#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
-
-/* Bits in GPIO for the added interrupts */
-#define IPATH_GPIO_PORT0_BIT 2
-#define IPATH_GPIO_RXUVL_BIT 3
-#define IPATH_GPIO_OVRUN_BIT 4
-#define IPATH_GPIO_LLI_BIT 5
-#define IPATH_GPIO_ERRINTR_MASK 0x38
-
-/* portdata flag bit offsets */
-               /* waiting for a packet to arrive */
-#define IPATH_PORT_WAITING_RCV   2
-               /* master has not finished initializing */
-#define IPATH_PORT_MASTER_UNINIT 4
-               /* waiting for an urgent packet to arrive */
-#define IPATH_PORT_WAITING_URG 5
-
-/* free up any allocated data at closes */
-void ipath_free_data(struct ipath_portdata *dd);
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
-void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
-                               unsigned len, int avail);
-void ipath_init_iba6110_funcs(struct ipath_devdata *);
-void ipath_get_eeprom_info(struct ipath_devdata *);
-int ipath_update_eeprom_log(struct ipath_devdata *dd);
-void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
-u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
-void ipath_disarm_senderrbufs(struct ipath_devdata *);
-void ipath_force_pio_avail_update(struct ipath_devdata *);
-void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
-
-/*
- * Set LED override, only the two LSBs have "public" meaning, but
- * any non-zero value substitutes them for the Link and LinkTrain
- * LED states.
- */
-#define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
-#define IPATH_LED_LOG 2  /* Logical (link) YELLOW LED */
-void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
-
-/* send dma routines */
-int setup_sdma(struct ipath_devdata *);
-void teardown_sdma(struct ipath_devdata *);
-void ipath_restart_sdma(struct ipath_devdata *);
-void ipath_sdma_intr(struct ipath_devdata *);
-int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
-                         u32, struct ipath_verbs_txreq *);
-/* ipath_sdma_lock should be locked before calling this. */
-int ipath_sdma_make_progress(struct ipath_devdata *dd);
-
-/* must be called under ipath_sdma_lock */
-static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
-{
-       return dd->ipath_sdma_descq_cnt -
-               (dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
-               1 - dd->ipath_sdma_desc_nreserved;
-}
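ipath_sdma_descq_freecnt() derives the number of free descriptors from two monotonically increasing counters (added and removed), keeping one slot back and subtracting any reserved entries. A minimal standalone sketch of the same arithmetic, with invented numbers, is shown below; the demo_* names are not part of the driver.

/* Illustrative sketch only: free-slot accounting for a descriptor ring using
 * monotonically increasing "added" and "removed" counters.  All numbers here
 * are invented for the example. */
#include <stdio.h>
#include <stdint.h>

static uint16_t demo_freecnt(uint16_t descq_cnt, uint64_t added,
			     uint64_t removed, int nreserved)
{
	/* in-flight descriptors = added - removed; one slot is typically held
	 * back so a completely full ring stays distinguishable from an empty
	 * one, and reserved entries are not available either. */
	return descq_cnt - (uint16_t)(added - removed) - 1 - nreserved;
}

int main(void)
{
	uint16_t cnt = 256;      /* hypothetical ring size */
	uint64_t added = 1000;   /* total descriptors ever queued */
	uint64_t removed = 900;  /* total descriptors ever completed */
	int reserved = 2;        /* slots set aside by the caller */

	/* 256 - 100 - 1 - 2 = 153 */
	printf("free descriptors: %u\n",
	       demo_freecnt(cnt, added, removed, reserved));
	return 0;
}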
-
-static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
-{
-       dd->ipath_sdma_desc_nreserved += cnt;
-}
-
-static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
-{
-       dd->ipath_sdma_desc_nreserved -= cnt;
-}
-
-/*
- * number of words used for protocol header if not set by ipath_userinit();
- */
-#define IPATH_DFLT_RCVHDRSIZE 9
-
-int ipath_get_user_pages(unsigned long, size_t, struct page **);
-void ipath_release_user_pages(struct page **, size_t);
-void ipath_release_user_pages_on_close(struct page **, size_t);
-int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
-int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
-int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
-int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
-
-/* these are used for the registers that vary with port */
-void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
-                          unsigned, u64);
-
-/*
- * We could have a single register get/put routine, that takes a group type,
- * but this is somewhat clearer and cleaner.  It also gives us some error
- * checking.  64 bit register reads should always work, but are inefficient
- * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
- * so we use kreg32 wherever possible.  User register and counter register
- * reads are always 32 bit reads, so only one form of those routines.
- */
-
-/*
- * At the moment, none of the s-registers are writable, so no
- * ipath_write_sreg().
- */
-
-/**
- * ipath_read_ureg32 - read 32-bit virtualized per-port register
- * @dd: device
- * @regno: register number
- * @port: port number
- *
- * Return the contents of a register that is virtualized to be per port.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
-                                   ipath_ureg regno, int port)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return 0;
-
-       return readl(regno + (u64 __iomem *)
-                    (dd->ipath_uregbase +
-                     (char __iomem *)dd->ipath_kregbase +
-                     dd->ipath_ureg_align * port));
-}
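ipath_read_ureg32() locates a per-port ("virtualized") user register by adding the chip's user-register offset and a per-port stride to the mapped register base, then indexing by register number. The sketch below walks through that address arithmetic with an ordinary heap buffer standing in for the memory-mapped BAR; all sizes, offsets and strides are invented.

/* Illustrative sketch only: the per-port register address arithmetic, with
 * an ordinary heap buffer standing in for the chip's memory-mapped BAR. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const size_t bar_size     = 64 * 1024; /* hypothetical mapping size */
	const uint32_t uregbase   = 0x8000;    /* hypothetical user-register offset */
	const uint32_t ureg_align = 0x1000;    /* hypothetical per-port stride */
	const int port = 2, regno = 3;

	uint8_t *kregbase = calloc(1, bar_size);
	if (!kregbase)
		return 1;

	/* Byte base of this port's user-register page, then index it as an
	 * array of 64-bit registers -- the same shape as the helper above. */
	uint64_t *ubase = (uint64_t *)(kregbase + uregbase +
				       (size_t)ureg_align * port);
	ubase[regno] = 0x12345678;             /* pretend the chip wrote it */

	/* A 32-bit read of the same location; on a little-endian host this
	 * is the low half of the 64-bit register. */
	uint32_t val;
	memcpy(&val, &ubase[regno], sizeof(val));
	printf("port %d reg %d = 0x%x\n", port, regno, val);

	free(kregbase);
	return 0;
}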
-
-/**
- * ipath_write_ureg - write 32-bit virtualized per-port register
- * @dd: device
- * @regno: register number
- * @value: value
- * @port: port
- *
- * Write the contents of a register that is virtualized to be per port.
- */
-static inline void ipath_write_ureg(const struct ipath_devdata *dd,
-                                   ipath_ureg regno, u64 value, int port)
-{
-       u64 __iomem *ubase = (u64 __iomem *)
-               (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
-                dd->ipath_ureg_align * port);
-       if (dd->ipath_kregbase)
-               writeq(value, &ubase[regno]);
-}
-
-static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
-                                   ipath_kreg regno)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return -1;
-       return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
-}
-
-static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
-                                   ipath_kreg regno)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return -1;
-
-       return readq(&dd->ipath_kregbase[regno]);
-}
-
-static inline void ipath_write_kreg(const struct ipath_devdata *dd,
-                                   ipath_kreg regno, u64 value)
-{
-       if (dd->ipath_kregbase)
-               writeq(value, &dd->ipath_kregbase[regno]);
-}
-
-static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
-                                 ipath_creg regno)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return 0;
-
-       return readq(regno + (u64 __iomem *)
-                    (dd->ipath_cregbase +
-                     (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
-                                        ipath_creg regno)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return 0;
-       return readl(regno + (u64 __iomem *)
-                    (dd->ipath_cregbase +
-                     (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline void ipath_write_creg(const struct ipath_devdata *dd,
-                                   ipath_creg regno, u64 value)
-{
-       if (dd->ipath_kregbase)
-               writeq(value, regno + (u64 __iomem *)
-                      (dd->ipath_cregbase +
-                       (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
-{
-       *((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
-}
-
-static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
-{
-       return (u32) le64_to_cpu(*((volatile __le64 *)
-                               pd->port_rcvhdrtail_kvaddr));
-}
-
-static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
-{
-       const struct ipath_devdata *dd = pd->port_dd;
-       u32 hdrqtail;
-
-       if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-               __le32 *rhf_addr;
-               u32 seq;
-
-               rhf_addr = (__le32 *) pd->port_rcvhdrq +
-                       pd->port_head + dd->ipath_rhf_offset;
-               seq = ipath_hdrget_seq(rhf_addr);
-               hdrqtail = pd->port_head;
-               if (seq == pd->port_seq_cnt)
-                       hdrqtail++;
-       } else
-               hdrqtail = ipath_get_rcvhdrtail(pd);
-
-       return hdrqtail;
-}
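When IPATH_NODMA_RTAIL is set, ipath_get_hdrqtail() decides whether a new entry is present by comparing the sequence field of the entry at the current head against the port's expected sequence count, instead of reading a DMA'ed tail pointer. A standalone sketch of that sequence-number test follows; the queue contents and names are invented for illustration.

/* Illustrative sketch only: detecting a newly arrived queue entry by
 * comparing a per-entry sequence number against the expected value,
 * rather than reading a DMA'ed tail pointer. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_QENTRIES 4

int main(void)
{
	/* Pretend hardware stamps each entry with the sequence number of the
	 * pass in which it was written; entries 0 and 1 are from the current
	 * pass (7), the rest are stale (6). */
	uint32_t entry_seq[DEMO_QENTRIES] = { 7, 7, 6, 6 };
	uint32_t head = 1;          /* next entry software would consume */
	uint32_t expected_seq = 7;  /* sequence value of the current pass */

	uint32_t tail = head;
	if (entry_seq[head] == expected_seq)
		tail++;             /* entry at head is new: tail lies beyond it */

	if (tail != head)
		printf("entry %u is ready to be processed\n", head);
	else
		printf("no new entries\n");
	return 0;
}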
-
-static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
-{
-       return (dd->ipath_flags & IPATH_INTREG_64) ?
-               ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
-}
-
-/*
- * from contents of IBCStatus (or a saved copy), return linkstate
- * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
- * everywhere, anyway (and should be, for almost all purposes).
- */
-static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-{
-       u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
-               INFINIPATH_IBCS_LINKSTATE_MASK;
-       if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
-               state = INFINIPATH_IBCS_L_STATE_ACTIVE;
-       return state;
-}
-
-/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
-static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
-{
-       return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-               dd->ibcs_lts_mask;
-}
-
-/*
- * from contents of IBCStatus (or a saved copy), return the logical link
- * state: the combination of link state and linktraining state (down,
- * active, init, arm, etc.).
- */
-static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
-{
-       u32 ibs;
-       ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-               dd->ibcs_lts_mask;
-       ibs |= (u32)(ibcs &
-               (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
-       return ibs;
-}
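These helpers treat IBCStatus as packed shift/mask fields: one field for the link state and one for the link-training state, with per-chip shifts and masks. The sketch below extracts both fields from a made-up status word; the DEMO_* shift and mask values are hypothetical stand-ins for the per-chip constants.

/* Illustrative sketch only: extracting link state and link-training state
 * from a packed status word using shift/mask fields. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_LINKSTATE_SHIFT 4
#define DEMO_LINKSTATE_MASK  0xF
#define DEMO_LTSTATE_SHIFT   0
#define DEMO_LTSTATE_MASK    0xF

int main(void)
{
	uint64_t ibcs = (3ULL << DEMO_LINKSTATE_SHIFT) | 0x2;  /* invented value */

	uint32_t linkstate = (uint32_t)(ibcs >> DEMO_LINKSTATE_SHIFT) & DEMO_LINKSTATE_MASK;
	uint32_t ltstate   = (uint32_t)(ibcs >> DEMO_LTSTATE_SHIFT) & DEMO_LTSTATE_MASK;

	/* The "logical" state used for reporting combines both fields. */
	uint32_t logical = (linkstate << DEMO_LINKSTATE_SHIFT) | ltstate;

	printf("linkstate=%u ltstate=%u logical=0x%x\n", linkstate, ltstate, logical);
	return 0;
}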
-
-/*
- * sysfs interface.
- */
-
-struct device_driver;
-
-extern const char ib_ipath_version[];
-
-extern const struct attribute_group *ipath_driver_attr_groups[];
-
-int ipath_device_create_group(struct device *, struct ipath_devdata *);
-void ipath_device_remove_group(struct device *, struct ipath_devdata *);
-int ipath_expose_reset(struct device *);
-
-int ipath_init_ipathfs(void);
-void ipath_exit_ipathfs(void);
-int ipathfs_add_device(struct ipath_devdata *);
-int ipathfs_remove_device(struct ipath_devdata *);
-
-/*
- * dma_addr wrappers - an all-zero address is invalid for the hw
- */
-dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
-                         size_t, int);
-dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
-const char *ipath_get_unit_name(int unit);
-
-/*
- * Flush write combining store buffers (if present) and perform a write
- * barrier.
- */
-#if defined(CONFIG_X86_64)
-#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
-#else
-#define ipath_flush_wc() wmb()
-#endif
-
-extern unsigned ipath_debug; /* debugging bit mask */
-extern unsigned ipath_linkrecovery;
-extern unsigned ipath_mtu4096;
-extern struct mutex ipath_mutex;
-
-#define IPATH_DRV_NAME         "ib_ipath"
-#define IPATH_MAJOR            233
-#define IPATH_USER_MINOR_BASE  0
-#define IPATH_DIAGPKT_MINOR    127
-#define IPATH_DIAG_MINOR_BASE  129
-#define IPATH_NMINORS          255
-
-#define ipath_dev_err(dd,fmt,...) \
-       do { \
-               const struct ipath_devdata *__dd = (dd); \
-               if (__dd->pcidev) \
-                       dev_err(&__dd->pcidev->dev, "%s: " fmt, \
-                               ipath_get_unit_name(__dd->ipath_unit), \
-                               ##__VA_ARGS__); \
-               else \
-                       printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
-                              ipath_get_unit_name(__dd->ipath_unit), \
-                              ##__VA_ARGS__); \
-       } while (0)
-
-#if _IPATH_DEBUGGING
-
-# define __IPATH_DBG_WHICH(which,fmt,...) \
-       do { \
-               if (unlikely(ipath_debug & (which))) \
-                       printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
-                              __func__,##__VA_ARGS__); \
-       } while(0)
-
-# define ipath_dbg(fmt,...) \
-       __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
-# define ipath_cdbg(which,fmt,...) \
-       __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
-
-#else /* ! _IPATH_DEBUGGING */
-
-# define ipath_dbg(fmt,...)
-# define ipath_cdbg(which,fmt,...)
-
-#endif /* _IPATH_DEBUGGING */
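The debug macros above gate each printk on a category bit in the runtime ipath_debug mask, so individual categories can be switched on without rebuilding. A standalone sketch of the same pattern is shown below; the category values and demo_* names are assumptions for illustration only.

/* Illustrative sketch only: category-gated debug output driven by a runtime
 * bit mask, in the style of the ipath_cdbg()/ipath_debug pairing above. */
#include <stdio.h>

#define DEMO_DBG_VERBOSE 0x1
#define DEMO_DBG_PKT     0x2

static unsigned demo_debug = DEMO_DBG_PKT;  /* stand-in for the runtime mask */

#define demo_cdbg(which, fmt, ...) \
	do { \
		if (demo_debug & (which)) \
			printf("%s: " fmt, __func__, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	demo_cdbg(DEMO_DBG_PKT, "printed: packet category is enabled (%d)\n", 1);
	demo_cdbg(DEMO_DBG_VERBOSE, "suppressed: verbose category is off\n");
	return 0;
}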
-
-/*
- * this is used for formatting hw error messages...
- */
-struct ipath_hwerror_msgs {
-       u64 mask;
-       const char *msg;
-};
-
-#define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
-
-/* in ipath_intr.c... */
-void ipath_format_hwerrors(u64 hwerrs,
-                          const struct ipath_hwerror_msgs *hwerrmsgs,
-                          size_t nhwerrmsgs,
-                          char *msg, size_t lmsg);
-
-#endif                         /* _IPATH_KERNEL_H */
diff --git a/drivers/staging/rdma/ipath/ipath_keys.c b/drivers/staging/rdma/ipath/ipath_keys.c
deleted file mode 100644 (file)
index c0e933f..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <asm/io.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/**
- * ipath_alloc_lkey - allocate an lkey
- * @rkt: lkey table in which to allocate the lkey
- * @mr: memory region that this lkey protects
- *
- * Returns 1 if successful, otherwise returns 0.
- */
-
-int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
-{
-       unsigned long flags;
-       u32 r;
-       u32 n;
-       int ret;
-
-       spin_lock_irqsave(&rkt->lock, flags);
-
-       /* Find the next available LKEY */
-       r = n = rkt->next;
-       for (;;) {
-               if (rkt->table[r] == NULL)
-                       break;
-               r = (r + 1) & (rkt->max - 1);
-               if (r == n) {
-                       spin_unlock_irqrestore(&rkt->lock, flags);
-                       ipath_dbg("LKEY table full\n");
-                       ret = 0;
-                       goto bail;
-               }
-       }
-       rkt->next = (r + 1) & (rkt->max - 1);
-       /*
- * Make sure lkey is never zero, which is reserved to indicate an
-        * unrestricted LKEY.
-        */
-       rkt->gen++;
-       mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) |
-               ((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen)
-                << 8);
-       if (mr->lkey == 0) {
-               mr->lkey |= 1 << 8;
-               rkt->gen++;
-       }
-       rkt->table[r] = mr;
-       spin_unlock_irqrestore(&rkt->lock, flags);
-
-       ret = 1;
-
-bail:
-       return ret;
-}
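ipath_alloc_lkey() scans the table circularly, starting at a rotating next cursor, wrapping with a power-of-two mask and failing only after a full lap. The standalone sketch below reproduces just that scan (without the key encoding, generation counter, or locking); the table size and demo_* names are invented.

/* Illustrative sketch only: circular scan for a free slot starting at a
 * rotating "next" cursor, as in the lkey allocation above.  No locking. */
#include <stdio.h>
#include <stddef.h>

#define DEMO_TABLE_SIZE 8  /* hypothetical; must be a power of two for the & wrap */

static void *table[DEMO_TABLE_SIZE];
static unsigned next_slot;

static int demo_alloc_slot(void *obj, unsigned *slot_out)
{
	unsigned r = next_slot, start = next_slot;

	for (;;) {
		if (table[r] == NULL)
			break;
		r = (r + 1) & (DEMO_TABLE_SIZE - 1);
		if (r == start)
			return 0;  /* wrapped all the way around: table full */
	}
	next_slot = (r + 1) & (DEMO_TABLE_SIZE - 1);
	table[r] = obj;
	*slot_out = r;
	return 1;
}

int main(void)
{
	int dummy1, dummy2;
	unsigned slot;

	if (demo_alloc_slot(&dummy1, &slot))
		printf("first object placed in slot %u\n", slot);
	if (demo_alloc_slot(&dummy2, &slot))
		printf("second object placed in slot %u\n", slot);
	return 0;
}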
-
-/**
- * ipath_free_lkey - free an lkey
- * @rkt: table from which to free the lkey
- * @lkey: lkey id to free
- */
-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
-{
-       unsigned long flags;
-       u32 r;
-
-       if (lkey == 0)
-               return;
-       r = lkey >> (32 - ib_ipath_lkey_table_size);
-       spin_lock_irqsave(&rkt->lock, flags);
-       rkt->table[r] = NULL;
-       spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * ipath_lkey_ok - check IB SGE for validity and initialize
- * @qp: queue pair owning the lkey table to check the SGE against
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
-                 struct ib_sge *sge, int acc)
-{
-       struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-       struct ipath_mregion *mr;
-       unsigned n, m;
-       size_t off;
-       int ret;
-
-       /*
-        * We use LKEY == zero for kernel virtual addresses
-        * (see ipath_get_dma_mr and ipath_dma.c).
-        */
-       if (sge->lkey == 0) {
-               /* always a kernel port, no locking needed */
-               struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-
-               if (pd->user) {
-                       ret = 0;
-                       goto bail;
-               }
-               isge->mr = NULL;
-               isge->vaddr = (void *) sge->addr;
-               isge->length = sge->length;
-               isge->sge_length = sge->length;
-               ret = 1;
-               goto bail;
-       }
-       mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
-       if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
-                    qp->ibqp.pd != mr->pd)) {
-               ret = 0;
-               goto bail;
-       }
-
-       off = sge->addr - mr->user_base;
-       if (unlikely(sge->addr < mr->user_base ||
-                    off + sge->length > mr->length ||
-                    (mr->access_flags & acc) != acc)) {
-               ret = 0;
-               goto bail;
-       }
-
-       off += mr->offset;
-       m = 0;
-       n = 0;
-       while (off >= mr->map[m]->segs[n].length) {
-               off -= mr->map[m]->segs[n].length;
-               n++;
-               if (n >= IPATH_SEGSZ) {
-                       m++;
-                       n = 0;
-               }
-       }
-       isge->mr = mr;
-       isge->vaddr = mr->map[m]->segs[n].vaddr + off;
-       isge->length = mr->map[m]->segs[n].length - off;
-       isge->sge_length = sge->length;
-       isge->m = m;
-       isge->n = n;
-
-       ret = 1;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_rkey_ok - check the IB virtual address, length, and RKEY
- * @qp: the queue pair performing the access
- * @ss: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- */
-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
-                 u32 len, u64 vaddr, u32 rkey, int acc)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_lkey_table *rkt = &dev->lk_table;
-       struct ipath_sge *sge = &ss->sge;
-       struct ipath_mregion *mr;
-       unsigned n, m;
-       size_t off;
-       int ret;
-
-       /*
-        * We use RKEY == zero for kernel virtual addresses
-        * (see ipath_get_dma_mr and ipath_dma.c).
-        */
-       if (rkey == 0) {
-               /* always a kernel port, no locking needed */
-               struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-
-               if (pd->user) {
-                       ret = 0;
-                       goto bail;
-               }
-               sge->mr = NULL;
-               sge->vaddr = (void *) vaddr;
-               sge->length = len;
-               sge->sge_length = len;
-               ss->sg_list = NULL;
-               ss->num_sge = 1;
-               ret = 1;
-               goto bail;
-       }
-
-       mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
-       if (unlikely(mr == NULL || mr->lkey != rkey ||
-                    qp->ibqp.pd != mr->pd)) {
-               ret = 0;
-               goto bail;
-       }
-
-       off = vaddr - mr->iova;
-       if (unlikely(vaddr < mr->iova || off + len > mr->length ||
-                    (mr->access_flags & acc) == 0)) {
-               ret = 0;
-               goto bail;
-       }
-
-       off += mr->offset;
-       m = 0;
-       n = 0;
-       while (off >= mr->map[m]->segs[n].length) {
-               off -= mr->map[m]->segs[n].length;
-               n++;
-               if (n >= IPATH_SEGSZ) {
-                       m++;
-                       n = 0;
-               }
-       }
-       sge->mr = mr;
-       sge->vaddr = mr->map[m]->segs[n].vaddr + off;
-       sge->length = mr->map[m]->segs[n].length - off;
-       sge->sge_length = len;
-       sge->m = m;
-       sge->n = n;
-       ss->sg_list = NULL;
-       ss->num_sge = 1;
-
-       ret = 1;
-
-bail:
-       return ret;
-}
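Both ipath_lkey_ok() and ipath_rkey_ok() above walk the region's two-level segment map: m selects a block of IPATH_SEGSZ segments and n selects the entry within it. A standalone sketch of that walk, under the simplifying assumption (not made by the real code) that every segment has the same length:

/* Hypothetical helper, not driver code: find (m, n) for a byte offset when
 * all segments share one length. */
static void locate_segment(unsigned long off, unsigned long seg_len,
                           unsigned segs_per_map, unsigned *m, unsigned *n)
{
        unsigned long idx = off / seg_len;      /* overall segment index */

        *m = idx / segs_per_map;                /* which segment block */
        *n = idx % segs_per_map;                /* entry within the block */
        /* the byte offset into that segment would be off % seg_len */
}
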
diff --git a/drivers/staging/rdma/ipath/ipath_mad.c b/drivers/staging/rdma/ipath/ipath_mad.c
deleted file mode 100644 (file)
index ad3a926..0000000
+++ /dev/null
@@ -1,1521 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_pma.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-#define IB_SMP_UNSUP_VERSION   cpu_to_be16(0x0004)
-#define IB_SMP_UNSUP_METHOD    cpu_to_be16(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
-#define IB_SMP_INVALID_FIELD   cpu_to_be16(0x001C)
-
-static int reply(struct ib_smp *smp)
-{
-       /*
-        * The verbs framework will handle the directed/LID route
-        * packet changes.
-        */
-       smp->method = IB_MGMT_METHOD_GET_RESP;
-       if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-               smp->status |= IB_SMP_DIRECTION;
-       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static int recv_subn_get_nodedescription(struct ib_smp *smp,
-                                        struct ib_device *ibdev)
-{
-       if (smp->attr_mod)
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
-
-       return reply(smp);
-}
-
-struct nodeinfo {
-       u8 base_version;
-       u8 class_version;
-       u8 node_type;
-       u8 num_ports;
-       __be64 sys_guid;
-       __be64 node_guid;
-       __be64 port_guid;
-       __be16 partition_cap;
-       __be16 device_id;
-       __be32 revision;
-       u8 local_port_num;
-       u8 vendor_id[3];
-} __attribute__ ((packed));
-
-static int recv_subn_get_nodeinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev, u8 port)
-{
-       struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
-       struct ipath_devdata *dd = to_idev(ibdev)->dd;
-       u32 vendor, majrev, minrev;
-
-       /* GUID 0 is illegal */
-       if (smp->attr_mod || (dd->ipath_guid == 0))
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       nip->base_version = 1;
-       nip->class_version = 1;
-       nip->node_type = 1;     /* channel adapter */
-       /*
-        * XXX The num_ports value will need a layer function to get
-        * the value if we ever have more than one IB port on a chip.
-        * We will also need to get the GUID for the port.
-        */
-       nip->num_ports = ibdev->phys_port_cnt;
-       /* This is already in network order */
-       nip->sys_guid = to_idev(ibdev)->sys_image_guid;
-       nip->node_guid = dd->ipath_guid;
-       nip->port_guid = dd->ipath_guid;
-       nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
-       nip->device_id = cpu_to_be16(dd->ipath_deviceid);
-       majrev = dd->ipath_majrev;
-       minrev = dd->ipath_minrev;
-       nip->revision = cpu_to_be32((majrev << 16) | minrev);
-       nip->local_port_num = port;
-       vendor = dd->ipath_vendorid;
-       nip->vendor_id[0] = IPATH_SRC_OUI_1;
-       nip->vendor_id[1] = IPATH_SRC_OUI_2;
-       nip->vendor_id[2] = IPATH_SRC_OUI_3;
-
-       return reply(smp);
-}
-
-static int recv_subn_get_guidinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev)
-{
-       u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
-       __be64 *p = (__be64 *) smp->data;
-
-       /* 32 blocks of 8 64-bit GUIDs per block */
-
-       memset(smp->data, 0, sizeof(smp->data));
-
-       /*
-        * We only support one GUID for now.  If this changes, the
-        * portinfo.guid_cap field needs to be updated too.
-        */
-       if (startgx == 0) {
-               __be64 g = to_idev(ibdev)->dd->ipath_guid;
-               if (g == 0)
-                       /* GUID 0 is illegal */
-                       smp->status |= IB_SMP_INVALID_FIELD;
-               else
-                       /* The first is a copy of the read-only HW GUID. */
-                       *p = g;
-       } else
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       return reply(smp);
-}
-
-static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
-{
-       (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
-}
-
-static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
-{
-       (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
-}
-
-static int get_overrunthreshold(struct ipath_devdata *dd)
-{
-       return (dd->ipath_ibcctrl >>
-               INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-               INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-}
-
-/**
- * set_overrunthreshold - set the overrun threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
-{
-       unsigned v;
-
-       v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-               INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-       if (v != n) {
-               dd->ipath_ibcctrl &=
-                       ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
-                         INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
-               dd->ipath_ibcctrl |=
-                       (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-       }
-       return 0;
-}
-
-static int get_phyerrthreshold(struct ipath_devdata *dd)
-{
-       return (dd->ipath_ibcctrl >>
-               INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-               INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-}
-
-/**
- * set_phyerrthreshold - set the physical error threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
-{
-       unsigned v;
-
-       v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-               INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-       if (v != n) {
-               dd->ipath_ibcctrl &=
-                       ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
-                         INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
-               dd->ipath_ibcctrl |=
-                       (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-       }
-       return 0;
-}
-
-/**
- * get_linkdowndefaultstate - get the default linkdown state
- * @dd: the infinipath device
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-static int get_linkdowndefaultstate(struct ipath_devdata *dd)
-{
-       return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
-}
-
-static int recv_subn_get_portinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev, u8 port)
-{
-       struct ipath_ibdev *dev;
-       struct ipath_devdata *dd;
-       struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-       u16 lid;
-       u8 ibcstat;
-       u8 mtu;
-       int ret;
-
-       if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
-               smp->status |= IB_SMP_INVALID_FIELD;
-               ret = reply(smp);
-               goto bail;
-       }
-
-       dev = to_idev(ibdev);
-       dd = dev->dd;
-
-       /* Clear all fields.  Only set the non-zero fields. */
-       memset(smp->data, 0, sizeof(smp->data));
-
-       /* Only return the mkey if the protection field allows it. */
-       if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
-           dev->mkeyprot == 0)
-               pip->mkey = dev->mkey;
-       pip->gid_prefix = dev->gid_prefix;
-       lid = dd->ipath_lid;
-       pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
-       pip->sm_lid = cpu_to_be16(dev->sm_lid);
-       pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
-       /* pip->diag_code; */
-       pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
-       pip->local_port_num = port;
-       pip->link_width_enabled = dd->ipath_link_width_enabled;
-       pip->link_width_supported = dd->ipath_link_width_supported;
-       pip->link_width_active = dd->ipath_link_width_active;
-       pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
-       ibcstat = dd->ipath_lastibcstat;
-       /* map LinkState to IB portinfo values.  */
-       pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
-
-       pip->portphysstate_linkdown =
-               (ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
-               (get_linkdowndefaultstate(dd) ? 1 : 2);
-       pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
-       pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
-               dd->ipath_link_speed_enabled;
-       switch (dd->ipath_ibmtu) {
-       case 4096:
-               mtu = IB_MTU_4096;
-               break;
-       case 2048:
-               mtu = IB_MTU_2048;
-               break;
-       case 1024:
-               mtu = IB_MTU_1024;
-               break;
-       case 512:
-               mtu = IB_MTU_512;
-               break;
-       case 256:
-               mtu = IB_MTU_256;
-               break;
-       default:                /* oops, something is wrong */
-               mtu = IB_MTU_2048;
-               break;
-       }
-       pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
-       pip->vlcap_inittype = 0x10;     /* VLCap = VL0, InitType = 0 */
-       pip->vl_high_limit = dev->vl_high_limit;
-       /* pip->vl_arb_high_cap; // only one VL */
-       /* pip->vl_arb_low_cap; // only one VL */
-       /* InitTypeReply = 0 */
-       /* our mtu cap depends on whether 4K MTU enabled or not */
-       pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
-       /* HCAs ignore VLStallCount and HOQLife */
-       /* pip->vlstallcnt_hoqlife; */
-       pip->operationalvl_pei_peo_fpi_fpo = 0x10;      /* OVLs = 1 */
-       pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
-       /* P_KeyViolations are counted by hardware. */
-       pip->pkey_violations =
-               cpu_to_be16((ipath_get_cr_errpkey(dd) -
-                            dev->z_pkey_violations) & 0xFFFF);
-       pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
-       /* Only the hardware GUID is supported for now */
-       pip->guid_cap = 1;
-       pip->clientrereg_resv_subnetto = dev->subnet_timeout;
-       /* 32.768 usec. response time (guessing) */
-       pip->resv_resptimevalue = 3;
-       pip->localphyerrors_overrunerrors =
-               (get_phyerrthreshold(dd) << 4) |
-               get_overrunthreshold(dd);
-       /* pip->max_credit_hint; */
-       if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
-               u32 v;
-
-               v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
-               pip->link_roundtrip_latency[0] = v >> 16;
-               pip->link_roundtrip_latency[1] = v >> 8;
-               pip->link_roundtrip_latency[2] = v;
-       }
-
-       ret = reply(smp);
-
-bail:
-       return ret;
-}
-
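Several PortInfo fields above pack two values into a single byte; mkeyprot_resv_lmc, for instance, carries the 2-bit M_KeyProtectBits in its high bits and the 3-bit LMC in its low bits, which is what the shift by 6 and the mask with 7 in the get/set handlers express. A small illustrative split, using standard C types rather than the driver's (hypothetical helper name):

#include <stdint.h>

/* Hypothetical sketch: split mkeyprot_resv_lmc the way the handlers do. */
static void split_mkeyprot_lmc(uint8_t mkeyprot_resv_lmc,
                               uint8_t *mkeyprot, uint8_t *lmc)
{
        *mkeyprot = mkeyprot_resv_lmc >> 6;     /* M_KeyProtectBits */
        *lmc = mkeyprot_resv_lmc & 7;           /* LID mask control */
}
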
-/**
- * get_pkeys - return the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the pkey table is placed here
- */
-static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-       /* always a kernel port, no locking needed */
-       struct ipath_portdata *pd = dd->ipath_pd[0];
-
-       memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
-
-       return 0;
-}
-
-static int recv_subn_get_pkeytable(struct ib_smp *smp,
-                                  struct ib_device *ibdev)
-{
-       u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
-       u16 *p = (u16 *) smp->data;
-       __be16 *q = (__be16 *) smp->data;
-
-       /* 64 blocks of 32 16-bit P_Key entries */
-
-       memset(smp->data, 0, sizeof(smp->data));
-       if (startpx == 0) {
-               struct ipath_ibdev *dev = to_idev(ibdev);
-               unsigned i, n = ipath_get_npkeys(dev->dd);
-
-               get_pkeys(dev->dd, p);
-
-               for (i = 0; i < n; i++)
-                       q[i] = cpu_to_be16(p[i]);
-       } else
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       return reply(smp);
-}
-
-static int recv_subn_set_guidinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev)
-{
-       /* The only GUID we support is the first read-only entry. */
-       return recv_subn_get_guidinfo(smp, ibdev);
-}
-
-/**
- * set_linkdowndefaultstate - set the default linkdown state
- * @dd: the infinipath device
- * @sleep: the new state
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
-{
-       if (sleep)
-               dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-       else
-               dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                        dd->ipath_ibcctrl);
-       return 0;
-}
-
-/**
- * recv_subn_set_portinfo - set port information
- * @smp: the incoming SM packet
- * @ibdev: the infiniband device
- * @port: the port on the device
- *
- * Set Portinfo (see ch. 14.2.5.6).
- */
-static int recv_subn_set_portinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev, u8 port)
-{
-       struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-       struct ib_event event;
-       struct ipath_ibdev *dev;
-       struct ipath_devdata *dd;
-       char clientrereg = 0;
-       u16 lid, smlid;
-       u8 lwe;
-       u8 lse;
-       u8 state;
-       u16 lstate;
-       u32 mtu;
-       int ret, ore;
-
-       if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
-               goto err;
-
-       dev = to_idev(ibdev);
-       dd = dev->dd;
-       event.device = ibdev;
-       event.element.port_num = port;
-
-       dev->mkey = pip->mkey;
-       dev->gid_prefix = pip->gid_prefix;
-       dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
-
-       lid = be16_to_cpu(pip->lid);
-       if (dd->ipath_lid != lid ||
-           dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
-               /* Must be a valid unicast LID address. */
-               if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
-                       goto err;
-               ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
-               event.event = IB_EVENT_LID_CHANGE;
-               ib_dispatch_event(&event);
-       }
-
-       smlid = be16_to_cpu(pip->sm_lid);
-       if (smlid != dev->sm_lid) {
-               /* Must be a valid unicast LID address. */
-               if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
-                       goto err;
-               dev->sm_lid = smlid;
-               event.event = IB_EVENT_SM_CHANGE;
-               ib_dispatch_event(&event);
-       }
-
-       /* Allow 1x or 4x to be set (see 14.2.6.6). */
-       lwe = pip->link_width_enabled;
-       if (lwe) {
-               if (lwe == 0xFF)
-                       lwe = dd->ipath_link_width_supported;
-               else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
-                       goto err;
-               set_link_width_enabled(dd, lwe);
-       }
-
-       /* Allow 2.5 or 5.0 Gbs. */
-       lse = pip->linkspeedactive_enabled & 0xF;
-       if (lse) {
-               if (lse == 15)
-                       lse = dd->ipath_link_speed_supported;
-               else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
-                       goto err;
-               set_link_speed_enabled(dd, lse);
-       }
-
-       /* Set link down default state. */
-       switch (pip->portphysstate_linkdown & 0xF) {
-       case 0: /* NOP */
-               break;
-       case 1: /* SLEEP */
-               if (set_linkdowndefaultstate(dd, 1))
-                       goto err;
-               break;
-       case 2: /* POLL */
-               if (set_linkdowndefaultstate(dd, 0))
-                       goto err;
-               break;
-       default:
-               goto err;
-       }
-
-       dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
-       dev->vl_high_limit = pip->vl_high_limit;
-
-       switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
-       case IB_MTU_256:
-               mtu = 256;
-               break;
-       case IB_MTU_512:
-               mtu = 512;
-               break;
-       case IB_MTU_1024:
-               mtu = 1024;
-               break;
-       case IB_MTU_2048:
-               mtu = 2048;
-               break;
-       case IB_MTU_4096:
-               if (!ipath_mtu4096)
-                       goto err;
-               mtu = 4096;
-               break;
-       default:
-               /* XXX We have already partially updated our state! */
-               goto err;
-       }
-       ipath_set_mtu(dd, mtu);
-
-       dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
-
-       /* We only support VL0 */
-       if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
-               goto err;
-
-       if (pip->mkey_violations == 0)
-               dev->mkey_violations = 0;
-
-       /*
-        * Hardware counter can't be reset so snapshot and subtract
-        * later.
-        */
-       if (pip->pkey_violations == 0)
-               dev->z_pkey_violations = ipath_get_cr_errpkey(dd);
-
-       if (pip->qkey_violations == 0)
-               dev->qkey_violations = 0;
-
-       ore = pip->localphyerrors_overrunerrors;
-       if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
-               goto err;
-
-       if (set_overrunthreshold(dd, (ore & 0xF)))
-               goto err;
-
-       dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
-
-       if (pip->clientrereg_resv_subnetto & 0x80) {
-               clientrereg = 1;
-               event.event = IB_EVENT_CLIENT_REREGISTER;
-               ib_dispatch_event(&event);
-       }
-
-       /*
-        * Do the port state change now that the other link parameters
-        * have been set.
-        * Changing the port physical state only makes sense if the link
-        * is down or is being set to down.
-        */
-       state = pip->linkspeed_portstate & 0xF;
-       lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
-       if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
-               goto err;
-
-       /*
-        * Only state changes of DOWN, ARM, and ACTIVE are valid
-        * and must be in the correct state to take effect (see 7.2.6).
-        */
-       switch (state) {
-       case IB_PORT_NOP:
-               if (lstate == 0)
-                       break;
-               /* FALLTHROUGH */
-       case IB_PORT_DOWN:
-               if (lstate == 0)
-                       lstate = IPATH_IB_LINKDOWN_ONLY;
-               else if (lstate == 1)
-                       lstate = IPATH_IB_LINKDOWN_SLEEP;
-               else if (lstate == 2)
-                       lstate = IPATH_IB_LINKDOWN;
-               else if (lstate == 3)
-                       lstate = IPATH_IB_LINKDOWN_DISABLE;
-               else
-                       goto err;
-               ipath_set_linkstate(dd, lstate);
-               if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
-                       ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-                       goto done;
-               }
-               ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
-                               IPATH_LINKACTIVE, 1000);
-               break;
-       case IB_PORT_ARMED:
-               ipath_set_linkstate(dd, IPATH_IB_LINKARM);
-               break;
-       case IB_PORT_ACTIVE:
-               ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
-               break;
-       default:
-               /* XXX We have already partially updated our state! */
-               goto err;
-       }
-
-       ret = recv_subn_get_portinfo(smp, ibdev, port);
-
-       if (clientrereg)
-               pip->clientrereg_resv_subnetto |= 0x80;
-
-       goto done;
-
-err:
-       smp->status |= IB_SMP_INVALID_FIELD;
-       ret = recv_subn_get_portinfo(smp, ibdev, port);
-
-done:
-       return ret;
-}
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @dd: the infinipath device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct ipath_devdata *dd, u16 key)
-{
-       int i;
-       int ret;
-
-       for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (dd->ipath_pkeys[i] != key)
-                       continue;
-               if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
-                       dd->ipath_pkeys[i] = 0;
-                       ret = 1;
-                       goto bail;
-               }
-               break;
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @dd: the infinipath device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct ipath_devdata *dd, u16 key)
-{
-       int i;
-       u16 lkey = key & 0x7FFF;
-       int any = 0;
-       int ret;
-
-       if (lkey == 0x7FFF) {
-               ret = 0;
-               goto bail;
-       }
-
-       /* Look for an empty slot or a matching PKEY. */
-       for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (!dd->ipath_pkeys[i]) {
-                       any++;
-                       continue;
-               }
-               /* If it matches exactly, try to increment the ref count */
-               if (dd->ipath_pkeys[i] == key) {
-                       if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
-                               ret = 0;
-                               goto bail;
-                       }
-                       /* Lost the race. Look for an empty slot below. */
-                       atomic_dec(&dd->ipath_pkeyrefs[i]);
-                       any++;
-               }
-               /*
-                * It makes no sense to have both the limited and unlimited
-                * PKEY set at the same time since the unlimited one will
-                * disable the limited one.
-                */
-               if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-                       ret = -EEXIST;
-                       goto bail;
-               }
-       }
-       if (!any) {
-               ret = -EBUSY;
-               goto bail;
-       }
-       for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (!dd->ipath_pkeys[i] &&
-                   atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-                       /* for ipathstats, etc. */
-                       ipath_stats.sps_pkeys[i] = lkey;
-                       dd->ipath_pkeys[i] = key;
-                       ret = 1;
-                       goto bail;
-               }
-       }
-       ret = -EBUSY;
-
-bail:
-       return ret;
-}
-
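In add_pkey() and rm_pkey() above, bit 15 of a P_Key is the full-membership flag, so the code masks with 0x7FFF to compare base keys and treats a base of 0x7FFF (the default key) as requiring no table change. Two small helpers sketching that convention (hypothetical names, standard C types, not driver code):

#include <stdint.h>

static int pkey_is_full_member(uint16_t pkey)
{
        return (pkey & 0x8000) != 0;            /* bit 15 = full membership */
}

static int pkey_base_matches(uint16_t a, uint16_t b)
{
        return (a & 0x7FFF) == (b & 0x7FFF);    /* compare base keys only */
}
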
-/**
- * set_pkeys - set the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the PKEY table
- * @port: the IB port number
- */
-static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
-{
-       struct ipath_portdata *pd;
-       int i;
-       int changed = 0;
-
-       /* always a kernel port, no locking needed */
-       pd = dd->ipath_pd[0];
-
-       for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-               u16 key = pkeys[i];
-               u16 okey = pd->port_pkeys[i];
-
-               if (key == okey)
-                       continue;
-               /*
-                * The value of this PKEY table entry is changing.
-                * Remove the old entry in the hardware's array of PKEYs.
-                */
-               if (okey & 0x7FFF)
-                       changed |= rm_pkey(dd, okey);
-               if (key & 0x7FFF) {
-                       int ret = add_pkey(dd, key);
-
-                       if (ret < 0)
-                               key = 0;
-                       else
-                               changed |= ret;
-               }
-               pd->port_pkeys[i] = key;
-       }
-       if (changed) {
-               u64 pkey;
-               struct ib_event event;
-
-               pkey = (u64) dd->ipath_pkeys[0] |
-                       ((u64) dd->ipath_pkeys[1] << 16) |
-                       ((u64) dd->ipath_pkeys[2] << 32) |
-                       ((u64) dd->ipath_pkeys[3] << 48);
-               ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
-                          (unsigned long long) pkey);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-                                pkey);
-
-               event.event = IB_EVENT_PKEY_CHANGE;
-               event.device = &dd->verbs_dev->ibdev;
-               event.element.port_num = port;
-               ib_dispatch_event(&event);
-       }
-       return 0;
-}
-
-static int recv_subn_set_pkeytable(struct ib_smp *smp,
-                                  struct ib_device *ibdev, u8 port)
-{
-       u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
-       __be16 *p = (__be16 *) smp->data;
-       u16 *q = (u16 *) smp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       unsigned i, n = ipath_get_npkeys(dev->dd);
-
-       for (i = 0; i < n; i++)
-               q[i] = be16_to_cpu(p[i]);
-
-       if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       return recv_subn_get_pkeytable(smp, ibdev);
-}
-
-static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
-{
-       struct ib_class_port_info *p =
-               (struct ib_class_port_info *)pmp->data;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-
-       if (pmp->mad_hdr.attr_mod != 0)
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-       /* Indicate AllPortSelect is valid (only one port anyway) */
-       p->capability_mask = cpu_to_be16(1 << 8);
-       p->base_version = 1;
-       p->class_version = 1;
-       /*
-        * Expected response time is 4.096 usec. * 2^18 == 1.073741824
-        * sec.
-        */
-       p->resp_time_value = 18;
-
-       return reply((struct ib_smp *) pmp);
-}
-
-/*
- * The PortSamplesControl.CounterMasks field is an array of 3-bit fields
- * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
- * We support 5 counters which only count the mandatory quantities.
- */
-#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
-                                   COUNTER_MASK(1, 1) | \
-                                   COUNTER_MASK(1, 2) | \
-                                   COUNTER_MASK(1, 3) | \
-                                   COUNTER_MASK(1, 4))
-
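For reference, COUNTER_MASK(q, n) places a 3-bit capability code for counter n at bit position (9 - n) * 3, so with q = 1 for counters 0-4 the fields land at bits 27, 24, 21, 18 and 15; COUNTER_MASK0_9 therefore expands to cpu_to_be32(0x08000000 | 0x01000000 | 0x00200000 | 0x00040000 | 0x00008000), i.e. cpu_to_be32(0x09248000).
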
-static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
-                                          struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portsamplescontrol *p =
-               (struct ib_pma_portsamplescontrol *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-       unsigned long flags;
-       u8 port_select = p->port_select;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-
-       p->port_select = port_select;
-       if (pmp->mad_hdr.attr_mod != 0 ||
-           (port_select != port && port_select != 0xFF))
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-       /*
-        * Ticks are 10x the link transfer period which for 2.5Gbs is 4
-        * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
-        * intervals are counted in ticks.  Since we use Linux timers, that
-        * count in jiffies, we can't sample for less than 1000 ticks if HZ
-        * == 1000 (4000 ticks if HZ is 250).  link_speed_active returns 2 for
-        * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
-        * have hardware support for delaying packets.
-        */
-       if (crp->cr_psstat)
-               p->tick = dev->dd->ipath_link_speed_active - 1;
-       else
-               p->tick = 250;          /* 1 usec. */
-       p->counter_width = 4;   /* 32 bit counters */
-       p->counter_mask0_9 = COUNTER_MASK0_9;
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       if (crp->cr_psstat)
-               p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-       else
-               p->sample_status = dev->pma_sample_status;
-       p->sample_start = cpu_to_be32(dev->pma_sample_start);
-       p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
-       p->tag = cpu_to_be16(dev->pma_tag);
-       p->counter_select[0] = dev->pma_counter_select[0];
-       p->counter_select[1] = dev->pma_counter_select[1];
-       p->counter_select[2] = dev->pma_counter_select[2];
-       p->counter_select[3] = dev->pma_counter_select[3];
-       p->counter_select[4] = dev->pma_counter_select[4];
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
-                                          struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portsamplescontrol *p =
-               (struct ib_pma_portsamplescontrol *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-       unsigned long flags;
-       u8 status;
-       int ret;
-
-       if (pmp->mad_hdr.attr_mod != 0 ||
-           (p->port_select != port && p->port_select != 0xFF)) {
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-               ret = reply((struct ib_smp *) pmp);
-               goto bail;
-       }
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       if (crp->cr_psstat)
-               status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-       else
-               status = dev->pma_sample_status;
-       if (status == IB_PMA_SAMPLE_STATUS_DONE) {
-               dev->pma_sample_start = be32_to_cpu(p->sample_start);
-               dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
-               dev->pma_tag = be16_to_cpu(p->tag);
-               dev->pma_counter_select[0] = p->counter_select[0];
-               dev->pma_counter_select[1] = p->counter_select[1];
-               dev->pma_counter_select[2] = p->counter_select[2];
-               dev->pma_counter_select[3] = p->counter_select[3];
-               dev->pma_counter_select[4] = p->counter_select[4];
-               if (crp->cr_psstat) {
-                       ipath_write_creg(dev->dd, crp->cr_psinterval,
-                                        dev->pma_sample_interval);
-                       ipath_write_creg(dev->dd, crp->cr_psstart,
-                                        dev->pma_sample_start);
-               } else
-                       dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
-       }
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-       ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
-
-bail:
-       return ret;
-}
-
-static u64 get_counter(struct ipath_ibdev *dev,
-                      struct ipath_cregs const *crp,
-                      __be16 sel)
-{
-       u64 ret;
-
-       switch (sel) {
-       case IB_PMA_PORT_XMIT_DATA:
-               ret = (crp->cr_psxmitdatacount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
-                       dev->ipath_sword;
-               break;
-       case IB_PMA_PORT_RCV_DATA:
-               ret = (crp->cr_psrcvdatacount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
-                       dev->ipath_rword;
-               break;
-       case IB_PMA_PORT_XMIT_PKTS:
-               ret = (crp->cr_psxmitpktscount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
-                       dev->ipath_spkts;
-               break;
-       case IB_PMA_PORT_RCV_PKTS:
-               ret = (crp->cr_psrcvpktscount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
-                       dev->ipath_rpkts;
-               break;
-       case IB_PMA_PORT_XMIT_WAIT:
-               ret = (crp->cr_psxmitwaitcount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
-                       dev->ipath_xmit_wait;
-               break;
-       default:
-               ret = 0;
-       }
-
-       return ret;
-}
-
-static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
-                                         struct ib_device *ibdev)
-{
-       struct ib_pma_portsamplesresult *p =
-               (struct ib_pma_portsamplesresult *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-       u8 status;
-       int i;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-       p->tag = cpu_to_be16(dev->pma_tag);
-       if (crp->cr_psstat)
-               status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-       else
-               status = dev->pma_sample_status;
-       p->sample_status = cpu_to_be16(status);
-       for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-               p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
-                   cpu_to_be32(
-                       get_counter(dev, crp, dev->pma_counter_select[i]));
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
-                                             struct ib_device *ibdev)
-{
-       struct ib_pma_portsamplesresult_ext *p =
-               (struct ib_pma_portsamplesresult_ext *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-       u8 status;
-       int i;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-       p->tag = cpu_to_be16(dev->pma_tag);
-       if (crp->cr_psstat)
-               status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-       else
-               status = dev->pma_sample_status;
-       p->sample_status = cpu_to_be16(status);
-       /* 64 bits */
-       p->extended_width = cpu_to_be32(0x80000000);
-       for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-               p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
-                   cpu_to_be64(
-                       get_counter(dev, crp, dev->pma_counter_select[i]));
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
-                                    struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-               pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_verbs_counters cntrs;
-       u8 port_select = p->port_select;
-
-       ipath_get_counters(dev->dd, &cntrs);
-
-       /* Adjust counters for any resets done. */
-       cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
-       cntrs.link_error_recovery_counter -=
-               dev->z_link_error_recovery_counter;
-       cntrs.link_downed_counter -= dev->z_link_downed_counter;
-       cntrs.port_rcv_errors += dev->rcv_errors;
-       cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
-       cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
-       cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
-       cntrs.port_xmit_data -= dev->z_port_xmit_data;
-       cntrs.port_rcv_data -= dev->z_port_rcv_data;
-       cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
-       cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
-       cntrs.local_link_integrity_errors -=
-               dev->z_local_link_integrity_errors;
-       cntrs.excessive_buffer_overrun_errors -=
-               dev->z_excessive_buffer_overrun_errors;
-       cntrs.vl15_dropped -= dev->z_vl15_dropped;
-       cntrs.vl15_dropped += dev->n_vl15_dropped;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-
-       p->port_select = port_select;
-       if (pmp->mad_hdr.attr_mod != 0 ||
-           (port_select != port && port_select != 0xFF))
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-       if (cntrs.symbol_error_counter > 0xFFFFUL)
-               p->symbol_error_counter = cpu_to_be16(0xFFFF);
-       else
-               p->symbol_error_counter =
-                       cpu_to_be16((u16)cntrs.symbol_error_counter);
-       if (cntrs.link_error_recovery_counter > 0xFFUL)
-               p->link_error_recovery_counter = 0xFF;
-       else
-               p->link_error_recovery_counter =
-                       (u8)cntrs.link_error_recovery_counter;
-       if (cntrs.link_downed_counter > 0xFFUL)
-               p->link_downed_counter = 0xFF;
-       else
-               p->link_downed_counter = (u8)cntrs.link_downed_counter;
-       if (cntrs.port_rcv_errors > 0xFFFFUL)
-               p->port_rcv_errors = cpu_to_be16(0xFFFF);
-       else
-               p->port_rcv_errors =
-                       cpu_to_be16((u16) cntrs.port_rcv_errors);
-       if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-               p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
-       else
-               p->port_rcv_remphys_errors =
-                       cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
-       if (cntrs.port_xmit_discards > 0xFFFFUL)
-               p->port_xmit_discards = cpu_to_be16(0xFFFF);
-       else
-               p->port_xmit_discards =
-                       cpu_to_be16((u16)cntrs.port_xmit_discards);
-       if (cntrs.local_link_integrity_errors > 0xFUL)
-               cntrs.local_link_integrity_errors = 0xFUL;
-       if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
-               cntrs.excessive_buffer_overrun_errors = 0xFUL;
-       p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
-               cntrs.excessive_buffer_overrun_errors;
-       if (cntrs.vl15_dropped > 0xFFFFUL)
-               p->vl15_dropped = cpu_to_be16(0xFFFF);
-       else
-               p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
-       if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-               p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
-       else
-               p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
-       if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-               p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
-       else
-               p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
-       if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-               p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
-       else
-               p->port_xmit_packets =
-                       cpu_to_be32((u32)cntrs.port_xmit_packets);
-       if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-               p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
-       else
-               p->port_rcv_packets =
-                       cpu_to_be32((u32) cntrs.port_rcv_packets);
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
-                                        struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portcounters_ext *p =
-               (struct ib_pma_portcounters_ext *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       u64 swords, rwords, spkts, rpkts, xwait;
-       u8 port_select = p->port_select;
-
-       ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
-                               &rpkts, &xwait);
-
-       /* Adjust counters for any resets done. */
-       swords -= dev->z_port_xmit_data;
-       rwords -= dev->z_port_rcv_data;
-       spkts -= dev->z_port_xmit_packets;
-       rpkts -= dev->z_port_rcv_packets;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-
-       p->port_select = port_select;
-       if (pmp->mad_hdr.attr_mod != 0 ||
-           (port_select != port && port_select != 0xFF))
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-       p->port_xmit_data = cpu_to_be64(swords);
-       p->port_rcv_data = cpu_to_be64(rwords);
-       p->port_xmit_packets = cpu_to_be64(spkts);
-       p->port_rcv_packets = cpu_to_be64(rpkts);
-       p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
-       p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
-       p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
-       p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
-                                    struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-               pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_verbs_counters cntrs;
-
-       /*
-        * Since the HW doesn't support clearing counters, we save the
-        * current count and subtract it from future responses.
-        */
-       ipath_get_counters(dev->dd, &cntrs);
-
-       if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
-               dev->z_symbol_error_counter = cntrs.symbol_error_counter;
-
-       if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
-               dev->z_link_error_recovery_counter =
-                       cntrs.link_error_recovery_counter;
-
-       if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
-               dev->z_link_downed_counter = cntrs.link_downed_counter;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
-               dev->z_port_rcv_errors =
-                       cntrs.port_rcv_errors + dev->rcv_errors;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
-               dev->z_port_rcv_remphys_errors =
-                       cntrs.port_rcv_remphys_errors;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
-               dev->z_port_xmit_discards = cntrs.port_xmit_discards;
-
-       if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
-               dev->z_local_link_integrity_errors =
-                       cntrs.local_link_integrity_errors;
-
-       if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
-               dev->z_excessive_buffer_overrun_errors =
-                       cntrs.excessive_buffer_overrun_errors;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
-               dev->n_vl15_dropped = 0;
-               dev->z_vl15_dropped = cntrs.vl15_dropped;
-       }
-
-       if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
-               dev->z_port_xmit_data = cntrs.port_xmit_data;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
-               dev->z_port_rcv_data = cntrs.port_rcv_data;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
-               dev->z_port_xmit_packets = cntrs.port_xmit_packets;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
-               dev->z_port_rcv_packets = cntrs.port_rcv_packets;
-
-       return recv_pma_get_portcounters(pmp, ibdev, port);
-}
-
-static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
-                                        struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-               pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       u64 swords, rwords, spkts, rpkts, xwait;
-
-       ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
-                               &rpkts, &xwait);
-
-       if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
-               dev->z_port_xmit_data = swords;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
-               dev->z_port_rcv_data = rwords;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
-               dev->z_port_xmit_packets = spkts;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
-               dev->z_port_rcv_packets = rpkts;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
-               dev->n_unicast_xmit = 0;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
-               dev->n_unicast_rcv = 0;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
-               dev->n_multicast_xmit = 0;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
-               dev->n_multicast_rcv = 0;
-
-       return recv_pma_get_portcounters_ext(pmp, ibdev, port);
-}
-
-static int process_subn(struct ib_device *ibdev, int mad_flags,
-                       u8 port_num, const struct ib_mad *in_mad,
-                       struct ib_mad *out_mad)
-{
-       struct ib_smp *smp = (struct ib_smp *)out_mad;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       int ret;
-
-       *out_mad = *in_mad;
-       if (smp->class_version != 1) {
-               smp->status |= IB_SMP_UNSUP_VERSION;
-               ret = reply(smp);
-               goto bail;
-       }
-
-       /* Is the mkey in the process of expiring? */
-       if (dev->mkey_lease_timeout &&
-           time_after_eq(jiffies, dev->mkey_lease_timeout)) {
-               /* Clear timeout and mkey protection field. */
-               dev->mkey_lease_timeout = 0;
-               dev->mkeyprot = 0;
-       }
-
-       /*
-        * M_Key checking depends on
-        * Portinfo:M_Key_protect_bits
-        */
-       if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
-           dev->mkey != smp->mkey &&
-           (smp->method == IB_MGMT_METHOD_SET ||
-            (smp->method == IB_MGMT_METHOD_GET &&
-             dev->mkeyprot >= 2))) {
-               if (dev->mkey_violations != 0xFFFF)
-                       ++dev->mkey_violations;
-               if (dev->mkey_lease_timeout ||
-                   dev->mkey_lease_period == 0) {
-                       ret = IB_MAD_RESULT_SUCCESS |
-                               IB_MAD_RESULT_CONSUMED;
-                       goto bail;
-               }
-               dev->mkey_lease_timeout = jiffies +
-                       dev->mkey_lease_period * HZ;
-               /* Future: Generate a trap notice. */
-               ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-               goto bail;
-       } else if (dev->mkey_lease_timeout)
-               dev->mkey_lease_timeout = 0;
-
-       switch (smp->method) {
-       case IB_MGMT_METHOD_GET:
-               switch (smp->attr_id) {
-               case IB_SMP_ATTR_NODE_DESC:
-                       ret = recv_subn_get_nodedescription(smp, ibdev);
-                       goto bail;
-               case IB_SMP_ATTR_NODE_INFO:
-                       ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
-                       goto bail;
-               case IB_SMP_ATTR_GUID_INFO:
-                       ret = recv_subn_get_guidinfo(smp, ibdev);
-                       goto bail;
-               case IB_SMP_ATTR_PORT_INFO:
-                       ret = recv_subn_get_portinfo(smp, ibdev, port_num);
-                       goto bail;
-               case IB_SMP_ATTR_PKEY_TABLE:
-                       ret = recv_subn_get_pkeytable(smp, ibdev);
-                       goto bail;
-               case IB_SMP_ATTR_SM_INFO:
-                       if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
-                               ret = IB_MAD_RESULT_SUCCESS |
-                                       IB_MAD_RESULT_CONSUMED;
-                               goto bail;
-                       }
-                       if (dev->port_cap_flags & IB_PORT_SM) {
-                               ret = IB_MAD_RESULT_SUCCESS;
-                               goto bail;
-                       }
-                       /* FALLTHROUGH */
-               default:
-                       smp->status |= IB_SMP_UNSUP_METH_ATTR;
-                       ret = reply(smp);
-                       goto bail;
-               }
-
-       case IB_MGMT_METHOD_SET:
-               switch (smp->attr_id) {
-               case IB_SMP_ATTR_GUID_INFO:
-                       ret = recv_subn_set_guidinfo(smp, ibdev);
-                       goto bail;
-               case IB_SMP_ATTR_PORT_INFO:
-                       ret = recv_subn_set_portinfo(smp, ibdev, port_num);
-                       goto bail;
-               case IB_SMP_ATTR_PKEY_TABLE:
-                       ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
-                       goto bail;
-               case IB_SMP_ATTR_SM_INFO:
-                       if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
-                               ret = IB_MAD_RESULT_SUCCESS |
-                                       IB_MAD_RESULT_CONSUMED;
-                               goto bail;
-                       }
-                       if (dev->port_cap_flags & IB_PORT_SM) {
-                               ret = IB_MAD_RESULT_SUCCESS;
-                               goto bail;
-                       }
-                       /* FALLTHROUGH */
-               default:
-                       smp->status |= IB_SMP_UNSUP_METH_ATTR;
-                       ret = reply(smp);
-                       goto bail;
-               }
-
-       case IB_MGMT_METHOD_TRAP:
-       case IB_MGMT_METHOD_REPORT:
-       case IB_MGMT_METHOD_REPORT_RESP:
-       case IB_MGMT_METHOD_TRAP_REPRESS:
-       case IB_MGMT_METHOD_GET_RESP:
-               /*
-                * The ib_mad module will call us to process responses
-                * before checking for other consumers.
-                * Just tell the caller to process it normally.
-                */
-               ret = IB_MAD_RESULT_SUCCESS;
-               goto bail;
-       default:
-               smp->status |= IB_SMP_UNSUP_METHOD;
-               ret = reply(smp);
-       }
-
-bail:
-       return ret;
-}
-
-static int process_perf(struct ib_device *ibdev, u8 port_num,
-                       const struct ib_mad *in_mad,
-                       struct ib_mad *out_mad)
-{
-       struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
-       int ret;
-
-       *out_mad = *in_mad;
-       if (pmp->mad_hdr.class_version != 1) {
-               pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
-               ret = reply((struct ib_smp *) pmp);
-               goto bail;
-       }
-
-       switch (pmp->mad_hdr.method) {
-       case IB_MGMT_METHOD_GET:
-               switch (pmp->mad_hdr.attr_id) {
-               case IB_PMA_CLASS_PORT_INFO:
-                       ret = recv_pma_get_classportinfo(pmp);
-                       goto bail;
-               case IB_PMA_PORT_SAMPLES_CONTROL:
-                       ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
-                                                             port_num);
-                       goto bail;
-               case IB_PMA_PORT_SAMPLES_RESULT:
-                       ret = recv_pma_get_portsamplesresult(pmp, ibdev);
-                       goto bail;
-               case IB_PMA_PORT_SAMPLES_RESULT_EXT:
-                       ret = recv_pma_get_portsamplesresult_ext(pmp,
-                                                                ibdev);
-                       goto bail;
-               case IB_PMA_PORT_COUNTERS:
-                       ret = recv_pma_get_portcounters(pmp, ibdev,
-                                                       port_num);
-                       goto bail;
-               case IB_PMA_PORT_COUNTERS_EXT:
-                       ret = recv_pma_get_portcounters_ext(pmp, ibdev,
-                                                           port_num);
-                       goto bail;
-               default:
-                       pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
-                       ret = reply((struct ib_smp *) pmp);
-                       goto bail;
-               }
-
-       case IB_MGMT_METHOD_SET:
-               switch (pmp->mad_hdr.attr_id) {
-               case IB_PMA_PORT_SAMPLES_CONTROL:
-                       ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
-                                                             port_num);
-                       goto bail;
-               case IB_PMA_PORT_COUNTERS:
-                       ret = recv_pma_set_portcounters(pmp, ibdev,
-                                                       port_num);
-                       goto bail;
-               case IB_PMA_PORT_COUNTERS_EXT:
-                       ret = recv_pma_set_portcounters_ext(pmp, ibdev,
-                                                           port_num);
-                       goto bail;
-               default:
-                       pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
-                       ret = reply((struct ib_smp *) pmp);
-                       goto bail;
-               }
-
-       case IB_MGMT_METHOD_GET_RESP:
-               /*
-                * The ib_mad module will call us to process responses
-                * before checking for other consumers.
-                * Just tell the caller to process it normally.
-                */
-               ret = IB_MAD_RESULT_SUCCESS;
-               goto bail;
-       default:
-               pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
-               ret = reply((struct ib_smp *) pmp);
-       }
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_process_mad - process an incoming MAD packet
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port_num: the port number this packet came in on
- * @in_wc: the work completion entry for this packet
- * @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
- *
- * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
- * interested in processing.
- *
- * Note that the verbs framework has already done the MAD sanity checks,
- * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * MADs.
- *
- * This is called by the ib_mad module.
- */
-int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                     const struct ib_mad_hdr *in, size_t in_mad_size,
-                     struct ib_mad_hdr *out, size_t *out_mad_size,
-                     u16 *out_mad_pkey_index)
-{
-       int ret;
-       const struct ib_mad *in_mad = (const struct ib_mad *)in;
-       struct ib_mad *out_mad = (struct ib_mad *)out;
-
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
-       switch (in_mad->mad_hdr.mgmt_class) {
-       case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
-       case IB_MGMT_CLASS_SUBN_LID_ROUTED:
-               ret = process_subn(ibdev, mad_flags, port_num,
-                                  in_mad, out_mad);
-               goto bail;
-       case IB_MGMT_CLASS_PERF_MGMT:
-               ret = process_perf(ibdev, port_num, in_mad, out_mad);
-               goto bail;
-       default:
-               ret = IB_MAD_RESULT_SUCCESS;
-       }
-
-bail:
-       return ret;
-}
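
The deleted ipath_mad.c above dispatches each incoming MAD first on its management class (subnet management vs. performance counters) and then on its method, setting the unsupported-method/attribute status bits for anything it does not handle and telling the core to process everything else normally. As a rough standalone illustration of that dispatch shape only, here is a small C sketch; the enum and function names are simplified stand-ins, not driver or ib_mad API:

#include <stdio.h>

/* Simplified stand-ins for the management classes handled above. */
enum mgmt_class { CLASS_SUBN_DIRECTED_ROUTE, CLASS_SUBN_LID_ROUTED,
                  CLASS_PERF_MGMT, CLASS_OTHER };

enum mad_result { MAD_RESULT_SUCCESS, MAD_RESULT_REPLY };

static enum mad_result handle_subn(void) { return MAD_RESULT_REPLY; }
static enum mad_result handle_perf(void) { return MAD_RESULT_REPLY; }

/* Dispatch on the management class; unknown classes are passed through. */
static enum mad_result dispatch_mad(enum mgmt_class c)
{
        switch (c) {
        case CLASS_SUBN_DIRECTED_ROUTE:
        case CLASS_SUBN_LID_ROUTED:
                return handle_subn();           /* subnet management agent */
        case CLASS_PERF_MGMT:
                return handle_perf();           /* performance counters */
        default:
                return MAD_RESULT_SUCCESS;      /* not ours: let the core handle it */
        }
}

int main(void)
{
        printf("unknown class -> %d\n", dispatch_mad(CLASS_OTHER));
        return 0;
}
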
diff --git a/drivers/staging/rdma/ipath/ipath_mmap.c b/drivers/staging/rdma/ipath/ipath_mmap.c
deleted file mode 100644 (file)
index e732742..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <asm/pgtable.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct ipath_mmap_info
- */
-void ipath_release_mmap_info(struct kref *ref)
-{
-       struct ipath_mmap_info *ip =
-               container_of(ref, struct ipath_mmap_info, ref);
-       struct ipath_ibdev *dev = to_idev(ip->context->device);
-
-       spin_lock_irq(&dev->pending_lock);
-       list_del(&ip->pending_mmaps);
-       spin_unlock_irq(&dev->pending_lock);
-
-       vfree(ip->obj);
-       kfree(ip);
-}
-
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void ipath_vma_open(struct vm_area_struct *vma)
-{
-       struct ipath_mmap_info *ip = vma->vm_private_data;
-
-       kref_get(&ip->ref);
-}
-
-static void ipath_vma_close(struct vm_area_struct *vma)
-{
-       struct ipath_mmap_info *ip = vma->vm_private_data;
-
-       kref_put(&ip->ref, ipath_release_mmap_info);
-}
-
-static const struct vm_operations_struct ipath_vm_ops = {
-       .open =     ipath_vma_open,
-       .close =    ipath_vma_close,
-};
-
-/**
- * ipath_mmap - create a new mmap region
- * @context: the IB user context of the process making the mmap() call
- * @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
- */
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-       struct ipath_ibdev *dev = to_idev(context->device);
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-       unsigned long size = vma->vm_end - vma->vm_start;
-       struct ipath_mmap_info *ip, *pp;
-       int ret = -EINVAL;
-
-       /*
-        * Search the device's list of objects waiting for a mmap call.
-        * Normally, this list is very short since a call to create a
-        * CQ, QP, or SRQ is soon followed by a call to mmap().
-        */
-       spin_lock_irq(&dev->pending_lock);
-       list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
-                                pending_mmaps) {
-               /* Only the creator is allowed to mmap the object */
-               if (context != ip->context || (__u64) offset != ip->offset)
-                       continue;
-               /* Don't allow a mmap larger than the object. */
-               if (size > ip->size)
-                       break;
-
-               list_del_init(&ip->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-
-               ret = remap_vmalloc_range(vma, ip->obj, 0);
-               if (ret)
-                       goto done;
-               vma->vm_ops = &ipath_vm_ops;
-               vma->vm_private_data = ip;
-               ipath_vma_open(vma);
-               goto done;
-       }
-       spin_unlock_irq(&dev->pending_lock);
-done:
-       return ret;
-}
-
-/*
- * Allocate information for ipath_mmap
- */
-struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
-                                              u32 size,
-                                              struct ib_ucontext *context,
-                                              void *obj) {
-       struct ipath_mmap_info *ip;
-
-       ip = kmalloc(sizeof *ip, GFP_KERNEL);
-       if (!ip)
-               goto bail;
-
-       size = PAGE_ALIGN(size);
-
-       spin_lock_irq(&dev->mmap_offset_lock);
-       if (dev->mmap_offset == 0)
-               dev->mmap_offset = PAGE_SIZE;
-       ip->offset = dev->mmap_offset;
-       dev->mmap_offset += size;
-       spin_unlock_irq(&dev->mmap_offset_lock);
-
-       INIT_LIST_HEAD(&ip->pending_mmaps);
-       ip->size = size;
-       ip->context = context;
-       ip->obj = obj;
-       kref_init(&ip->ref);
-
-bail:
-       return ip;
-}
-
-void ipath_update_mmap_info(struct ipath_ibdev *dev,
-                           struct ipath_mmap_info *ip,
-                           u32 size, void *obj) {
-       size = PAGE_ALIGN(size);
-
-       spin_lock_irq(&dev->mmap_offset_lock);
-       if (dev->mmap_offset == 0)
-               dev->mmap_offset = PAGE_SIZE;
-       ip->offset = dev->mmap_offset;
-       dev->mmap_offset += size;
-       spin_unlock_irq(&dev->mmap_offset_lock);
-
-       ip->size = size;
-       ip->obj = obj;
-}
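
The deleted ipath_mmap.c above ties the lifetime of the vmalloc'ed CQ/QP/SRQ buffers to their userspace mappings with a kref: ipath_vma_open() takes a reference, ipath_vma_close() drops one, and ipath_release_mmap_info() frees the object only when the last reference is gone. A minimal userspace analogue of that idea, using a plain counter in place of struct kref and hypothetical names throughout:

#include <stdio.h>
#include <stdlib.h>

struct mmap_info {
        int refcount;
        void *obj;              /* the vmalloc'ed object in the real driver */
};

static void mmap_info_get(struct mmap_info *ip)
{
        ip->refcount++;         /* a new mapping of the object was opened */
}

static void mmap_info_put(struct mmap_info *ip)
{
        if (--ip->refcount == 0) {      /* last mapping closed */
                free(ip->obj);
                free(ip);
        }
}

int main(void)
{
        struct mmap_info *ip = calloc(1, sizeof(*ip));

        ip->obj = malloc(4096);
        ip->refcount = 1;               /* creator's reference */

        mmap_info_get(ip);              /* vma open */
        mmap_info_put(ip);              /* vma close */
        mmap_info_put(ip);              /* creator drops its reference */
        puts("buffer released exactly once");
        return 0;
}
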
diff --git a/drivers/staging/rdma/ipath/ipath_mr.c b/drivers/staging/rdma/ipath/ipath_mr.c
deleted file mode 100644 (file)
index b76b0ce..0000000
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include <rdma/ib_umem.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_smi.h>
-
-#include "ipath_verbs.h"
-
-/* Fast memory region */
-struct ipath_fmr {
-       struct ib_fmr ibfmr;
-       u8 page_shift;
-       struct ipath_mregion mr;        /* must be last */
-};
-
-static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
-       return container_of(ibfmr, struct ipath_fmr, ibfmr);
-}
-
-/**
- * ipath_get_dma_mr - get a DMA memory region
- * @pd: protection domain for this memory region
- * @acc: access flags
- *
- * Returns the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the
- * struct ib_dma_mapping_ops functions (see ipath_dma.c).
- */
-struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
-{
-       struct ipath_mr *mr;
-       struct ib_mr *ret;
-
-       mr = kzalloc(sizeof *mr, GFP_KERNEL);
-       if (!mr) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       mr->mr.access_flags = acc;
-       ret = &mr->ibmr;
-
-bail:
-       return ret;
-}
-
-static struct ipath_mr *alloc_mr(int count,
-                                struct ipath_lkey_table *lk_table)
-{
-       struct ipath_mr *mr;
-       int m, i = 0;
-
-       /* Allocate struct plus pointers to first level page tables. */
-       m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
-       mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
-       if (!mr)
-               goto done;
-
-       /* Allocate first level page tables. */
-       for (; i < m; i++) {
-               mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
-               if (!mr->mr.map[i])
-                       goto bail;
-       }
-       mr->mr.mapsz = m;
-
-       if (!ipath_alloc_lkey(lk_table, &mr->mr))
-               goto bail;
-       mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
-
-       goto done;
-
-bail:
-       while (i) {
-               i--;
-               kfree(mr->mr.map[i]);
-       }
-       kfree(mr);
-       mr = NULL;
-
-done:
-       return mr;
-}
-
-/**
- * ipath_reg_user_mr - register a userspace memory region
- * @pd: protection domain for this memory region
- * @start: starting userspace address
- * @length: length of region to register
- * @virt_addr: virtual address to use (from HCA's point of view)
- * @mr_access_flags: access flags for this memory region
- * @udata: unused by the InfiniPath driver
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                               u64 virt_addr, int mr_access_flags,
-                               struct ib_udata *udata)
-{
-       struct ipath_mr *mr;
-       struct ib_umem *umem;
-       int n, m, entry;
-       struct scatterlist *sg;
-       struct ib_mr *ret;
-
-       if (length == 0) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       umem = ib_umem_get(pd->uobject->context, start, length,
-                          mr_access_flags, 0);
-       if (IS_ERR(umem))
-               return (void *) umem;
-
-       n = umem->nmap;
-       mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
-       if (!mr) {
-               ret = ERR_PTR(-ENOMEM);
-               ib_umem_release(umem);
-               goto bail;
-       }
-
-       mr->mr.pd = pd;
-       mr->mr.user_base = start;
-       mr->mr.iova = virt_addr;
-       mr->mr.length = length;
-       mr->mr.offset = ib_umem_offset(umem);
-       mr->mr.access_flags = mr_access_flags;
-       mr->mr.max_segs = n;
-       mr->umem = umem;
-
-       m = 0;
-       n = 0;
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-               void *vaddr;
-
-               vaddr = page_address(sg_page(sg));
-               if (!vaddr) {
-                       ret = ERR_PTR(-EINVAL);
-                       goto bail;
-               }
-               mr->mr.map[m]->segs[n].vaddr = vaddr;
-               mr->mr.map[m]->segs[n].length = umem->page_size;
-               n++;
-               if (n == IPATH_SEGSZ) {
-                       m++;
-                       n = 0;
-               }
-       }
-       ret = &mr->ibmr;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_dereg_mr - unregister and free a memory region
- * @ibmr: the memory region to free
- *
- * Returns 0 on success.
- *
- * Note that this is called to free MRs created by ipath_get_dma_mr()
- * or ipath_reg_user_mr().
- */
-int ipath_dereg_mr(struct ib_mr *ibmr)
-{
-       struct ipath_mr *mr = to_imr(ibmr);
-       int i;
-
-       ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
-       i = mr->mr.mapsz;
-       while (i) {
-               i--;
-               kfree(mr->mr.map[i]);
-       }
-
-       if (mr->umem)
-               ib_umem_release(mr->umem);
-
-       kfree(mr);
-       return 0;
-}
-
-/**
- * ipath_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-                              struct ib_fmr_attr *fmr_attr)
-{
-       struct ipath_fmr *fmr;
-       int m, i = 0;
-       struct ib_fmr *ret;
-
-       /* Allocate struct plus pointers to first level page tables. */
-       m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
-       fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
-       if (!fmr)
-               goto bail;
-
-       /* Allocate first level page tables. */
-       for (; i < m; i++) {
-               fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
-                                        GFP_KERNEL);
-               if (!fmr->mr.map[i])
-                       goto bail;
-       }
-       fmr->mr.mapsz = m;
-
-       /*
-        * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
-        * rkey.
-        */
-       if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
-               goto bail;
-       fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
-       /*
-        * Resources are allocated but no valid mapping (RKEY can't be
-        * used).
-        */
-       fmr->mr.pd = pd;
-       fmr->mr.user_base = 0;
-       fmr->mr.iova = 0;
-       fmr->mr.length = 0;
-       fmr->mr.offset = 0;
-       fmr->mr.access_flags = mr_access_flags;
-       fmr->mr.max_segs = fmr_attr->max_pages;
-       fmr->page_shift = fmr_attr->page_shift;
-
-       ret = &fmr->ibfmr;
-       goto done;
-
-bail:
-       while (i)
-               kfree(fmr->mr.map[--i]);
-       kfree(fmr);
-       ret = ERR_PTR(-ENOMEM);
-
-done:
-       return ret;
-}
-
-/**
- * ipath_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- */
-
-int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
-                      int list_len, u64 iova)
-{
-       struct ipath_fmr *fmr = to_ifmr(ibfmr);
-       struct ipath_lkey_table *rkt;
-       unsigned long flags;
-       int m, n, i;
-       u32 ps;
-       int ret;
-
-       if (list_len > fmr->mr.max_segs) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       rkt = &to_idev(ibfmr->device)->lk_table;
-       spin_lock_irqsave(&rkt->lock, flags);
-       fmr->mr.user_base = iova;
-       fmr->mr.iova = iova;
-       ps = 1 << fmr->page_shift;
-       fmr->mr.length = list_len * ps;
-       m = 0;
-       n = 0;
-       ps = 1 << fmr->page_shift;
-       for (i = 0; i < list_len; i++) {
-               fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
-               fmr->mr.map[m]->segs[n].length = ps;
-               if (++n == IPATH_SEGSZ) {
-                       m++;
-                       n = 0;
-               }
-       }
-       spin_unlock_irqrestore(&rkt->lock, flags);
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Returns 0 on success.
- */
-int ipath_unmap_fmr(struct list_head *fmr_list)
-{
-       struct ipath_fmr *fmr;
-       struct ipath_lkey_table *rkt;
-       unsigned long flags;
-
-       list_for_each_entry(fmr, fmr_list, ibfmr.list) {
-               rkt = &to_idev(fmr->ibfmr.device)->lk_table;
-               spin_lock_irqsave(&rkt->lock, flags);
-               fmr->mr.user_base = 0;
-               fmr->mr.iova = 0;
-               fmr->mr.length = 0;
-               spin_unlock_irqrestore(&rkt->lock, flags);
-       }
-       return 0;
-}
-
-/**
- * ipath_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Returns 0 on success.
- */
-int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
-{
-       struct ipath_fmr *fmr = to_ifmr(ibfmr);
-       int i;
-
-       ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
-       i = fmr->mr.mapsz;
-       while (i)
-               kfree(fmr->mr.map[--i]);
-       kfree(fmr);
-       return 0;
-}
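
The MR/FMR code above stores page addresses in a two-level layout: a region of count pages needs (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ first-level maps, and entries are filled as map[m]->segs[n] with n wrapping back to zero at IPATH_SEGSZ. A self-contained sketch of that indexing, where SEGSZ and the struct names are simplified stand-ins rather than the driver's types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SEGSZ 4                         /* stand-in for IPATH_SEGSZ */

struct seg     { void *vaddr; size_t length; };
struct seg_map { struct seg segs[SEGSZ]; };

int main(void)
{
        size_t count = 10;                              /* pages to map */
        size_t nmaps = (count + SEGSZ - 1) / SEGSZ;     /* 3 first-level maps */
        struct seg_map **map = calloc(nmaps, sizeof(*map));
        size_t m = 0, n = 0, i;

        for (i = 0; i < nmaps; i++)
                map[i] = calloc(1, sizeof(**map));

        /* Fill segments the same way the umem scatterlist walk does above. */
        for (i = 0; i < count; i++) {
                map[m]->segs[n].vaddr  = (void *)(uintptr_t)(0x1000u * (i + 1));
                map[m]->segs[n].length = 4096;
                if (++n == SEGSZ) {
                        m++;
                        n = 0;
                }
        }
        printf("%zu pages packed into %zu maps of %d segments each\n",
               count, nmaps, SEGSZ);

        for (i = 0; i < nmaps; i++)
                free(map[i]);
        free(map);
        return 0;
}
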
diff --git a/drivers/staging/rdma/ipath/ipath_qp.c b/drivers/staging/rdma/ipath/ipath_qp.c
deleted file mode 100644 (file)
index 280cd2d..0000000
+++ /dev/null
@@ -1,1079 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-#define BITS_PER_PAGE          (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK     (BITS_PER_PAGE-1)
-#define mk_qpn(qpt, map, off)  (((map) - (qpt)->map) * BITS_PER_PAGE + \
-                                (off))
-#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
-                                                     BITS_PER_PAGE, off)
-
-/*
- * Convert the AETH credit code into the number of credits.
- */
-static u32 credit_table[31] = {
-       0,                      /* 0 */
-       1,                      /* 1 */
-       2,                      /* 2 */
-       3,                      /* 3 */
-       4,                      /* 4 */
-       6,                      /* 5 */
-       8,                      /* 6 */
-       12,                     /* 7 */
-       16,                     /* 8 */
-       24,                     /* 9 */
-       32,                     /* A */
-       48,                     /* B */
-       64,                     /* C */
-       96,                     /* D */
-       128,                    /* E */
-       192,                    /* F */
-       256,                    /* 10 */
-       384,                    /* 11 */
-       512,                    /* 12 */
-       768,                    /* 13 */
-       1024,                   /* 14 */
-       1536,                   /* 15 */
-       2048,                   /* 16 */
-       3072,                   /* 17 */
-       4096,                   /* 18 */
-       6144,                   /* 19 */
-       8192,                   /* 1A */
-       12288,                  /* 1B */
-       16384,                  /* 1C */
-       24576,                  /* 1D */
-       32768                   /* 1E */
-};
-
-
-static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
-{
-       unsigned long page = get_zeroed_page(GFP_KERNEL);
-       unsigned long flags;
-
-       /*
-        * Free the page if someone raced with us installing it.
-        */
-
-       spin_lock_irqsave(&qpt->lock, flags);
-       if (map->page)
-               free_page(page);
-       else
-               map->page = (void *)page;
-       spin_unlock_irqrestore(&qpt->lock, flags);
-}
-
-
-static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
-{
-       u32 i, offset, max_scan, qpn;
-       struct qpn_map *map;
-       u32 ret = -1;
-
-       if (type == IB_QPT_SMI)
-               ret = 0;
-       else if (type == IB_QPT_GSI)
-               ret = 1;
-
-       if (ret != -1) {
-               map = &qpt->map[0];
-               if (unlikely(!map->page)) {
-                       get_map_page(qpt, map);
-                       if (unlikely(!map->page)) {
-                               ret = -ENOMEM;
-                               goto bail;
-                       }
-               }
-               if (!test_and_set_bit(ret, map->page))
-                       atomic_dec(&map->n_free);
-               else
-                       ret = -EBUSY;
-               goto bail;
-       }
-
-       qpn = qpt->last + 1;
-       if (qpn >= QPN_MAX)
-               qpn = 2;
-       offset = qpn & BITS_PER_PAGE_MASK;
-       map = &qpt->map[qpn / BITS_PER_PAGE];
-       max_scan = qpt->nmaps - !offset;
-       for (i = 0;;) {
-               if (unlikely(!map->page)) {
-                       get_map_page(qpt, map);
-                       if (unlikely(!map->page))
-                               break;
-               }
-               if (likely(atomic_read(&map->n_free))) {
-                       do {
-                               if (!test_and_set_bit(offset, map->page)) {
-                                       atomic_dec(&map->n_free);
-                                       qpt->last = qpn;
-                                       ret = qpn;
-                                       goto bail;
-                               }
-                               offset = find_next_offset(map, offset);
-                               qpn = mk_qpn(qpt, map, offset);
-                               /*
-                                * This test differs from alloc_pidmap().
-                                * If find_next_offset() does find a zero
-                                * bit, we don't need to check for QPN
-                                * wrapping around past our starting QPN.
-                                * We just need to be sure we don't loop
-                                * forever.
-                                */
-                       } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
-               }
-               /*
-                * In order to keep the number of pages allocated to a
-                * minimum, we scan all the existing pages before increasing
-                * the size of the bitmap table.
-                */
-               if (++i > max_scan) {
-                       if (qpt->nmaps == QPNMAP_ENTRIES)
-                               break;
-                       map = &qpt->map[qpt->nmaps++];
-                       offset = 0;
-               } else if (map < &qpt->map[qpt->nmaps]) {
-                       ++map;
-                       offset = 0;
-               } else {
-                       map = &qpt->map[0];
-                       offset = 2;
-               }
-               qpn = mk_qpn(qpt, map, offset);
-       }
-
-       ret = -ENOMEM;
-
-bail:
-       return ret;
-}
-
-static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
-{
-       struct qpn_map *map;
-
-       map = qpt->map + qpn / BITS_PER_PAGE;
-       if (map->page)
-               clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
-       atomic_inc(&map->n_free);
-}
-
-/**
- * ipath_alloc_qpn - allocate a QP number
- * @qpt: the QP table
- * @qp: the QP
- * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
- *
- * Allocate the next available QPN and put the QP into the hash table.
- * The hash table holds a reference to the QP.
- */
-static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
-                          enum ib_qp_type type)
-{
-       unsigned long flags;
-       int ret;
-
-       ret = alloc_qpn(qpt, type);
-       if (ret < 0)
-               goto bail;
-       qp->ibqp.qp_num = ret;
-
-       /* Add the QP to the hash table. */
-       spin_lock_irqsave(&qpt->lock, flags);
-
-       ret %= qpt->max;
-       qp->next = qpt->table[ret];
-       qpt->table[ret] = qp;
-       atomic_inc(&qp->refcount);
-
-       spin_unlock_irqrestore(&qpt->lock, flags);
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_free_qp - remove a QP from the QP table
- * @qpt: the QP table
- * @qp: the QP to remove
- *
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
-{
-       struct ipath_qp *q, **qpp;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qpt->lock, flags);
-
-       /* Remove QP from the hash table. */
-       qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
-       for (; (q = *qpp) != NULL; qpp = &q->next) {
-               if (q == qp) {
-                       *qpp = qp->next;
-                       qp->next = NULL;
-                       atomic_dec(&qp->refcount);
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(&qpt->lock, flags);
-}
-
-/**
- * ipath_free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
- *
- * There should not be any QPs still in use.
- * Free memory for table.
- */
-unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
-{
-       unsigned long flags;
-       struct ipath_qp *qp;
-       u32 n, qp_inuse = 0;
-
-       spin_lock_irqsave(&qpt->lock, flags);
-       for (n = 0; n < qpt->max; n++) {
-               qp = qpt->table[n];
-               qpt->table[n] = NULL;
-
-               for (; qp; qp = qp->next)
-                       qp_inuse++;
-       }
-       spin_unlock_irqrestore(&qpt->lock, flags);
-
-       for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
-               if (qpt->map[n].page)
-                       free_page((unsigned long) qpt->map[n].page);
-       return qp_inuse;
-}
-
-/**
- * ipath_lookup_qpn - return the QP with the given QPN
- * @qpt: the QP table
- * @qpn: the QP number to look up
- *
- * The caller is responsible for decrementing the QP reference count
- * when done.
- */
-struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
-{
-       unsigned long flags;
-       struct ipath_qp *qp;
-
-       spin_lock_irqsave(&qpt->lock, flags);
-
-       for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
-               if (qp->ibqp.qp_num == qpn) {
-                       atomic_inc(&qp->refcount);
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(&qpt->lock, flags);
-       return qp;
-}
-
-/**
- * ipath_reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
- * @type: the QP type
- */
-static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
-{
-       qp->remote_qpn = 0;
-       qp->qkey = 0;
-       qp->qp_access_flags = 0;
-       atomic_set(&qp->s_dma_busy, 0);
-       qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
-       qp->s_hdrwords = 0;
-       qp->s_wqe = NULL;
-       qp->s_pkt_delay = 0;
-       qp->s_draining = 0;
-       qp->s_psn = 0;
-       qp->r_psn = 0;
-       qp->r_msn = 0;
-       if (type == IB_QPT_RC) {
-               qp->s_state = IB_OPCODE_RC_SEND_LAST;
-               qp->r_state = IB_OPCODE_RC_SEND_LAST;
-       } else {
-               qp->s_state = IB_OPCODE_UC_SEND_LAST;
-               qp->r_state = IB_OPCODE_UC_SEND_LAST;
-       }
-       qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-       qp->r_nak_state = 0;
-       qp->r_aflags = 0;
-       qp->r_flags = 0;
-       qp->s_rnr_timeout = 0;
-       qp->s_head = 0;
-       qp->s_tail = 0;
-       qp->s_cur = 0;
-       qp->s_last = 0;
-       qp->s_ssn = 1;
-       qp->s_lsn = 0;
-       memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
-       qp->r_head_ack_queue = 0;
-       qp->s_tail_ack_queue = 0;
-       qp->s_num_rd_atomic = 0;
-       if (qp->r_rq.wq) {
-               qp->r_rq.wq->head = 0;
-               qp->r_rq.wq->tail = 0;
-       }
-}
-
-/**
- * ipath_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-
-int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ib_wc wc;
-       int ret = 0;
-
-       if (qp->state == IB_QPS_ERR)
-               goto bail;
-
-       qp->state = IB_QPS_ERR;
-
-       spin_lock(&dev->pending_lock);
-       if (!list_empty(&qp->timerwait))
-               list_del_init(&qp->timerwait);
-       if (!list_empty(&qp->piowait))
-               list_del_init(&qp->piowait);
-       spin_unlock(&dev->pending_lock);
-
-       /* Schedule the sending tasklet to drain the send work queue. */
-       if (qp->s_last != qp->s_head)
-               ipath_schedule_send(qp);
-
-       memset(&wc, 0, sizeof(wc));
-       wc.qp = &qp->ibqp;
-       wc.opcode = IB_WC_RECV;
-
-       if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
-               wc.wr_id = qp->r_wr_id;
-               wc.status = err;
-               ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-       }
-       wc.status = IB_WC_WR_FLUSH_ERR;
-
-       if (qp->r_rq.wq) {
-               struct ipath_rwq *wq;
-               u32 head;
-               u32 tail;
-
-               spin_lock(&qp->r_rq.lock);
-
-               /* sanity check pointers before trusting them */
-               wq = qp->r_rq.wq;
-               head = wq->head;
-               if (head >= qp->r_rq.size)
-                       head = 0;
-               tail = wq->tail;
-               if (tail >= qp->r_rq.size)
-                       tail = 0;
-               while (tail != head) {
-                       wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
-                       if (++tail >= qp->r_rq.size)
-                               tail = 0;
-                       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-               }
-               wq->tail = tail;
-
-               spin_unlock(&qp->r_rq.lock);
-       } else if (qp->ibqp.event_handler)
-               ret = 1;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair whose attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for ipathverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                   int attr_mask, struct ib_udata *udata)
-{
-       struct ipath_ibdev *dev = to_idev(ibqp->device);
-       struct ipath_qp *qp = to_iqp(ibqp);
-       enum ib_qp_state cur_state, new_state;
-       int lastwqe = 0;
-       int ret;
-
-       spin_lock_irq(&qp->s_lock);
-
-       cur_state = attr_mask & IB_QP_CUR_STATE ?
-               attr->cur_qp_state : qp->state;
-       new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-       if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-                               attr_mask, IB_LINK_LAYER_UNSPECIFIED))
-               goto inval;
-
-       if (attr_mask & IB_QP_AV) {
-               if (attr->ah_attr.dlid == 0 ||
-                   attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
-                       goto inval;
-
-               if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
-                   (attr->ah_attr.grh.sgid_index > 1))
-                       goto inval;
-       }
-
-       if (attr_mask & IB_QP_PKEY_INDEX)
-               if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
-                       goto inval;
-
-       if (attr_mask & IB_QP_MIN_RNR_TIMER)
-               if (attr->min_rnr_timer > 31)
-                       goto inval;
-
-       if (attr_mask & IB_QP_PORT)
-               if (attr->port_num == 0 ||
-                   attr->port_num > ibqp->device->phys_port_cnt)
-                       goto inval;
-
-       /*
-        * don't allow invalid path MTU values, or values greater than 2048
-        * unless we are configured for a 4KB MTU
-        */
-       if ((attr_mask & IB_QP_PATH_MTU) &&
-               (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
-               (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
-               goto inval;
-
-       if (attr_mask & IB_QP_PATH_MIG_STATE)
-               if (attr->path_mig_state != IB_MIG_MIGRATED &&
-                   attr->path_mig_state != IB_MIG_REARM)
-                       goto inval;
-
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-               if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
-                       goto inval;
-
-       switch (new_state) {
-       case IB_QPS_RESET:
-               if (qp->state != IB_QPS_RESET) {
-                       qp->state = IB_QPS_RESET;
-                       spin_lock(&dev->pending_lock);
-                       if (!list_empty(&qp->timerwait))
-                               list_del_init(&qp->timerwait);
-                       if (!list_empty(&qp->piowait))
-                               list_del_init(&qp->piowait);
-                       spin_unlock(&dev->pending_lock);
-                       qp->s_flags &= ~IPATH_S_ANY_WAIT;
-                       spin_unlock_irq(&qp->s_lock);
-                       /* Stop the sending tasklet */
-                       tasklet_kill(&qp->s_task);
-                       wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-                       spin_lock_irq(&qp->s_lock);
-               }
-               ipath_reset_qp(qp, ibqp->qp_type);
-               break;
-
-       case IB_QPS_SQD:
-               qp->s_draining = qp->s_last != qp->s_cur;
-               qp->state = new_state;
-               break;
-
-       case IB_QPS_SQE:
-               if (qp->ibqp.qp_type == IB_QPT_RC)
-                       goto inval;
-               qp->state = new_state;
-               break;
-
-       case IB_QPS_ERR:
-               lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-               break;
-
-       default:
-               qp->state = new_state;
-               break;
-       }
-
-       if (attr_mask & IB_QP_PKEY_INDEX)
-               qp->s_pkey_index = attr->pkey_index;
-
-       if (attr_mask & IB_QP_DEST_QPN)
-               qp->remote_qpn = attr->dest_qp_num;
-
-       if (attr_mask & IB_QP_SQ_PSN) {
-               qp->s_psn = qp->s_next_psn = attr->sq_psn;
-               qp->s_last_psn = qp->s_next_psn - 1;
-       }
-
-       if (attr_mask & IB_QP_RQ_PSN)
-               qp->r_psn = attr->rq_psn;
-
-       if (attr_mask & IB_QP_ACCESS_FLAGS)
-               qp->qp_access_flags = attr->qp_access_flags;
-
-       if (attr_mask & IB_QP_AV) {
-               qp->remote_ah_attr = attr->ah_attr;
-               qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
-       }
-
-       if (attr_mask & IB_QP_PATH_MTU)
-               qp->path_mtu = attr->path_mtu;
-
-       if (attr_mask & IB_QP_RETRY_CNT)
-               qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;
-
-       if (attr_mask & IB_QP_RNR_RETRY) {
-               qp->s_rnr_retry = attr->rnr_retry;
-               if (qp->s_rnr_retry > 7)
-                       qp->s_rnr_retry = 7;
-               qp->s_rnr_retry_cnt = qp->s_rnr_retry;
-       }
-
-       if (attr_mask & IB_QP_MIN_RNR_TIMER)
-               qp->r_min_rnr_timer = attr->min_rnr_timer;
-
-       if (attr_mask & IB_QP_TIMEOUT)
-               qp->timeout = attr->timeout;
-
-       if (attr_mask & IB_QP_QKEY)
-               qp->qkey = attr->qkey;
-
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-               qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
-
-       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
-               qp->s_max_rd_atomic = attr->max_rd_atomic;
-
-       spin_unlock_irq(&qp->s_lock);
-
-       if (lastwqe) {
-               struct ib_event ev;
-
-               ev.device = qp->ibqp.device;
-               ev.element.qp = &qp->ibqp;
-               ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-               qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-       }
-       ret = 0;
-       goto bail;
-
-inval:
-       spin_unlock_irq(&qp->s_lock);
-       ret = -EINVAL;
-
-bail:
-       return ret;
-}
-
-int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                  int attr_mask, struct ib_qp_init_attr *init_attr)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-
-       attr->qp_state = qp->state;
-       attr->cur_qp_state = attr->qp_state;
-       attr->path_mtu = qp->path_mtu;
-       attr->path_mig_state = 0;
-       attr->qkey = qp->qkey;
-       attr->rq_psn = qp->r_psn;
-       attr->sq_psn = qp->s_next_psn;
-       attr->dest_qp_num = qp->remote_qpn;
-       attr->qp_access_flags = qp->qp_access_flags;
-       attr->cap.max_send_wr = qp->s_size - 1;
-       attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
-       attr->cap.max_send_sge = qp->s_max_sge;
-       attr->cap.max_recv_sge = qp->r_rq.max_sge;
-       attr->cap.max_inline_data = 0;
-       attr->ah_attr = qp->remote_ah_attr;
-       memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
-       attr->pkey_index = qp->s_pkey_index;
-       attr->alt_pkey_index = 0;
-       attr->en_sqd_async_notify = 0;
-       attr->sq_draining = qp->s_draining;
-       attr->max_rd_atomic = qp->s_max_rd_atomic;
-       attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
-       attr->min_rnr_timer = qp->r_min_rnr_timer;
-       attr->port_num = 1;
-       attr->timeout = qp->timeout;
-       attr->retry_cnt = qp->s_retry_cnt;
-       attr->rnr_retry = qp->s_rnr_retry_cnt;
-       attr->alt_port_num = 0;
-       attr->alt_timeout = 0;
-
-       init_attr->event_handler = qp->ibqp.event_handler;
-       init_attr->qp_context = qp->ibqp.qp_context;
-       init_attr->send_cq = qp->ibqp.send_cq;
-       init_attr->recv_cq = qp->ibqp.recv_cq;
-       init_attr->srq = qp->ibqp.srq;
-       init_attr->cap = attr->cap;
-       if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
-               init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
-       else
-               init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
-       init_attr->qp_type = qp->ibqp.qp_type;
-       init_attr->port_num = 1;
-       return 0;
-}
-
-/**
- * ipath_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
-__be32 ipath_compute_aeth(struct ipath_qp *qp)
-{
-       u32 aeth = qp->r_msn & IPATH_MSN_MASK;
-
-       if (qp->ibqp.srq) {
-               /*
-                * Shared receive queues don't generate credits.
-                * Set the credit field to the invalid value.
-                */
-               aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
-       } else {
-               u32 min, max, x;
-               u32 credits;
-               struct ipath_rwq *wq = qp->r_rq.wq;
-               u32 head;
-               u32 tail;
-
-               /* sanity check pointers before trusting them */
-               head = wq->head;
-               if (head >= qp->r_rq.size)
-                       head = 0;
-               tail = wq->tail;
-               if (tail >= qp->r_rq.size)
-                       tail = 0;
-               /*
-                * Compute the number of credits available (RWQEs).
-                * XXX Not holding the r_rq.lock here so there is a small
-                * chance that the pair of reads are not atomic.
-                */
-               credits = head - tail;
-               if ((int)credits < 0)
-                       credits += qp->r_rq.size;
-               /*
-                * Binary search the credit table to find the code to
-                * use.
-                */
-               min = 0;
-               max = 31;
-               for (;;) {
-                       x = (min + max) / 2;
-                       if (credit_table[x] == credits)
-                               break;
-                       if (credit_table[x] > credits)
-                               max = x;
-                       else if (min == x)
-                               break;
-                       else
-                               min = x;
-               }
-               aeth |= x << IPATH_AETH_CREDIT_SHIFT;
-       }
-       return cpu_to_be32(aeth);
-}
-
-/**
- * ipath_create_qp - create a queue pair for a device
- * @ibpd: the protection domain whose device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: unused by InfiniPath
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
-                             struct ib_qp_init_attr *init_attr,
-                             struct ib_udata *udata)
-{
-       struct ipath_qp *qp;
-       int err;
-       struct ipath_swqe *swq = NULL;
-       struct ipath_ibdev *dev;
-       size_t sz;
-       size_t sg_list_sz;
-       struct ib_qp *ret;
-
-       if (init_attr->create_flags) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
-           init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       /* Check receive queue parameters if no SRQ is specified. */
-       if (!init_attr->srq) {
-               if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
-                   init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
-                       ret = ERR_PTR(-EINVAL);
-                       goto bail;
-               }
-               if (init_attr->cap.max_send_sge +
-                   init_attr->cap.max_send_wr +
-                   init_attr->cap.max_recv_sge +
-                   init_attr->cap.max_recv_wr == 0) {
-                       ret = ERR_PTR(-EINVAL);
-                       goto bail;
-               }
-       }
-
-       switch (init_attr->qp_type) {
-       case IB_QPT_UC:
-       case IB_QPT_RC:
-       case IB_QPT_UD:
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               sz = sizeof(struct ipath_sge) *
-                       init_attr->cap.max_send_sge +
-                       sizeof(struct ipath_swqe);
-               swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
-               if (swq == NULL) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail;
-               }
-               sz = sizeof(*qp);
-               sg_list_sz = 0;
-               if (init_attr->srq) {
-                       struct ipath_srq *srq = to_isrq(init_attr->srq);
-
-                       if (srq->rq.max_sge > 1)
-                               sg_list_sz = sizeof(*qp->r_sg_list) *
-                                       (srq->rq.max_sge - 1);
-               } else if (init_attr->cap.max_recv_sge > 1)
-                       sg_list_sz = sizeof(*qp->r_sg_list) *
-                               (init_attr->cap.max_recv_sge - 1);
-               qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
-               if (!qp) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail_swq;
-               }
-               if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
-                   init_attr->qp_type == IB_QPT_SMI ||
-                   init_attr->qp_type == IB_QPT_GSI)) {
-                       qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
-                       if (!qp->r_ud_sg_list) {
-                               ret = ERR_PTR(-ENOMEM);
-                               goto bail_qp;
-                       }
-               } else
-                       qp->r_ud_sg_list = NULL;
-               if (init_attr->srq) {
-                       sz = 0;
-                       qp->r_rq.size = 0;
-                       qp->r_rq.max_sge = 0;
-                       qp->r_rq.wq = NULL;
-                       init_attr->cap.max_recv_wr = 0;
-                       init_attr->cap.max_recv_sge = 0;
-               } else {
-                       qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
-                       qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
-                       sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
-                               sizeof(struct ipath_rwqe);
-                       qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
-                                             qp->r_rq.size * sz);
-                       if (!qp->r_rq.wq) {
-                               ret = ERR_PTR(-ENOMEM);
-                               goto bail_sg_list;
-                       }
-               }
-
-               /*
-                * ib_create_qp() will initialize qp->ibqp
-                * except for qp->ibqp.qp_num.
-                */
-               spin_lock_init(&qp->s_lock);
-               spin_lock_init(&qp->r_rq.lock);
-               atomic_set(&qp->refcount, 0);
-               init_waitqueue_head(&qp->wait);
-               init_waitqueue_head(&qp->wait_dma);
-               tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
-               INIT_LIST_HEAD(&qp->piowait);
-               INIT_LIST_HEAD(&qp->timerwait);
-               qp->state = IB_QPS_RESET;
-               qp->s_wq = swq;
-               qp->s_size = init_attr->cap.max_send_wr + 1;
-               qp->s_max_sge = init_attr->cap.max_send_sge;
-               if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
-                       qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
-               else
-                       qp->s_flags = 0;
-               dev = to_idev(ibpd->device);
-               err = ipath_alloc_qpn(&dev->qp_table, qp,
-                                     init_attr->qp_type);
-               if (err) {
-                       ret = ERR_PTR(err);
-                       vfree(qp->r_rq.wq);
-                       goto bail_sg_list;
-               }
-               qp->ip = NULL;
-               qp->s_tx = NULL;
-               ipath_reset_qp(qp, init_attr->qp_type);
-               break;
-
-       default:
-               /* Don't support raw QPs */
-               ret = ERR_PTR(-ENOSYS);
-               goto bail;
-       }
-
-       init_attr->cap.max_inline_data = 0;
-
-       /*
-        * Return the address of the RWQ as the offset to mmap.
-        * See ipath_mmap() for details.
-        */
-       if (udata && udata->outlen >= sizeof(__u64)) {
-               if (!qp->r_rq.wq) {
-                       __u64 offset = 0;
-
-                       err = ib_copy_to_udata(udata, &offset,
-                                              sizeof(offset));
-                       if (err) {
-                               ret = ERR_PTR(err);
-                               goto bail_ip;
-                       }
-               } else {
-                       u32 s = sizeof(struct ipath_rwq) +
-                               qp->r_rq.size * sz;
-
-                       qp->ip =
-                           ipath_create_mmap_info(dev, s,
-                                                  ibpd->uobject->context,
-                                                  qp->r_rq.wq);
-                       if (!qp->ip) {
-                               ret = ERR_PTR(-ENOMEM);
-                               goto bail_ip;
-                       }
-
-                       err = ib_copy_to_udata(udata, &(qp->ip->offset),
-                                              sizeof(qp->ip->offset));
-                       if (err) {
-                               ret = ERR_PTR(err);
-                               goto bail_ip;
-                       }
-               }
-       }
-
-       spin_lock(&dev->n_qps_lock);
-       if (dev->n_qps_allocated == ib_ipath_max_qps) {
-               spin_unlock(&dev->n_qps_lock);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_ip;
-       }
-
-       dev->n_qps_allocated++;
-       spin_unlock(&dev->n_qps_lock);
-
-       if (qp->ip) {
-               spin_lock_irq(&dev->pending_lock);
-               list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-       }
-
-       ret = &qp->ibqp;
-       goto bail;
-
-bail_ip:
-       if (qp->ip)
-               kref_put(&qp->ip->ref, ipath_release_mmap_info);
-       else
-               vfree(qp->r_rq.wq);
-       ipath_free_qp(&dev->qp_table, qp);
-       free_qpn(&dev->qp_table, qp->ibqp.qp_num);
-bail_sg_list:
-       kfree(qp->r_ud_sg_list);
-bail_qp:
-       kfree(qp);
-bail_swq:
-       vfree(swq);
-bail:
-       return ret;
-}
-
-/**
- * ipath_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int ipath_destroy_qp(struct ib_qp *ibqp)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-       /* Make sure HW and driver activity is stopped. */
-       spin_lock_irq(&qp->s_lock);
-       if (qp->state != IB_QPS_RESET) {
-               qp->state = IB_QPS_RESET;
-               spin_lock(&dev->pending_lock);
-               if (!list_empty(&qp->timerwait))
-                       list_del_init(&qp->timerwait);
-               if (!list_empty(&qp->piowait))
-                       list_del_init(&qp->piowait);
-               spin_unlock(&dev->pending_lock);
-               qp->s_flags &= ~IPATH_S_ANY_WAIT;
-               spin_unlock_irq(&qp->s_lock);
-               /* Stop the sending tasklet */
-               tasklet_kill(&qp->s_task);
-               wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-       } else
-               spin_unlock_irq(&qp->s_lock);
-
-       ipath_free_qp(&dev->qp_table, qp);
-
-       if (qp->s_tx) {
-               atomic_dec(&qp->refcount);
-               if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
-                       kfree(qp->s_tx->txreq.map_addr);
-               spin_lock_irq(&dev->pending_lock);
-               list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
-               spin_unlock_irq(&dev->pending_lock);
-               qp->s_tx = NULL;
-       }
-
-       wait_event(qp->wait, !atomic_read(&qp->refcount));
-
-       /* all user's cleaned up, mark it available */
-       free_qpn(&dev->qp_table, qp->ibqp.qp_num);
-       spin_lock(&dev->n_qps_lock);
-       dev->n_qps_allocated--;
-       spin_unlock(&dev->n_qps_lock);
-
-       if (qp->ip)
-               kref_put(&qp->ip->ref, ipath_release_mmap_info);
-       else
-               vfree(qp->r_rq.wq);
-       kfree(qp->r_ud_sg_list);
-       vfree(qp->s_wq);
-       kfree(qp);
-       return 0;
-}
-
-/**
- * ipath_init_qp_table - initialize the QP table for a device
- * @idev: the device whose QP table we're initializing
- * @size: the size of the QP table
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
-{
-       int i;
-       int ret;
-
-       idev->qp_table.last = 1;        /* QPN 0 and 1 are special. */
-       idev->qp_table.max = size;
-       idev->qp_table.nmaps = 1;
-       idev->qp_table.table = kcalloc(size, sizeof(*idev->qp_table.table),
-                                      GFP_KERNEL);
-       if (idev->qp_table.table == NULL) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
-               atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
-               idev->qp_table.map[i].page = NULL;
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_get_credit - flush the send work queue of a QP
- * @qp: the qp whose send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
-void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
-{
-       u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
-
-       /*
-        * If the credit is invalid, we can send
-        * as many packets as we like.  Otherwise, we have to
-        * honor the credit field.
-        */
-       if (credit == IPATH_AETH_CREDIT_INVAL)
-               qp->s_lsn = (u32) -1;
-       else if (qp->s_lsn != (u32) -1) {
-               /* Compute new LSN (i.e., MSN + credit) */
-               credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
-               if (ipath_cmp24(credit, qp->s_lsn) > 0)
-                       qp->s_lsn = credit;
-       }
-
-       /* Restart sending if it was blocked due to lack of credits. */
-       if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
-           qp->s_cur != qp->s_head &&
-           (qp->s_lsn == (u32) -1 ||
-            ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
-                        qp->s_lsn + 1) <= 0))
-               ipath_schedule_send(qp);
-}
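
ipath_get_credit() above pulls the flow-control credit code out of the top byte of the AETH and treats one code as "invalid", meaning unlimited credit. As a standalone illustration only, assuming the usual IBTA layout (8-bit syndrome over a 24-bit MSN) and values of 24, 0x1f and 0x1f for the IPATH_AETH_CREDIT_SHIFT/MASK/INVAL constants:

    #include <stdint.h>

    #define AETH_CREDIT_SHIFT  24            /* assumed values; see ipath_verbs.h */
    #define AETH_CREDIT_MASK   0x1F
    #define AETH_CREDIT_INVAL  0x1F
    #define MSN_MASK           0xFFFFFF

    struct aeth_fields {
            uint32_t credit_code;            /* 5-bit encoded credit count */
            uint32_t msn;                    /* 24-bit message sequence number */
            int      credit_valid;           /* 0 means "send as much as you like" */
    };

    static struct aeth_fields decode_aeth(uint32_t aeth)
    {
            struct aeth_fields f;

            f.credit_code  = (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK;
            f.msn          = aeth & MSN_MASK;
            f.credit_valid = (f.credit_code != AETH_CREDIT_INVAL);
            return f;
    }
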
diff --git a/drivers/staging/rdma/ipath/ipath_rc.c b/drivers/staging/rdma/ipath/ipath_rc.c
deleted file mode 100644 (file)
index d4aa535..0000000
+++ /dev/null
@@ -1,1969 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/io.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
-
-static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
-                      u32 psn, u32 pmtu)
-{
-       u32 len;
-
-       len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
-       ss->sge = wqe->sg_list[0];
-       ss->sg_list = wqe->sg_list + 1;
-       ss->num_sge = wqe->wr.num_sge;
-       ipath_skip_sge(ss, len);
-       return wqe->length - len;
-}
-
-/**
- * ipath_init_restart - initialize the qp->s_sge after a restart
- * @qp: the QP whose SGE we're restarting
- * @wqe: the work queue to initialize the QP's SGE from
- *
- * The QP s_lock should be held and interrupts disabled.
- */
-static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
-{
-       struct ipath_ibdev *dev;
-
-       qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
-                               ib_mtu_enum_to_int(qp->path_mtu));
-       dev = to_idev(qp->ibqp.device);
-       spin_lock(&dev->pending_lock);
-       if (list_empty(&qp->timerwait))
-               list_add_tail(&qp->timerwait,
-                             &dev->pending[dev->pending_index]);
-       spin_unlock(&dev->pending_lock);
-}
-
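
restart_sge() and ipath_init_restart() above resume a partially sent request at the packet named by a PSN; the byte offset into the WQE is just the 24-bit PSN delta times the path MTU. A minimal sketch of that arithmetic (illustration only, not the driver's helper):

    #include <stdint.h>

    #define PSN_MASK 0xFFFFFF                /* PSNs are 24-bit values */

    /* Byte offset at which to resume a request restarted at restart_psn. */
    static uint32_t restart_offset(uint32_t first_psn, uint32_t restart_psn,
                                   uint32_t pmtu)
    {
            return ((restart_psn - first_psn) & PSN_MASK) * pmtu;
    }

    /* e.g. restart_offset(0x10, 0x13, 2048) == 3 * 2048 == 6144 bytes */
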
-/**
- * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
- * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- *
- * Return 1 if constructed; otherwise, return 0.
- * Note that we are on the responder side of the QP context.
- * Note the QP s_lock must be held.
- */
-static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
-                            struct ipath_other_headers *ohdr, u32 pmtu)
-{
-       struct ipath_ack_entry *e;
-       u32 hwords;
-       u32 len;
-       u32 bth0;
-       u32 bth2;
-
-       /* Don't send an ACK if we aren't supposed to. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-               goto bail;
-
-       /* header size in 32-bit words LRH+BTH = (8+12)/4. */
-       hwords = 5;
-
-       switch (qp->s_ack_state) {
-       case OP(RDMA_READ_RESPONSE_LAST):
-       case OP(RDMA_READ_RESPONSE_ONLY):
-       case OP(ATOMIC_ACKNOWLEDGE):
-               /*
-                * We can increment the tail pointer now that the last
-                * response has been sent instead of only being
-                * constructed.
-                */
-               if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-                       qp->s_tail_ack_queue = 0;
-               /* FALLTHROUGH */
-       case OP(SEND_ONLY):
-       case OP(ACKNOWLEDGE):
-               /* Check for no next entry in the queue. */
-               if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
-                       if (qp->s_flags & IPATH_S_ACK_PENDING)
-                               goto normal;
-                       qp->s_ack_state = OP(ACKNOWLEDGE);
-                       goto bail;
-               }
-
-               e = &qp->s_ack_queue[qp->s_tail_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST)) {
-                       /* Copy SGE state in case we need to resend */
-                       qp->s_ack_rdma_sge = e->rdma_sge;
-                       qp->s_cur_sge = &qp->s_ack_rdma_sge;
-                       len = e->rdma_sge.sge.sge_length;
-                       if (len > pmtu) {
-                               len = pmtu;
-                               qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-                       } else {
-                               qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-                               e->sent = 1;
-                       }
-                       ohdr->u.aeth = ipath_compute_aeth(qp);
-                       hwords++;
-                       qp->s_ack_rdma_psn = e->psn;
-                       bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
-               } else {
-                       /* COMPARE_SWAP or FETCH_ADD */
-                       qp->s_cur_sge = NULL;
-                       len = 0;
-                       qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
-                       ohdr->u.at.aeth = ipath_compute_aeth(qp);
-                       ohdr->u.at.atomic_ack_eth[0] =
-                               cpu_to_be32(e->atomic_data >> 32);
-                       ohdr->u.at.atomic_ack_eth[1] =
-                               cpu_to_be32(e->atomic_data);
-                       hwords += sizeof(ohdr->u.at) / sizeof(u32);
-                       bth2 = e->psn;
-                       e->sent = 1;
-               }
-               bth0 = qp->s_ack_state << 24;
-               break;
-
-       case OP(RDMA_READ_RESPONSE_FIRST):
-               qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(RDMA_READ_RESPONSE_MIDDLE):
-               len = qp->s_ack_rdma_sge.sge.sge_length;
-               if (len > pmtu)
-                       len = pmtu;
-               else {
-                       ohdr->u.aeth = ipath_compute_aeth(qp);
-                       hwords++;
-                       qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-                       qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
-               }
-               bth0 = qp->s_ack_state << 24;
-               bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
-               break;
-
-       default:
-       normal:
-               /*
-                * Send a regular ACK.
-                * Set s_ack_state to SEND_ONLY so that it only advances
-                * to ACKNOWLEDGE after this ACK has actually been sent
-                * (see the cases above).
-                */
-               qp->s_ack_state = OP(SEND_ONLY);
-               qp->s_flags &= ~IPATH_S_ACK_PENDING;
-               qp->s_cur_sge = NULL;
-               if (qp->s_nak_state)
-                       ohdr->u.aeth =
-                               cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
-                                           (qp->s_nak_state <<
-                                            IPATH_AETH_CREDIT_SHIFT));
-               else
-                       ohdr->u.aeth = ipath_compute_aeth(qp);
-               hwords++;
-               len = 0;
-               bth0 = OP(ACKNOWLEDGE) << 24;
-               bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
-       }
-       qp->s_hdrwords = hwords;
-       qp->s_cur_size = len;
-       ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
-       return 1;
-
-bail:
-       return 0;
-}
-
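
ipath_make_rc_ack() above picks RDMA_READ_RESPONSE_ONLY when the whole read fits in one path MTU, and otherwise walks FIRST, MIDDLE..., LAST as the remaining length shrinks. The opcode sequence for a given read length can be sketched in isolation like this (the names are plain strings here, not the IB_OPCODE_RC_* values):

    #include <stdio.h>
    #include <stdint.h>

    /* Print the response opcodes an RDMA read of 'len' bytes produces. */
    static void read_response_opcodes(uint32_t len, uint32_t pmtu)
    {
            if (len <= pmtu) {
                    puts("RDMA_READ_RESPONSE_ONLY");
                    return;
            }
            puts("RDMA_READ_RESPONSE_FIRST");
            len -= pmtu;
            while (len > pmtu) {
                    puts("RDMA_READ_RESPONSE_MIDDLE");
                    len -= pmtu;
            }
            puts("RDMA_READ_RESPONSE_LAST");
    }

    /* read_response_opcodes(5000, 2048) prints FIRST, MIDDLE, LAST */
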
-/**
- * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_rc_req(struct ipath_qp *qp)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_other_headers *ohdr;
-       struct ipath_sge_state *ss;
-       struct ipath_swqe *wqe;
-       u32 hwords;
-       u32 len;
-       u32 bth0;
-       u32 bth2;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-       char newreq;
-       unsigned long flags;
-       int ret = 0;
-
-       ohdr = &qp->s_hdr.u.oth;
-       if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-               ohdr = &qp->s_hdr.u.l.oth;
-
-       /*
-        * The lock is needed to synchronize between the sending tasklet,
-        * the receive interrupt handler, and timeout resends.
-        */
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Sending responses takes priority over sending requests. */
-       if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
-            (qp->s_flags & IPATH_S_ACK_PENDING) ||
-            qp->s_ack_state != OP(ACKNOWLEDGE)) &&
-           ipath_make_rc_ack(dev, qp, ohdr, pmtu))
-               goto done;
-
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == qp->s_head)
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (atomic_read(&qp->s_dma_busy)) {
-                       qp->s_flags |= IPATH_S_WAIT_DMA;
-                       goto bail;
-               }
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-               goto done;
-       }
-
-       /* Leave BUSY set until RNR timeout. */
-       if (qp->s_rnr_timeout) {
-               qp->s_flags |= IPATH_S_WAITING;
-               goto bail;
-       }
-
-       /* header size in 32-bit words LRH+BTH = (8+12)/4. */
-       hwords = 5;
-       bth0 = 1 << 22; /* Set M bit */
-
-       /* Send a request. */
-       wqe = get_swqe_ptr(qp, qp->s_cur);
-       switch (qp->s_state) {
-       default:
-               if (!(ib_ipath_state_ops[qp->state] &
-                   IPATH_PROCESS_NEXT_SEND_OK))
-                       goto bail;
-               /*
-                * Resend an old request or start a new one.
-                *
-                * We keep track of the current SWQE so that
-                * we don't reset the "furthest progress" state
-                * if we need to back up.
-                */
-               newreq = 0;
-               if (qp->s_cur == qp->s_tail) {
-                       /* Check if send work queue is empty. */
-                       if (qp->s_tail == qp->s_head)
-                               goto bail;
-                       /*
-                        * If a fence is requested, wait for previous
-                        * RDMA read and atomic operations to finish.
-                        */
-                       if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
-                           qp->s_num_rd_atomic) {
-                               qp->s_flags |= IPATH_S_FENCE_PENDING;
-                               goto bail;
-                       }
-                       wqe->psn = qp->s_next_psn;
-                       newreq = 1;
-               }
-               /*
-                * Note that we have to be careful not to modify the
-                * original work request since we may need to resend
-                * it.
-                */
-               len = wqe->length;
-               ss = &qp->s_sge;
-               bth2 = 0;
-               switch (wqe->wr.opcode) {
-               case IB_WR_SEND:
-               case IB_WR_SEND_WITH_IMM:
-                       /* If no credit, return. */
-                       if (qp->s_lsn != (u32) -1 &&
-                           ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
-                               qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
-                               goto bail;
-                       }
-                       wqe->lpsn = wqe->psn;
-                       if (len > pmtu) {
-                               wqe->lpsn += (len - 1) / pmtu;
-                               qp->s_state = OP(SEND_FIRST);
-                               len = pmtu;
-                               break;
-                       }
-                       if (wqe->wr.opcode == IB_WR_SEND)
-                               qp->s_state = OP(SEND_ONLY);
-                       else {
-                               qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes after the BTH */
-                               ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                               hwords += 1;
-                       }
-                       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                               bth0 |= 1 << 23;
-                       bth2 = 1 << 31; /* Request ACK. */
-                       if (++qp->s_cur == qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               case IB_WR_RDMA_WRITE:
-                       if (newreq && qp->s_lsn != (u32) -1)
-                               qp->s_lsn++;
-                       /* FALLTHROUGH */
-               case IB_WR_RDMA_WRITE_WITH_IMM:
-                       /* If no credit, return. */
-                       if (qp->s_lsn != (u32) -1 &&
-                           ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
-                               qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
-                               goto bail;
-                       }
-                       ohdr->u.rc.reth.vaddr =
-                               cpu_to_be64(wqe->rdma_wr.remote_addr);
-                       ohdr->u.rc.reth.rkey =
-                               cpu_to_be32(wqe->rdma_wr.rkey);
-                       ohdr->u.rc.reth.length = cpu_to_be32(len);
-                       hwords += sizeof(struct ib_reth) / sizeof(u32);
-                       wqe->lpsn = wqe->psn;
-                       if (len > pmtu) {
-                               wqe->lpsn += (len - 1) / pmtu;
-                               qp->s_state = OP(RDMA_WRITE_FIRST);
-                               len = pmtu;
-                               break;
-                       }
-                       if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-                               qp->s_state = OP(RDMA_WRITE_ONLY);
-                       else {
-                               qp->s_state =
-                                       OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes after RETH */
-                               ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
-                               hwords += 1;
-                               if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                                       bth0 |= 1 << 23;
-                       }
-                       bth2 = 1 << 31; /* Request ACK. */
-                       if (++qp->s_cur == qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               case IB_WR_RDMA_READ:
-                       /*
-                        * Don't allow more operations to be started
-                        * than the QP limits allow.
-                        */
-                       if (newreq) {
-                               if (qp->s_num_rd_atomic >=
-                                   qp->s_max_rd_atomic) {
-                                       qp->s_flags |= IPATH_S_RDMAR_PENDING;
-                                       goto bail;
-                               }
-                               qp->s_num_rd_atomic++;
-                               if (qp->s_lsn != (u32) -1)
-                                       qp->s_lsn++;
-                               /*
-                                * Adjust s_next_psn to count the
-                                * expected number of responses.
-                                */
-                               if (len > pmtu)
-                                       qp->s_next_psn += (len - 1) / pmtu;
-                               wqe->lpsn = qp->s_next_psn++;
-                       }
-                       ohdr->u.rc.reth.vaddr =
-                               cpu_to_be64(wqe->rdma_wr.remote_addr);
-                       ohdr->u.rc.reth.rkey =
-                               cpu_to_be32(wqe->rdma_wr.rkey);
-                       ohdr->u.rc.reth.length = cpu_to_be32(len);
-                       qp->s_state = OP(RDMA_READ_REQUEST);
-                       hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
-                       ss = NULL;
-                       len = 0;
-                       if (++qp->s_cur == qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               case IB_WR_ATOMIC_CMP_AND_SWP:
-               case IB_WR_ATOMIC_FETCH_AND_ADD:
-                       /*
-                        * Don't allow more operations to be started
-                        * than the QP limits allow.
-                        */
-                       if (newreq) {
-                               if (qp->s_num_rd_atomic >=
-                                   qp->s_max_rd_atomic) {
-                                       qp->s_flags |= IPATH_S_RDMAR_PENDING;
-                                       goto bail;
-                               }
-                               qp->s_num_rd_atomic++;
-                               if (qp->s_lsn != (u32) -1)
-                                       qp->s_lsn++;
-                               wqe->lpsn = wqe->psn;
-                       }
-                       if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-                               qp->s_state = OP(COMPARE_SWAP);
-                               ohdr->u.atomic_eth.swap_data = cpu_to_be64(
-                                       wqe->atomic_wr.swap);
-                               ohdr->u.atomic_eth.compare_data = cpu_to_be64(
-                                       wqe->atomic_wr.compare_add);
-                       } else {
-                               qp->s_state = OP(FETCH_ADD);
-                               ohdr->u.atomic_eth.swap_data = cpu_to_be64(
-                                       wqe->atomic_wr.compare_add);
-                               ohdr->u.atomic_eth.compare_data = 0;
-                       }
-                       ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
-                               wqe->atomic_wr.remote_addr >> 32);
-                       ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
-                               wqe->atomic_wr.remote_addr);
-                       ohdr->u.atomic_eth.rkey = cpu_to_be32(
-                               wqe->atomic_wr.rkey);
-                       hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
-                       ss = NULL;
-                       len = 0;
-                       if (++qp->s_cur == qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               default:
-                       goto bail;
-               }
-               qp->s_sge.sge = wqe->sg_list[0];
-               qp->s_sge.sg_list = wqe->sg_list + 1;
-               qp->s_sge.num_sge = wqe->wr.num_sge;
-               qp->s_len = wqe->length;
-               if (newreq) {
-                       qp->s_tail++;
-                       if (qp->s_tail >= qp->s_size)
-                               qp->s_tail = 0;
-               }
-               bth2 |= qp->s_psn & IPATH_PSN_MASK;
-               if (wqe->wr.opcode == IB_WR_RDMA_READ)
-                       qp->s_psn = wqe->lpsn + 1;
-               else {
-                       qp->s_psn++;
-                       if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-                               qp->s_next_psn = qp->s_psn;
-               }
-               /*
-                * Put the QP on the pending list so lost ACKs will cause
-                * a retry.  More than one request can be pending so the
-                * QP may already be on the dev->pending list.
-                */
-               spin_lock(&dev->pending_lock);
-               if (list_empty(&qp->timerwait))
-                       list_add_tail(&qp->timerwait,
-                                     &dev->pending[dev->pending_index]);
-               spin_unlock(&dev->pending_lock);
-               break;
-
-       case OP(RDMA_READ_RESPONSE_FIRST):
-               /*
-                * This case can only happen if a send is restarted.
-                * See ipath_restart_rc().
-                */
-               ipath_init_restart(qp, wqe);
-               /* FALLTHROUGH */
-       case OP(SEND_FIRST):
-               qp->s_state = OP(SEND_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(SEND_MIDDLE):
-               bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-               if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-                       qp->s_next_psn = qp->s_psn;
-               ss = &qp->s_sge;
-               len = qp->s_len;
-               if (len > pmtu) {
-                       len = pmtu;
-                       break;
-               }
-               if (wqe->wr.opcode == IB_WR_SEND)
-                       qp->s_state = OP(SEND_LAST);
-               else {
-                       qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
-                       /* Immediate data comes after the BTH */
-                       ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                       hwords += 1;
-               }
-               if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                       bth0 |= 1 << 23;
-               bth2 |= 1 << 31;        /* Request ACK. */
-               qp->s_cur++;
-               if (qp->s_cur >= qp->s_size)
-                       qp->s_cur = 0;
-               break;
-
-       case OP(RDMA_READ_RESPONSE_LAST):
-               /*
-                * This case can only happen if an RDMA write is restarted.
-                * See ipath_restart_rc().
-                */
-               ipath_init_restart(qp, wqe);
-               /* FALLTHROUGH */
-       case OP(RDMA_WRITE_FIRST):
-               qp->s_state = OP(RDMA_WRITE_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(RDMA_WRITE_MIDDLE):
-               bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-               if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-                       qp->s_next_psn = qp->s_psn;
-               ss = &qp->s_sge;
-               len = qp->s_len;
-               if (len > pmtu) {
-                       len = pmtu;
-                       break;
-               }
-               if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-                       qp->s_state = OP(RDMA_WRITE_LAST);
-               else {
-                       qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
-                       /* Immediate data comes after the BTH */
-                       ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                       hwords += 1;
-                       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                               bth0 |= 1 << 23;
-               }
-               bth2 |= 1 << 31;        /* Request ACK. */
-               qp->s_cur++;
-               if (qp->s_cur >= qp->s_size)
-                       qp->s_cur = 0;
-               break;
-
-       case OP(RDMA_READ_RESPONSE_MIDDLE):
-               /*
-                * This case can only happen if an RDMA read is restarted.
-                * See ipath_restart_rc().
-                */
-               ipath_init_restart(qp, wqe);
-               len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
-               ohdr->u.rc.reth.vaddr =
-                       cpu_to_be64(wqe->rdma_wr.remote_addr + len);
-               ohdr->u.rc.reth.rkey =
-                       cpu_to_be32(wqe->rdma_wr.rkey);
-               ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
-               qp->s_state = OP(RDMA_READ_REQUEST);
-               hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
-               bth2 = qp->s_psn & IPATH_PSN_MASK;
-               qp->s_psn = wqe->lpsn + 1;
-               ss = NULL;
-               len = 0;
-               qp->s_cur++;
-               if (qp->s_cur == qp->s_size)
-                       qp->s_cur = 0;
-               break;
-       }
-       if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
-               bth2 |= 1 << 31;        /* Request ACK. */
-       qp->s_len -= len;
-       qp->s_hdrwords = hwords;
-       qp->s_cur_sge = ss;
-       qp->s_cur_size = len;
-       ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
-done:
-       ret = 1;
-       goto unlock;
-
-bail:
-       qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       return ret;
-}
-
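
ipath_make_rc_req() above stamps each work request with a PSN range: the first packet uses wqe->psn and, for sends and RDMA writes longer than one MTU, wqe->lpsn is psn + (len - 1) / pmtu. The same arithmetic as a standalone sketch:

    #include <stdint.h>

    #define PSN_MASK 0xFFFFFF

    /* Number of packets a len-byte send or RDMA write occupies. */
    static uint32_t packet_count(uint32_t len, uint32_t pmtu)
    {
            return len ? (len - 1) / pmtu + 1 : 1;   /* a 0-byte send is one packet */
    }

    /* Last PSN used by a request whose first packet carries first_psn. */
    static uint32_t last_psn(uint32_t first_psn, uint32_t len, uint32_t pmtu)
    {
            return (first_psn + packet_count(len, pmtu) - 1) & PSN_MASK;
    }

    /* e.g. last_psn(10, 5000, 2048) == 12: packets at PSNs 10, 11 and 12 */
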
-/**
- * send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
- *
- * This is called from ipath_rc_rcv() and only uses the receive
- * side QP state.
- * Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
- */
-static void send_rc_ack(struct ipath_qp *qp)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_devdata *dd;
-       u16 lrh0;
-       u32 bth0;
-       u32 hwords;
-       u32 __iomem *piobuf;
-       struct ipath_ib_header hdr;
-       struct ipath_other_headers *ohdr;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
-       if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
-           (qp->s_flags & IPATH_S_ACK_PENDING) ||
-           qp->s_ack_state != OP(ACKNOWLEDGE))
-               goto queue_ack;
-
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-
-       /* Don't try to send ACKs if the link isn't ACTIVE */
-       dd = dev->dd;
-       if (!(dd->ipath_flags & IPATH_LINKACTIVE))
-               goto done;
-
-       piobuf = ipath_getpiobuf(dd, 0, NULL);
-       if (!piobuf) {
-               /*
-                * We are out of PIO buffers at the moment.
-                * Pass responsibility for sending the ACK to the
-                * send tasklet so that when a PIO buffer becomes
-                * available, the ACK is sent ahead of other outgoing
-                * packets.
-                */
-               spin_lock_irqsave(&qp->s_lock, flags);
-               goto queue_ack;
-       }
-
-       /* Construct the header. */
-       ohdr = &hdr.u.oth;
-       lrh0 = IPATH_LRH_BTH;
-       /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
-       hwords = 6;
-       if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-               hwords += ipath_make_grh(dev, &hdr.u.l.grh,
-                                        &qp->remote_ah_attr.grh,
-                                        hwords, 0);
-               ohdr = &hdr.u.l.oth;
-               lrh0 = IPATH_LRH_GRH;
-       }
-       /* read pkey_index w/o lock (it's atomic) */
-       bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
-               (OP(ACKNOWLEDGE) << 24) | (1 << 22);
-       if (qp->r_nak_state)
-               ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
-                                           (qp->r_nak_state <<
-                                            IPATH_AETH_CREDIT_SHIFT));
-       else
-               ohdr->u.aeth = ipath_compute_aeth(qp);
-       lrh0 |= qp->remote_ah_attr.sl << 4;
-       hdr.lrh[0] = cpu_to_be16(lrh0);
-       hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-       hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-       hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
-                                qp->remote_ah_attr.src_path_bits);
-       ohdr->bth[0] = cpu_to_be32(bth0);
-       ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-       ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
-
-       writeq(hwords + 1, piobuf);
-
-       if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
-               u32 *hdrp = (u32 *) &hdr;
-
-               ipath_flush_wc();
-               __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
-               ipath_flush_wc();
-               __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
-       } else
-               __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
-
-       ipath_flush_wc();
-
-       dev->n_unicast_xmit++;
-       goto done;
-
-queue_ack:
-       if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) {
-               dev->n_rc_qacks++;
-               qp->s_flags |= IPATH_S_ACK_PENDING;
-               qp->s_nak_state = qp->r_nak_state;
-               qp->s_ack_psn = qp->r_ack_psn;
-
-               /* Schedule the send tasklet. */
-               ipath_schedule_send(qp);
-       }
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-       return;
-}
-
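
send_rc_ack() above builds the ACK by hand: 6 header words for LRH+BTH+AETH, more when a GRH is needed, with the ICRC accounted for separately in the LRH length. Assuming the standard 8/40/12/4-byte sizes for LRH, GRH, BTH and AETH, the word count works out as in this small sketch:

    #include <stdint.h>

    /* Header size of an RC ACK in 32-bit words (ICRC not included). */
    static uint32_t ack_header_words(int has_grh)
    {
            uint32_t bytes = 8 + 12 + 4;     /* LRH + BTH + AETH */

            if (has_grh)
                    bytes += 40;             /* optional global route header */
            return bytes / 4;                /* 6 without a GRH, 16 with one */
    }
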
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
-       u32 n = qp->s_last;
-       struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
-       u32 opcode;
-
-       qp->s_cur = n;
-
-       /*
-        * If we are starting the request from the beginning,
-        * let the normal send code handle initialization.
-        */
-       if (ipath_cmp24(psn, wqe->psn) <= 0) {
-               qp->s_state = OP(SEND_LAST);
-               goto done;
-       }
-
-       /* Find the work request opcode corresponding to the given PSN. */
-       opcode = wqe->wr.opcode;
-       for (;;) {
-               int diff;
-
-               if (++n == qp->s_size)
-                       n = 0;
-               if (n == qp->s_tail)
-                       break;
-               wqe = get_swqe_ptr(qp, n);
-               diff = ipath_cmp24(psn, wqe->psn);
-               if (diff < 0)
-                       break;
-               qp->s_cur = n;
-               /*
-                * If we are starting the request from the beginning,
-                * let the normal send code handle initialization.
-                */
-               if (diff == 0) {
-                       qp->s_state = OP(SEND_LAST);
-                       goto done;
-               }
-               opcode = wqe->wr.opcode;
-       }
-
-       /*
-        * Set the state to restart in the middle of a request.
-        * Don't change the s_sge, s_cur_sge, or s_cur_size.
-        * See ipath_make_rc_req().
-        */
-       switch (opcode) {
-       case IB_WR_SEND:
-       case IB_WR_SEND_WITH_IMM:
-               qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-               break;
-
-       case IB_WR_RDMA_WRITE:
-       case IB_WR_RDMA_WRITE_WITH_IMM:
-               qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-               break;
-
-       case IB_WR_RDMA_READ:
-               qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-               break;
-
-       default:
-               /*
-                * This case shouldn't happen since there is only
-                * one PSN per request.
-                */
-               qp->s_state = OP(SEND_LAST);
-       }
-done:
-       qp->s_psn = psn;
-}
-
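
reset_psn() above, like most of this file, has to order PSNs that wrap at 2^24, which is what the ipath_cmp24() calls are for. The idea, written out as a self-contained sketch rather than the driver's helper:

    #include <stdint.h>

    #define PSN_MASK 0xFFFFFF

    /*
     * Returns <0, 0 or >0 as 'a' is behind, equal to, or ahead of 'b',
     * treating the difference as a signed 24-bit quantity so comparisons
     * stay correct across PSN wraparound.
     */
    static int psn_cmp24(uint32_t a, uint32_t b)
    {
            uint32_t d = (a - b) & PSN_MASK;

            return (d & 0x800000) ? (int)d - 0x1000000 : (int)d;
    }

    /* psn_cmp24(0x000002, 0xFFFFFE) > 0: PSN 2 is four ahead of 0xfffffe */
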
-/**
- * ipath_restart_rc - back up requester to resend the last un-ACKed request
- * @qp: the QP to restart
- * @psn: packet sequence number for the request
- *
- * The QP s_lock should be held and interrupts disabled.
- */
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
-{
-       struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-       struct ipath_ibdev *dev;
-
-       if (qp->s_retry == 0) {
-               ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
-               ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-               goto bail;
-       }
-       qp->s_retry--;
-
-       /*
-        * Remove the QP from the timeout queue.
-        * Note: it may already have been removed by ipath_ib_timer().
-        */
-       dev = to_idev(qp->ibqp.device);
-       spin_lock(&dev->pending_lock);
-       if (!list_empty(&qp->timerwait))
-               list_del_init(&qp->timerwait);
-       if (!list_empty(&qp->piowait))
-               list_del_init(&qp->piowait);
-       spin_unlock(&dev->pending_lock);
-
-       if (wqe->wr.opcode == IB_WR_RDMA_READ)
-               dev->n_rc_resends++;
-       else
-               dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
-
-       reset_psn(qp, psn);
-       ipath_schedule_send(qp);
-
-bail:
-       return;
-}
-
-static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
-{
-       qp->s_last_psn = psn;
-}
-
-/**
- * do_rc_ack - process an incoming RC ACK
- * @qp: the QP the ACK came in on
- * @aeth: the Acknowledge Extended Transport Header from the packet
- * @psn: the packet sequence number of the ACK
- * @opcode: the opcode of the response packet that carried the ACK
- * @val: the data returned by an atomic response, if any
- *
- * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held and interrupts disabled.
- * Returns 1 if OK, 0 if current operation should be aborted (NAK).
- */
-static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
-                    u64 val)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ib_wc wc;
-       enum ib_wc_status status;
-       struct ipath_swqe *wqe;
-       int ret = 0;
-       u32 ack_psn;
-       int diff;
-
-       /*
-        * Remove the QP from the timeout queue (or RNR timeout queue).
-        * If ipath_ib_timer() has already removed it,
-        * it's OK since we hold the QP s_lock and ipath_restart_rc()
-        * just won't find anything to restart if we ACK everything.
-        */
-       spin_lock(&dev->pending_lock);
-       if (!list_empty(&qp->timerwait))
-               list_del_init(&qp->timerwait);
-       spin_unlock(&dev->pending_lock);
-
-       /*
-        * Note that NAKs implicitly ACK outstanding SEND and RDMA write
-        * requests and implicitly NAK RDMA read and atomic requests issued
-        * before the NAK'ed request.  The MSN won't include the NAK'ed
-        * request but will include any ACK'ed requests.
-        */
-       ack_psn = psn;
-       if (aeth >> 29)
-               ack_psn--;
-       wqe = get_swqe_ptr(qp, qp->s_last);
-
-       /*
-        * The MSN might be for a later WQE than the PSN indicates so
-        * only complete WQEs that the PSN finishes.
-        */
-       while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
-               /*
-                * RDMA_READ_RESPONSE_ONLY is a special case since
-                * we want to generate completion events for everything
-                * before the RDMA read, copy the data, then generate
-                * the completion for the read.
-                */
-               if (wqe->wr.opcode == IB_WR_RDMA_READ &&
-                   opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
-                   diff == 0) {
-                       ret = 1;
-                       goto bail;
-               }
-               /*
-                * If this request is an RDMA read or atomic, and the ACK is
-                * for a later operation, this ACK NAKs the RDMA read or
-                * atomic.  In other words, only an RDMA_READ_LAST or ONLY
-                * can ACK an RDMA read, and likewise for atomic ops.  Note
-                * that the NAK case can only happen if relaxed ordering is
-                * used and requests are sent after an RDMA read or atomic
-                * is sent but before the response is received.
-                */
-               if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
-                    (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
-                   ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-                     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
-                    (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
-                       /*
-                        * The last valid PSN seen is the previous
-                        * request's.
-                        */
-                       update_last_psn(qp, wqe->psn - 1);
-                       /* Retry this request. */
-                       ipath_restart_rc(qp, wqe->psn);
-                       /*
-                        * No need to process the ACK/NAK since we are
-                        * restarting an earlier request.
-                        */
-                       goto bail;
-               }
-               if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-                   wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-                       *(u64 *) wqe->sg_list[0].vaddr = val;
-               if (qp->s_num_rd_atomic &&
-                   (wqe->wr.opcode == IB_WR_RDMA_READ ||
-                    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-                    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
-                       qp->s_num_rd_atomic--;
-                       /* Restart sending task if fence is complete */
-                       if (((qp->s_flags & IPATH_S_FENCE_PENDING) &&
-                            !qp->s_num_rd_atomic) ||
-                           qp->s_flags & IPATH_S_RDMAR_PENDING)
-                               ipath_schedule_send(qp);
-               }
-               /* Post a send completion queue entry if requested. */
-               if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
-                   (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
-                       wc.wr_id = wqe->wr.wr_id;
-                       wc.status = IB_WC_SUCCESS;
-                       wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-                       wc.byte_len = wqe->length;
-                       wc.qp = &qp->ibqp;
-                       wc.src_qp = qp->remote_qpn;
-                       wc.slid = qp->remote_ah_attr.dlid;
-                       wc.sl = qp->remote_ah_attr.sl;
-                       ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
-               }
-               qp->s_retry = qp->s_retry_cnt;
-               /*
-                * If we are completing a request which is in the process of
-                * being resent, we can stop resending it since we know the
-                * responder has already seen it.
-                */
-               if (qp->s_last == qp->s_cur) {
-                       if (++qp->s_cur >= qp->s_size)
-                               qp->s_cur = 0;
-                       qp->s_last = qp->s_cur;
-                       if (qp->s_last == qp->s_tail)
-                               break;
-                       wqe = get_swqe_ptr(qp, qp->s_cur);
-                       qp->s_state = OP(SEND_LAST);
-                       qp->s_psn = wqe->psn;
-               } else {
-                       if (++qp->s_last >= qp->s_size)
-                               qp->s_last = 0;
-                       if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur)
-                               qp->s_draining = 0;
-                       if (qp->s_last == qp->s_tail)
-                               break;
-                       wqe = get_swqe_ptr(qp, qp->s_last);
-               }
-       }
-
-       switch (aeth >> 29) {
-       case 0:         /* ACK */
-               dev->n_rc_acks++;
-               /* If this is a partial ACK, reset the retransmit timer. */
-               if (qp->s_last != qp->s_tail) {
-                       spin_lock(&dev->pending_lock);
-                       if (list_empty(&qp->timerwait))
-                               list_add_tail(&qp->timerwait,
-                                       &dev->pending[dev->pending_index]);
-                       spin_unlock(&dev->pending_lock);
-                       /*
-                        * If we get a partial ACK for a resent operation,
-                        * we can stop resending the earlier packets and
-                        * continue with the next packet the receiver wants.
-                        */
-                       if (ipath_cmp24(qp->s_psn, psn) <= 0) {
-                               reset_psn(qp, psn + 1);
-                               ipath_schedule_send(qp);
-                       }
-               } else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
-                       qp->s_state = OP(SEND_LAST);
-                       qp->s_psn = psn + 1;
-               }
-               ipath_get_credit(qp, aeth);
-               qp->s_rnr_retry = qp->s_rnr_retry_cnt;
-               qp->s_retry = qp->s_retry_cnt;
-               update_last_psn(qp, psn);
-               ret = 1;
-               goto bail;
-
-       case 1:         /* RNR NAK */
-               dev->n_rnr_naks++;
-               if (qp->s_last == qp->s_tail)
-                       goto bail;
-               if (qp->s_rnr_retry == 0) {
-                       status = IB_WC_RNR_RETRY_EXC_ERR;
-                       goto class_b;
-               }
-               if (qp->s_rnr_retry_cnt < 7)
-                       qp->s_rnr_retry--;
-
-               /* The last valid PSN is the previous PSN. */
-               update_last_psn(qp, psn - 1);
-
-               if (wqe->wr.opcode == IB_WR_RDMA_READ)
-                       dev->n_rc_resends++;
-               else
-                       dev->n_rc_resends +=
-                               (qp->s_psn - psn) & IPATH_PSN_MASK;
-
-               reset_psn(qp, psn);
-
-               qp->s_rnr_timeout =
-                       ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
-                                          IPATH_AETH_CREDIT_MASK];
-               ipath_insert_rnr_queue(qp);
-               ipath_schedule_send(qp);
-               goto bail;
-
-       case 3:         /* NAK */
-               if (qp->s_last == qp->s_tail)
-                       goto bail;
-               /* The last valid PSN is the previous PSN. */
-               update_last_psn(qp, psn - 1);
-               switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
-                       IPATH_AETH_CREDIT_MASK) {
-               case 0: /* PSN sequence error */
-                       dev->n_seq_naks++;
-                       /*
-                        * Back up to the responder's expected PSN.
-                        * Note that we might get a NAK in the middle of an
-                        * RDMA READ response which terminates the RDMA
-                        * READ.
-                        */
-                       ipath_restart_rc(qp, psn);
-                       break;
-
-               case 1: /* Invalid Request */
-                       status = IB_WC_REM_INV_REQ_ERR;
-                       dev->n_other_naks++;
-                       goto class_b;
-
-               case 2: /* Remote Access Error */
-                       status = IB_WC_REM_ACCESS_ERR;
-                       dev->n_other_naks++;
-                       goto class_b;
-
-               case 3: /* Remote Operation Error */
-                       status = IB_WC_REM_OP_ERR;
-                       dev->n_other_naks++;
-               class_b:
-                       ipath_send_complete(qp, wqe, status);
-                       ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-                       break;
-
-               default:
-                       /* Ignore other reserved NAK error codes */
-                       goto reserved;
-               }
-               qp->s_rnr_retry = qp->s_rnr_retry_cnt;
-               goto bail;
-
-       default:                /* 2: reserved */
-       reserved:
-               /* Ignore reserved NAK codes. */
-               goto bail;
-       }
-
-bail:
-       return ret;
-}
-
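
do_rc_ack() above dispatches on the top three bits of the AETH: 0 is a plain ACK, 1 an RNR NAK, 3 a NAK, and 2 is reserved and ignored. The same dispatch as a tiny standalone classifier:

    #include <stdint.h>

    enum aeth_class { AETH_ACK, AETH_RNR_NAK, AETH_RESERVED, AETH_NAK };

    static enum aeth_class classify_aeth(uint32_t aeth)
    {
            switch (aeth >> 29) {
            case 0:  return AETH_ACK;
            case 1:  return AETH_RNR_NAK;
            case 3:  return AETH_NAK;
            default: return AETH_RESERVED;   /* 2: reserved, ignored */
            }
    }
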
-/**
- * ipath_rc_rcv_resp - process an incoming RC response packet
- * @dev: the device this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
- * @header_in_data: true if part of the header data is in the data buffer
- *
- * This is called from ipath_rc_rcv() to process an incoming RC response
- * packet for the given QP.
- * Called at interrupt level.
- */
-static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
-                                    struct ipath_other_headers *ohdr,
-                                    void *data, u32 tlen,
-                                    struct ipath_qp *qp,
-                                    u32 opcode,
-                                    u32 psn, u32 hdrsize, u32 pmtu,
-                                    int header_in_data)
-{
-       struct ipath_swqe *wqe;
-       enum ib_wc_status status;
-       unsigned long flags;
-       int diff;
-       u32 pad;
-       u32 aeth;
-       u64 val;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Double check we can process this now that we hold the s_lock. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-               goto ack_done;
-
-       /* Ignore invalid responses. */
-       if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
-               goto ack_done;
-
-       /* Ignore duplicate responses. */
-       diff = ipath_cmp24(psn, qp->s_last_psn);
-       if (unlikely(diff <= 0)) {
-               /* Update credits for "ghost" ACKs */
-               if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
-                       if (!header_in_data)
-                               aeth = be32_to_cpu(ohdr->u.aeth);
-                       else {
-                               aeth = be32_to_cpu(((__be32 *) data)[0]);
-                               data += sizeof(__be32);
-                       }
-                       if ((aeth >> 29) == 0)
-                               ipath_get_credit(qp, aeth);
-               }
-               goto ack_done;
-       }
-
-       if (unlikely(qp->s_last == qp->s_tail))
-               goto ack_done;
-       wqe = get_swqe_ptr(qp, qp->s_last);
-       status = IB_WC_SUCCESS;
-
-       switch (opcode) {
-       case OP(ACKNOWLEDGE):
-       case OP(ATOMIC_ACKNOWLEDGE):
-       case OP(RDMA_READ_RESPONSE_FIRST):
-               if (!header_in_data)
-                       aeth = be32_to_cpu(ohdr->u.aeth);
-               else {
-                       aeth = be32_to_cpu(((__be32 *) data)[0]);
-                       data += sizeof(__be32);
-               }
-               if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
-                       if (!header_in_data) {
-                               __be32 *p = ohdr->u.at.atomic_ack_eth;
-
-                               val = ((u64) be32_to_cpu(p[0]) << 32) |
-                                       be32_to_cpu(p[1]);
-                       } else
-                               val = be64_to_cpu(((__be64 *) data)[0]);
-               } else
-                       val = 0;
-               if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
-                   opcode != OP(RDMA_READ_RESPONSE_FIRST))
-                       goto ack_done;
-               hdrsize += 4;
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-                       goto ack_op_err;
-               qp->r_flags &= ~IPATH_R_RDMAR_SEQ;
-               /*
-                * If this is a response to a resent RDMA read, we
-                * have to be careful to copy the data to the right
-                * location.
-                */
-               qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
-                                                 wqe, psn, pmtu);
-               goto read_middle;
-
-       case OP(RDMA_READ_RESPONSE_MIDDLE):
-               /* no AETH, no ACK */
-               if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
-                       dev->n_rdma_seq++;
-                       if (qp->r_flags & IPATH_R_RDMAR_SEQ)
-                               goto ack_done;
-                       qp->r_flags |= IPATH_R_RDMAR_SEQ;
-                       ipath_restart_rc(qp, qp->s_last_psn + 1);
-                       goto ack_done;
-               }
-               if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-                       goto ack_op_err;
-       read_middle:
-               if (unlikely(tlen != (hdrsize + pmtu + 4)))
-                       goto ack_len_err;
-               if (unlikely(pmtu >= qp->s_rdma_read_len))
-                       goto ack_len_err;
-
-               /* We got a response so update the timeout. */
-               spin_lock(&dev->pending_lock);
-               if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
-                       list_move_tail(&qp->timerwait,
-                                      &dev->pending[dev->pending_index]);
-               spin_unlock(&dev->pending_lock);
-
-               if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
-                       qp->s_retry = qp->s_retry_cnt;
-
-               /*
-                * Update the RDMA receive state but do the copy w/o
-                * holding the locks and blocking interrupts.
-                */
-               qp->s_rdma_read_len -= pmtu;
-               update_last_psn(qp, psn);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-               ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
-               goto bail;
-
-       case OP(RDMA_READ_RESPONSE_ONLY):
-               if (!header_in_data)
-                       aeth = be32_to_cpu(ohdr->u.aeth);
-               else
-                       aeth = be32_to_cpu(((__be32 *) data)[0]);
-               if (!do_rc_ack(qp, aeth, psn, opcode, 0))
-                       goto ack_done;
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /*
-                * Check that the data size is >= 0 && <= pmtu.
-                * Remember to account for the AETH header (4) and
-                * ICRC (4).
-                */
-               if (unlikely(tlen < (hdrsize + pad + 8)))
-                       goto ack_len_err;
-               /*
-                * If this is a response to a resent RDMA read, we
-                * have to be careful to copy the data to the right
-                * location.
-                */
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
-                                                 wqe, psn, pmtu);
-               goto read_last;
-
-       case OP(RDMA_READ_RESPONSE_LAST):
-               /* ACKs READ req. */
-               if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
-                       dev->n_rdma_seq++;
-                       if (qp->r_flags & IPATH_R_RDMAR_SEQ)
-                               goto ack_done;
-                       qp->r_flags |= IPATH_R_RDMAR_SEQ;
-                       ipath_restart_rc(qp, qp->s_last_psn + 1);
-                       goto ack_done;
-               }
-               if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-                       goto ack_op_err;
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /*
-                * Check that the data size is >= 1 && <= pmtu.
-                * Remember to account for the AETH header (4) and
-                * ICRC (4).
-                */
-               if (unlikely(tlen <= (hdrsize + pad + 8)))
-                       goto ack_len_err;
-       read_last:
-               tlen -= hdrsize + pad + 8;
-               if (unlikely(tlen != qp->s_rdma_read_len))
-                       goto ack_len_err;
-               if (!header_in_data)
-                       aeth = be32_to_cpu(ohdr->u.aeth);
-               else {
-                       aeth = be32_to_cpu(((__be32 *) data)[0]);
-                       data += sizeof(__be32);
-               }
-               ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
-               (void) do_rc_ack(qp, aeth, psn,
-                                OP(RDMA_READ_RESPONSE_LAST), 0);
-               goto ack_done;
-       }
-
-ack_op_err:
-       status = IB_WC_LOC_QP_OP_ERR;
-       goto ack_err;
-
-ack_len_err:
-       status = IB_WC_LOC_LEN_ERR;
-ack_err:
-       ipath_send_complete(qp, wqe, status);
-       ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-ack_done:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-bail:
-       return;
-}
-
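
ipath_rc_rcv_resp() above checks packet lengths before touching the read-response data: a MIDDLE packet must carry exactly one path MTU of payload after the headers and the 4-byte ICRC, and a LAST packet's payload (total length minus headers, pad, the 4-byte AETH and the 4-byte ICRC) must exactly finish the outstanding read. The checks, restated as a standalone sketch (ONLY differs from LAST only in allowing an empty payload):

    #include <stdint.h>

    /* MIDDLE read responses: payload must be exactly one path MTU. */
    static int middle_len_ok(uint32_t tlen, uint32_t hdrsize, uint32_t pmtu)
    {
            return tlen == hdrsize + pmtu + 4;          /* + 4-byte ICRC */
    }

    /* LAST read responses: payload must finish the outstanding read. */
    static int last_len_ok(uint32_t tlen, uint32_t hdrsize, uint32_t pad,
                           uint32_t remaining)
    {
            if (tlen <= hdrsize + pad + 8)              /* AETH + ICRC = 8 */
                    return 0;
            return tlen - (hdrsize + pad + 8) == remaining;
    }
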
-/**
- * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
- * @dev: the device this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @diff: the difference between the PSN and the expected PSN
- * @header_in_data: true if part of the header data is in the data buffer
- *
- * This is called from ipath_rc_rcv() to process an unexpected
- * incoming RC packet for the given QP.
- * Called at interrupt level.
- * Return 1 if no more processing is needed; otherwise return 0 to
- * schedule a response to be sent.
- */
-static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
-                                    struct ipath_other_headers *ohdr,
-                                    void *data,
-                                    struct ipath_qp *qp,
-                                    u32 opcode,
-                                    u32 psn,
-                                    int diff,
-                                    int header_in_data)
-{
-       struct ipath_ack_entry *e;
-       u8 i, prev;
-       int old_req;
-       unsigned long flags;
-
-       if (diff > 0) {
-               /*
-                * Packet sequence error.
-                * A NAK will ACK earlier sends and RDMA writes.
-                * Don't queue the NAK if we already sent one.
-                */
-               if (!qp->r_nak_state) {
-                       qp->r_nak_state = IB_NAK_PSN_ERROR;
-                       /* Use the expected PSN. */
-                       qp->r_ack_psn = qp->r_psn;
-                       goto send_ack;
-               }
-               goto done;
-       }
-
-       /*
-        * Handle a duplicate request.  Don't re-execute SEND, RDMA
-        * write or atomic op.  Don't NAK errors, just silently drop
-        * the duplicate request.  Note that r_sge, r_len, and
-        * r_rcv_len may be in use so don't modify them.
-        *
-        * We are supposed to ACK the earliest duplicate PSN but we
-        * can coalesce an outstanding duplicate ACK.  We have to
-        * send the earliest so that RDMA reads can be restarted at
-        * the requester's expected PSN.
-        *
-        * First, find where this duplicate PSN falls within the
-        * ACKs previously sent.
-        */
-       psn &= IPATH_PSN_MASK;
-       e = NULL;
-       old_req = 1;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-       /* Double check we can process this now that we hold the s_lock. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-               goto unlock_done;
-
-       for (i = qp->r_head_ack_queue; ; i = prev) {
-               if (i == qp->s_tail_ack_queue)
-                       old_req = 0;
-               if (i)
-                       prev = i - 1;
-               else
-                       prev = IPATH_MAX_RDMA_ATOMIC;
-               if (prev == qp->r_head_ack_queue) {
-                       e = NULL;
-                       break;
-               }
-               e = &qp->s_ack_queue[prev];
-               if (!e->opcode) {
-                       e = NULL;
-                       break;
-               }
-               if (ipath_cmp24(psn, e->psn) >= 0) {
-                       if (prev == qp->s_tail_ack_queue)
-                               old_req = 0;
-                       break;
-               }
-       }
-       switch (opcode) {
-       case OP(RDMA_READ_REQUEST): {
-               struct ib_reth *reth;
-               u32 offset;
-               u32 len;
-
-               /*
-                * If we didn't find the RDMA read request in the ack queue,
-                * or the send tasklet is already backed up to send an
-                * earlier entry, we can ignore this request.
-                */
-               if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
-                       goto unlock_done;
-               /* RETH comes after BTH */
-               if (!header_in_data)
-                       reth = &ohdr->u.rc.reth;
-               else {
-                       reth = (struct ib_reth *)data;
-                       data += sizeof(*reth);
-               }
-               /*
-                * Address range must be a subset of the original
-                * request and start on pmtu boundaries.
-                * We reuse the old ack_queue slot since the requester
-                * should not back up and request an earlier PSN for the
-                * same request.
-                */
-               offset = ((psn - e->psn) & IPATH_PSN_MASK) *
-                       ib_mtu_enum_to_int(qp->path_mtu);
-               len = be32_to_cpu(reth->length);
-               if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
-                       goto unlock_done;
-               if (len != 0) {
-                       u32 rkey = be32_to_cpu(reth->rkey);
-                       u64 vaddr = be64_to_cpu(reth->vaddr);
-                       int ok;
-
-                       ok = ipath_rkey_ok(qp, &e->rdma_sge,
-                                          len, vaddr, rkey,
-                                          IB_ACCESS_REMOTE_READ);
-                       if (unlikely(!ok))
-                               goto unlock_done;
-               } else {
-                       e->rdma_sge.sg_list = NULL;
-                       e->rdma_sge.num_sge = 0;
-                       e->rdma_sge.sge.mr = NULL;
-                       e->rdma_sge.sge.vaddr = NULL;
-                       e->rdma_sge.sge.length = 0;
-                       e->rdma_sge.sge.sge_length = 0;
-               }
-               e->psn = psn;
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               qp->s_tail_ack_queue = prev;
-               break;
-       }
-
-       case OP(COMPARE_SWAP):
-       case OP(FETCH_ADD): {
-               /*
-                * If we didn't find the atomic request in the ack queue
-                * or the send tasklet is already backed up to send an
-                * earlier entry, we can ignore this request.
-                */
-               if (!e || e->opcode != (u8) opcode || old_req)
-                       goto unlock_done;
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               qp->s_tail_ack_queue = prev;
-               break;
-       }
-
-       default:
-               if (old_req)
-                       goto unlock_done;
-               /*
-                * Resend the most recent ACK if this request is
-                * after all the previous RDMA reads and atomics.
-                */
-               if (i == qp->r_head_ack_queue) {
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
-                       qp->r_nak_state = 0;
-                       qp->r_ack_psn = qp->r_psn - 1;
-                       goto send_ack;
-               }
-               /*
-                * Try to send a simple ACK to work around a Mellanox bug
-                * which doesn't accept a RDMA read response or atomic
-                * response as an ACK for earlier SENDs or RDMA writes.
-                */
-               if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
-                   !(qp->s_flags & IPATH_S_ACK_PENDING) &&
-                   qp->s_ack_state == OP(ACKNOWLEDGE)) {
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
-                       qp->r_nak_state = 0;
-                       qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
-                       goto send_ack;
-               }
-               /*
-                * Resend the RDMA read or atomic op which
-                * ACKs this duplicate request.
-                */
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               qp->s_tail_ack_queue = i;
-               break;
-       }
-       qp->r_nak_state = 0;
-       ipath_schedule_send(qp);
-
-unlock_done:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-       return 1;
-
-send_ack:
-       return 0;
-}
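The duplicate/error handling above leans on 24-bit PSN arithmetic: the diff argument and the ipath_cmp24() calls decide whether an incoming PSN is ahead of, equal to, or behind the expected one, with wraparound. As a standalone illustration only (psn_cmp24 and PSN_MASK are hypothetical names, not part of the driver), one way to express such a wrapping comparison is:

#include <stdint.h>

#define PSN_MASK 0xFFFFFF                       /* stand-in for IPATH_PSN_MASK */

/* <0: a is behind b, 0: equal, >0: a is ahead of b, modulo 2^24. */
static int psn_cmp24(uint32_t a, uint32_t b)
{
        uint32_t d = (a - b) & PSN_MASK;

        return (d & 0x800000) ? (int)d - 0x1000000 : (int)d;
}

A positive result corresponds to the sequence-error branch above (NAK with IB_NAK_PSN_ERROR); a negative result is the duplicate case that walks the ack queue.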
-
-void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
-{
-       unsigned long flags;
-       int lastwqe;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-       lastwqe = ipath_error_qp(qp, err);
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-
-       if (lastwqe) {
-               struct ib_event ev;
-
-               ev.device = qp->ibqp.device;
-               ev.element.qp = &qp->ibqp;
-               ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-               qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-       }
-}
-
-static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
-{
-       unsigned next;
-
-       next = n + 1;
-       if (next > IPATH_MAX_RDMA_ATOMIC)
-               next = 0;
-       if (n == qp->s_tail_ack_queue) {
-               qp->s_tail_ack_queue = next;
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-       }
-}
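ipath_update_ack_queue() above, and the RDMA read/atomic receive paths further down, both advance an index through a ring of IPATH_MAX_RDMA_ATOMIC + 1 ack-queue slots. A minimal sketch of that wraparound, assuming the same slot count (the helper name is illustrative, not driver code):

/* Next slot in a ring of IPATH_MAX_RDMA_ATOMIC + 1 entries. */
static inline unsigned int ack_queue_next(unsigned int n)
{
        return (n + 1) % (IPATH_MAX_RDMA_ATOMIC + 1);
}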
-
-/**
- * ipath_rc_rcv - process an incoming RC packet
- * @dev: the device this packet came in on
- * @hdr: the header of this packet
- * @has_grh: true if the header has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- *
- * This is called from ipath_qp_rcv() to process an incoming RC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-       struct ipath_other_headers *ohdr;
-       u32 opcode;
-       u32 hdrsize;
-       u32 psn;
-       u32 pad;
-       struct ib_wc wc;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-       int diff;
-       struct ib_reth *reth;
-       int header_in_data;
-       unsigned long flags;
-
-       /* Validate the SLID. See Ch. 9.6.1.5 */
-       if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
-               goto done;
-
-       /* Check for GRH */
-       if (!has_grh) {
-               ohdr = &hdr->u.oth;
-               hdrsize = 8 + 12;       /* LRH + BTH */
-               psn = be32_to_cpu(ohdr->bth[2]);
-               header_in_data = 0;
-       } else {
-               ohdr = &hdr->u.l.oth;
-               hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
-               /*
-                * The header with GRH is 60 bytes and the core driver sets
-                * the eager header buffer size to 56 bytes so the last 4
-                * bytes of the BTH header (PSN) are in the data buffer.
-                */
-               header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-               if (header_in_data) {
-                       psn = be32_to_cpu(((__be32 *) data)[0]);
-                       data += sizeof(__be32);
-               } else
-                       psn = be32_to_cpu(ohdr->bth[2]);
-       }
-
-       /*
-        * Process responses (ACKs) before anything else.  Note that the
-        * packet sequence number will be for something in the send work
-        * queue rather than the expected receive packet sequence number.
-        * In other words, this QP is the requester.
-        */
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-       if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
-           opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
-               ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
-                                 hdrsize, pmtu, header_in_data);
-               goto done;
-       }
-
-       /* Compute 24 bits worth of difference. */
-       diff = ipath_cmp24(psn, qp->r_psn);
-       if (unlikely(diff)) {
-               if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
-                                      psn, diff, header_in_data))
-                       goto done;
-               goto send_ack;
-       }
-
-       /* Check for opcode sequence errors. */
-       switch (qp->r_state) {
-       case OP(SEND_FIRST):
-       case OP(SEND_MIDDLE):
-               if (opcode == OP(SEND_MIDDLE) ||
-                   opcode == OP(SEND_LAST) ||
-                   opcode == OP(SEND_LAST_WITH_IMMEDIATE))
-                       break;
-               goto nack_inv;
-
-       case OP(RDMA_WRITE_FIRST):
-       case OP(RDMA_WRITE_MIDDLE):
-               if (opcode == OP(RDMA_WRITE_MIDDLE) ||
-                   opcode == OP(RDMA_WRITE_LAST) ||
-                   opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-                       break;
-               goto nack_inv;
-
-       default:
-               if (opcode == OP(SEND_MIDDLE) ||
-                   opcode == OP(SEND_LAST) ||
-                   opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
-                   opcode == OP(RDMA_WRITE_MIDDLE) ||
-                   opcode == OP(RDMA_WRITE_LAST) ||
-                   opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-                       goto nack_inv;
-               /*
-                * Note that it is up to the requester to not send a new
-                * RDMA read or atomic operation before receiving an ACK
-                * for the previous operation.
-                */
-               break;
-       }
-
-       memset(&wc, 0, sizeof wc);
-
-       /* OK, process the packet. */
-       switch (opcode) {
-       case OP(SEND_FIRST):
-               if (!ipath_get_rwqe(qp, 0))
-                       goto rnr_nak;
-               qp->r_rcv_len = 0;
-               /* FALLTHROUGH */
-       case OP(SEND_MIDDLE):
-       case OP(RDMA_WRITE_MIDDLE):
-       send_middle:
-               /* Check for invalid length PMTU or posted rwqe len. */
-               if (unlikely(tlen != (hdrsize + pmtu + 4)))
-                       goto nack_inv;
-               qp->r_rcv_len += pmtu;
-               if (unlikely(qp->r_rcv_len > qp->r_len))
-                       goto nack_inv;
-               ipath_copy_sge(&qp->r_sge, data, pmtu);
-               break;
-
-       case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-               /* consume RWQE */
-               if (!ipath_get_rwqe(qp, 1))
-                       goto rnr_nak;
-               goto send_last_imm;
-
-       case OP(SEND_ONLY):
-       case OP(SEND_ONLY_WITH_IMMEDIATE):
-               if (!ipath_get_rwqe(qp, 0))
-                       goto rnr_nak;
-               qp->r_rcv_len = 0;
-               if (opcode == OP(SEND_ONLY))
-                       goto send_last;
-               /* FALLTHROUGH */
-       case OP(SEND_LAST_WITH_IMMEDIATE):
-       send_last_imm:
-               if (header_in_data) {
-                       wc.ex.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else {
-                       /* Immediate data comes after BTH */
-                       wc.ex.imm_data = ohdr->u.imm_data;
-               }
-               hdrsize += 4;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               /* FALLTHROUGH */
-       case OP(SEND_LAST):
-       case OP(RDMA_WRITE_LAST):
-       send_last:
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /* Check for invalid length. */
-               /* XXX LAST len should be >= 1 */
-               if (unlikely(tlen < (hdrsize + pad + 4)))
-                       goto nack_inv;
-               /* Don't count the CRC. */
-               tlen -= (hdrsize + pad + 4);
-               wc.byte_len = tlen + qp->r_rcv_len;
-               if (unlikely(wc.byte_len > qp->r_len))
-                       goto nack_inv;
-               ipath_copy_sge(&qp->r_sge, data, tlen);
-               qp->r_msn++;
-               if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-                       break;
-               wc.wr_id = qp->r_wr_id;
-               wc.status = IB_WC_SUCCESS;
-               if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
-                   opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-                       wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-               else
-                       wc.opcode = IB_WC_RECV;
-               wc.qp = &qp->ibqp;
-               wc.src_qp = qp->remote_qpn;
-               wc.slid = qp->remote_ah_attr.dlid;
-               wc.sl = qp->remote_ah_attr.sl;
-               /* Signal completion event if the solicited bit is set. */
-               ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                              (ohdr->bth[0] &
-                               cpu_to_be32(1 << 23)) != 0);
-               break;
-
-       case OP(RDMA_WRITE_FIRST):
-       case OP(RDMA_WRITE_ONLY):
-       case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_WRITE)))
-                       goto nack_inv;
-               /* consume RWQE */
-               /* RETH comes after BTH */
-               if (!header_in_data)
-                       reth = &ohdr->u.rc.reth;
-               else {
-                       reth = (struct ib_reth *)data;
-                       data += sizeof(*reth);
-               }
-               hdrsize += sizeof(*reth);
-               qp->r_len = be32_to_cpu(reth->length);
-               qp->r_rcv_len = 0;
-               if (qp->r_len != 0) {
-                       u32 rkey = be32_to_cpu(reth->rkey);
-                       u64 vaddr = be64_to_cpu(reth->vaddr);
-                       int ok;
-
-                       /* Check rkey & NAK */
-                       ok = ipath_rkey_ok(qp, &qp->r_sge,
-                                          qp->r_len, vaddr, rkey,
-                                          IB_ACCESS_REMOTE_WRITE);
-                       if (unlikely(!ok))
-                               goto nack_acc;
-               } else {
-                       qp->r_sge.sg_list = NULL;
-                       qp->r_sge.sge.mr = NULL;
-                       qp->r_sge.sge.vaddr = NULL;
-                       qp->r_sge.sge.length = 0;
-                       qp->r_sge.sge.sge_length = 0;
-               }
-               if (opcode == OP(RDMA_WRITE_FIRST))
-                       goto send_middle;
-               else if (opcode == OP(RDMA_WRITE_ONLY))
-                       goto send_last;
-               if (!ipath_get_rwqe(qp, 1))
-                       goto rnr_nak;
-               goto send_last_imm;
-
-       case OP(RDMA_READ_REQUEST): {
-               struct ipath_ack_entry *e;
-               u32 len;
-               u8 next;
-
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_READ)))
-                       goto nack_inv;
-               next = qp->r_head_ack_queue + 1;
-               if (next > IPATH_MAX_RDMA_ATOMIC)
-                       next = 0;
-               spin_lock_irqsave(&qp->s_lock, flags);
-               /* Double check we can process this while holding the s_lock. */
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-                       goto unlock;
-               if (unlikely(next == qp->s_tail_ack_queue)) {
-                       if (!qp->s_ack_queue[next].sent)
-                               goto nack_inv_unlck;
-                       ipath_update_ack_queue(qp, next);
-               }
-               e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               /* RETH comes after BTH */
-               if (!header_in_data)
-                       reth = &ohdr->u.rc.reth;
-               else {
-                       reth = (struct ib_reth *)data;
-                       data += sizeof(*reth);
-               }
-               len = be32_to_cpu(reth->length);
-               if (len) {
-                       u32 rkey = be32_to_cpu(reth->rkey);
-                       u64 vaddr = be64_to_cpu(reth->vaddr);
-                       int ok;
-
-                       /* Check rkey & NAK */
-                       ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
-                                          rkey, IB_ACCESS_REMOTE_READ);
-                       if (unlikely(!ok))
-                               goto nack_acc_unlck;
-                       /*
-                        * Update the next expected PSN.  We add 1 later
-                        * below, so only add the remainder here.
-                        */
-                       if (len > pmtu)
-                               qp->r_psn += (len - 1) / pmtu;
-               } else {
-                       e->rdma_sge.sg_list = NULL;
-                       e->rdma_sge.num_sge = 0;
-                       e->rdma_sge.sge.mr = NULL;
-                       e->rdma_sge.sge.vaddr = NULL;
-                       e->rdma_sge.sge.length = 0;
-                       e->rdma_sge.sge.sge_length = 0;
-               }
-               e->opcode = opcode;
-               e->sent = 0;
-               e->psn = psn;
-               /*
-                * We need to increment the MSN here instead of when we
-                * finish sending the result since a duplicate request would
-                * increment it more than once.
-                */
-               qp->r_msn++;
-               qp->r_psn++;
-               qp->r_state = opcode;
-               qp->r_nak_state = 0;
-               qp->r_head_ack_queue = next;
-
-               /* Schedule the send tasklet. */
-               ipath_schedule_send(qp);
-
-               goto unlock;
-       }
-
-       case OP(COMPARE_SWAP):
-       case OP(FETCH_ADD): {
-               struct ib_atomic_eth *ateth;
-               struct ipath_ack_entry *e;
-               u64 vaddr;
-               atomic64_t *maddr;
-               u64 sdata;
-               u32 rkey;
-               u8 next;
-
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_ATOMIC)))
-                       goto nack_inv;
-               next = qp->r_head_ack_queue + 1;
-               if (next > IPATH_MAX_RDMA_ATOMIC)
-                       next = 0;
-               spin_lock_irqsave(&qp->s_lock, flags);
-               /* Double check we can process this while holding the s_lock. */
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-                       goto unlock;
-               if (unlikely(next == qp->s_tail_ack_queue)) {
-                       if (!qp->s_ack_queue[next].sent)
-                               goto nack_inv_unlck;
-                       ipath_update_ack_queue(qp, next);
-               }
-               if (!header_in_data)
-                       ateth = &ohdr->u.atomic_eth;
-               else
-                       ateth = (struct ib_atomic_eth *)data;
-               vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
-                       be32_to_cpu(ateth->vaddr[1]);
-               if (unlikely(vaddr & (sizeof(u64) - 1)))
-                       goto nack_inv_unlck;
-               rkey = be32_to_cpu(ateth->rkey);
-               /* Check rkey & NAK */
-               if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
-                                           sizeof(u64), vaddr, rkey,
-                                           IB_ACCESS_REMOTE_ATOMIC)))
-                       goto nack_acc_unlck;
-               /* Perform atomic OP and save result. */
-               maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-               sdata = be64_to_cpu(ateth->swap_data);
-               e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               e->atomic_data = (opcode == OP(FETCH_ADD)) ?
-                       (u64) atomic64_add_return(sdata, maddr) - sdata :
-                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-                                     be64_to_cpu(ateth->compare_data),
-                                     sdata);
-               e->opcode = opcode;
-               e->sent = 0;
-               e->psn = psn & IPATH_PSN_MASK;
-               qp->r_msn++;
-               qp->r_psn++;
-               qp->r_state = opcode;
-               qp->r_nak_state = 0;
-               qp->r_head_ack_queue = next;
-
-               /* Schedule the send tasklet. */
-               ipath_schedule_send(qp);
-
-               goto unlock;
-       }
-
-       default:
-               /* NAK unknown opcodes. */
-               goto nack_inv;
-       }
-       qp->r_psn++;
-       qp->r_state = opcode;
-       qp->r_ack_psn = psn;
-       qp->r_nak_state = 0;
-       /* Send an ACK if requested or required. */
-       if (psn & (1 << 31))
-               goto send_ack;
-       goto done;
-
-rnr_nak:
-       qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
-       qp->r_ack_psn = qp->r_psn;
-       goto send_ack;
-
-nack_inv_unlck:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_inv:
-       ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-       qp->r_nak_state = IB_NAK_INVALID_REQUEST;
-       qp->r_ack_psn = qp->r_psn;
-       goto send_ack;
-
-nack_acc_unlck:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_acc:
-       ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
-       qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
-       qp->r_ack_psn = qp->r_psn;
-send_ack:
-       send_rc_ack(qp);
-       goto done;
-
-unlock:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-       return;
-}
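ipath_rc_rcv() above repeatedly decodes fields of the first BTH word after byte-swapping it with be32_to_cpu(): the opcode from bits 31..24, the pad count from bits 21..20, and the solicited-event bit (bit 23) that gates the completion notification. The following standalone sketch (hypothetical struct and function names, not driver code) shows that unpacking on a host-order word:

#include <stdint.h>

struct bth0_fields {
        uint8_t opcode;         /* bits 31..24 */
        uint8_t pad;            /* bits 21..20: payload pad bytes */
        int     solicited;      /* bit 23: solicited event */
};

static struct bth0_fields unpack_bth0(uint32_t bth0)
{
        struct bth0_fields f = {
                .opcode    = bth0 >> 24,
                .pad       = (bth0 >> 20) & 3,
                .solicited = !!(bth0 & (1u << 23)),
        };
        return f;
}

The driver itself works on the big-endian ohdr->bth[0] and converts it first, as in the lines above.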
diff --git a/drivers/staging/rdma/ipath/ipath_registers.h b/drivers/staging/rdma/ipath/ipath_registers.h
deleted file mode 100644 (file)
index 8f44d0c..0000000
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_REGISTERS_H
-#define _IPATH_REGISTERS_H
-
-/*
- * This file should only be included by kernel source, and by the diags.  It
- * defines the registers, and their contents, for InfiniPath chips.
- */
-
-/*
- * These are the InfiniPath register and buffer bit definitions,
- * that are visible to software, and needed only by the kernel
- * and diag code.  A few that are visible to protocol and user
- * code are in ipath_common.h.  Some bits are specific
- * to a given chip implementation, and have been moved to the
- * chip-specific source file
- */
-
-/* kr_revision bits */
-#define INFINIPATH_R_CHIPREVMINOR_MASK 0xFF
-#define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
-#define INFINIPATH_R_CHIPREVMAJOR_MASK 0xFF
-#define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8
-#define INFINIPATH_R_ARCH_MASK 0xFF
-#define INFINIPATH_R_ARCH_SHIFT 16
-#define INFINIPATH_R_SOFTWARE_MASK 0xFF
-#define INFINIPATH_R_SOFTWARE_SHIFT 24
-#define INFINIPATH_R_BOARDID_MASK 0xFF
-#define INFINIPATH_R_BOARDID_SHIFT 32
-
-/* kr_control bits */
-#define INFINIPATH_C_FREEZEMODE 0x00000002
-#define INFINIPATH_C_LINKENABLE 0x00000004
-
-/* kr_sendctrl bits */
-#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
-#define INFINIPATH_S_UPDTHRESH_SHIFT 24
-#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
-
-#define IPATH_S_ABORT          0
-#define IPATH_S_PIOINTBUFAVAIL 1
-#define IPATH_S_PIOBUFAVAILUPD 2
-#define IPATH_S_PIOENABLE      3
-#define IPATH_S_SDMAINTENABLE  9
-#define IPATH_S_SDMASINGLEDESCRIPTOR   10
-#define IPATH_S_SDMAENABLE     11
-#define IPATH_S_SDMAHALT       12
-#define IPATH_S_DISARM         31
-
-#define INFINIPATH_S_ABORT             (1U << IPATH_S_ABORT)
-#define INFINIPATH_S_PIOINTBUFAVAIL    (1U << IPATH_S_PIOINTBUFAVAIL)
-#define INFINIPATH_S_PIOBUFAVAILUPD    (1U << IPATH_S_PIOBUFAVAILUPD)
-#define INFINIPATH_S_PIOENABLE         (1U << IPATH_S_PIOENABLE)
-#define INFINIPATH_S_SDMAINTENABLE     (1U << IPATH_S_SDMAINTENABLE)
-#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
-                                       (1U << IPATH_S_SDMASINGLEDESCRIPTOR)
-#define INFINIPATH_S_SDMAENABLE                (1U << IPATH_S_SDMAENABLE)
-#define INFINIPATH_S_SDMAHALT          (1U << IPATH_S_SDMAHALT)
-#define INFINIPATH_S_DISARM            (1U << IPATH_S_DISARM)
-
-/* kr_rcvctrl bits that are the same on multiple chips */
-#define INFINIPATH_R_PORTENABLE_SHIFT 0
-#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_SDMAINT           0x8000000000000000ULL
-#define INFINIPATH_I_SDMADISABLED      0x4000000000000000ULL
-#define INFINIPATH_I_ERROR             0x0000000080000000ULL
-#define INFINIPATH_I_SPIOSENT          0x0000000040000000ULL
-#define INFINIPATH_I_SPIOBUFAVAIL      0x0000000020000000ULL
-#define INFINIPATH_I_GPIO              0x0000000010000000ULL
-#define INFINIPATH_I_JINT              0x0000000004000000ULL
-
-/* kr_errorstatus, kr_errorclear, kr_errormask bits */
-#define INFINIPATH_E_RFORMATERR                        0x0000000000000001ULL
-#define INFINIPATH_E_RVCRC                     0x0000000000000002ULL
-#define INFINIPATH_E_RICRC                     0x0000000000000004ULL
-#define INFINIPATH_E_RMINPKTLEN                        0x0000000000000008ULL
-#define INFINIPATH_E_RMAXPKTLEN                        0x0000000000000010ULL
-#define INFINIPATH_E_RLONGPKTLEN               0x0000000000000020ULL
-#define INFINIPATH_E_RSHORTPKTLEN              0x0000000000000040ULL
-#define INFINIPATH_E_RUNEXPCHAR                        0x0000000000000080ULL
-#define INFINIPATH_E_RUNSUPVL                  0x0000000000000100ULL
-#define INFINIPATH_E_REBP                      0x0000000000000200ULL
-#define INFINIPATH_E_RIBFLOW                   0x0000000000000400ULL
-#define INFINIPATH_E_RBADVERSION               0x0000000000000800ULL
-#define INFINIPATH_E_RRCVEGRFULL               0x0000000000001000ULL
-#define INFINIPATH_E_RRCVHDRFULL               0x0000000000002000ULL
-#define INFINIPATH_E_RBADTID                   0x0000000000004000ULL
-#define INFINIPATH_E_RHDRLEN                   0x0000000000008000ULL
-#define INFINIPATH_E_RHDR                      0x0000000000010000ULL
-#define INFINIPATH_E_RIBLOSTLINK               0x0000000000020000ULL
-#define INFINIPATH_E_SENDSPECIALTRIGGER                0x0000000008000000ULL
-#define INFINIPATH_E_SDMADISABLED              0x0000000010000000ULL
-#define INFINIPATH_E_SMINPKTLEN                        0x0000000020000000ULL
-#define INFINIPATH_E_SMAXPKTLEN                        0x0000000040000000ULL
-#define INFINIPATH_E_SUNDERRUN                 0x0000000080000000ULL
-#define INFINIPATH_E_SPKTLEN                   0x0000000100000000ULL
-#define INFINIPATH_E_SDROPPEDSMPPKT            0x0000000200000000ULL
-#define INFINIPATH_E_SDROPPEDDATAPKT           0x0000000400000000ULL
-#define INFINIPATH_E_SPIOARMLAUNCH             0x0000000800000000ULL
-#define INFINIPATH_E_SUNEXPERRPKTNUM           0x0000001000000000ULL
-#define INFINIPATH_E_SUNSUPVL                  0x0000002000000000ULL
-#define INFINIPATH_E_SENDBUFMISUSE             0x0000004000000000ULL
-#define INFINIPATH_E_SDMAGENMISMATCH           0x0000008000000000ULL
-#define INFINIPATH_E_SDMAOUTOFBOUND            0x0000010000000000ULL
-#define INFINIPATH_E_SDMATAILOUTOFBOUND                0x0000020000000000ULL
-#define INFINIPATH_E_SDMABASE                  0x0000040000000000ULL
-#define INFINIPATH_E_SDMA1STDESC               0x0000080000000000ULL
-#define INFINIPATH_E_SDMARPYTAG                        0x0000100000000000ULL
-#define INFINIPATH_E_SDMADWEN                  0x0000200000000000ULL
-#define INFINIPATH_E_SDMAMISSINGDW             0x0000400000000000ULL
-#define INFINIPATH_E_SDMAUNEXPDATA             0x0000800000000000ULL
-#define INFINIPATH_E_IBSTATUSCHANGED           0x0001000000000000ULL
-#define INFINIPATH_E_INVALIDADDR               0x0002000000000000ULL
-#define INFINIPATH_E_RESET                     0x0004000000000000ULL
-#define INFINIPATH_E_HARDWARE                  0x0008000000000000ULL
-#define INFINIPATH_E_SDMADESCADDRMISALIGN      0x0010000000000000ULL
-#define INFINIPATH_E_INVALIDEEPCMD             0x0020000000000000ULL
-
-/*
- * this is used to print "common" packet errors only when the
- * __IPATH_ERRPKTDBG bit is set in ipath_debug.
- */
-#define INFINIPATH_E_PKTERRS ( INFINIPATH_E_SPKTLEN \
-               | INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_RVCRC \
-               | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
-               | INFINIPATH_E_REBP )
-
-/* Convenience for decoding Send DMA errors */
-#define INFINIPATH_E_SDMAERRS ( \
-       INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
-       INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
-       INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
-       INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
-       INFINIPATH_E_SDMAUNEXPDATA | \
-       INFINIPATH_E_SDMADESCADDRMISALIGN | \
-       INFINIPATH_E_SDMADISABLED | \
-       INFINIPATH_E_SENDBUFMISUSE)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
- * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2:  expTID, 3: eagerTID
- *             bit 4: flag buffer, 5: datainfo, 6: header info */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
-#define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44
-#define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL
-#define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL
-/* txe mem parity errors (shift by INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF  0x1ULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC  0x2ULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOLAUNCHFIFO 0x4ULL
-/* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */
-#define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF   0x01ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ  0x02ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID   0x04ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x08ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF  0x10ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO  0x40ULL
-/* waldo specific -- find the rest in ipath_6110.c */
-#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR  0x0000000400000000ULL
-/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
-#define INFINIPATH_HWE_MEMBISTFAILED   0x0040000000000000ULL
-
-/* kr_hwdiagctrl bits */
-#define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL
-#define INFINIPATH_DC_FORCETXEMEMPARITYERR_SHIFT 40
-#define INFINIPATH_DC_FORCERXEMEMPARITYERR_MASK 0x7FULL
-#define INFINIPATH_DC_FORCERXEMEMPARITYERR_SHIFT 44
-#define INFINIPATH_DC_FORCERXDSYNCMEMPARITYERR  0x0000000400000000ULL
-#define INFINIPATH_DC_COUNTERDISABLE            0x1000000000000000ULL
-#define INFINIPATH_DC_COUNTERWREN               0x2000000000000000ULL
-#define INFINIPATH_DC_FORCEIBCBUSTOSPCPARITYERR 0x4000000000000000ULL
-#define INFINIPATH_DC_FORCEIBCBUSFRSPCPARITYERR 0x8000000000000000ULL
-
-/* kr_ibcctrl bits */
-#define INFINIPATH_IBCC_FLOWCTRLPERIOD_MASK 0xFFULL
-#define INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT 0
-#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_MASK 0xFFULL
-#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
-#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define INFINIPATH_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
-#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
-#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKCMD_DOWN 1         /* move to 0x11 */
-#define INFINIPATH_IBCC_LINKCMD_ARMED 2                /* move to 0x21 */
-#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3       /* move to 0x31 */
-#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
-#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
-#define INFINIPATH_IBCC_MAXPKTLEN_SHIFT 20
-#define INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK 0xFULL
-#define INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT 32
-#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK 0xFULL
-#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT 36
-#define INFINIPATH_IBCC_CREDITSCALE_MASK 0x7ULL
-#define INFINIPATH_IBCC_CREDITSCALE_SHIFT 40
-#define INFINIPATH_IBCC_LOOPBACK             0x8000000000000000ULL
-#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
-
-/* kr_ibcstatus bits */
-#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
-#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
-
-#define INFINIPATH_IBCS_TXREADY       0x40000000
-#define INFINIPATH_IBCS_TXCREDITOK    0x80000000
-/* link training states (shift by
-   INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
-#define INFINIPATH_IBCS_LT_STATE_DISABLED      0x00
-#define INFINIPATH_IBCS_LT_STATE_LINKUP                0x01
-#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE    0x02
-#define INFINIPATH_IBCS_LT_STATE_POLLQUIET     0x03
-#define INFINIPATH_IBCS_LT_STATE_SLEEPDELAY    0x04
-#define INFINIPATH_IBCS_LT_STATE_SLEEPQUIET    0x05
-#define INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE   0x08
-#define INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG    0x09
-#define INFINIPATH_IBCS_LT_STATE_CFGWAITRMT    0x0a
-#define INFINIPATH_IBCS_LT_STATE_CFGIDLE       0x0b
-#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN        0x0c
-#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT        0x0e
-#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE   0x0f
-/* link state machine states (shift by ibcs_ls_shift) */
-#define INFINIPATH_IBCS_L_STATE_DOWN           0x0
-#define INFINIPATH_IBCS_L_STATE_INIT           0x1
-#define INFINIPATH_IBCS_L_STATE_ARM            0x2
-#define INFINIPATH_IBCS_L_STATE_ACTIVE         0x3
-#define INFINIPATH_IBCS_L_STATE_ACT_DEFER      0x4
-
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
-#define INFINIPATH_EXTS_GPIOIN_MASK 0xFFFFULL
-#define INFINIPATH_EXTS_GPIOIN_SHIFT 48
-
-/* kr_extctrl bits */
-#define INFINIPATH_EXTC_GPIOINVERT_MASK 0xFFFFULL
-#define INFINIPATH_EXTC_GPIOINVERT_SHIFT 32
-#define INFINIPATH_EXTC_GPIOOE_MASK 0xFFFFULL
-#define INFINIPATH_EXTC_GPIOOE_SHIFT 48
-#define INFINIPATH_EXTC_SERDESENABLE         0x80000000ULL
-#define INFINIPATH_EXTC_SERDESCONNECT        0x40000000ULL
-#define INFINIPATH_EXTC_SERDESENTRUNKING     0x20000000ULL
-#define INFINIPATH_EXTC_SERDESDISRXFIFO      0x10000000ULL
-#define INFINIPATH_EXTC_SERDESENPLPBK1       0x08000000ULL
-#define INFINIPATH_EXTC_SERDESENPLPBK2       0x04000000ULL
-#define INFINIPATH_EXTC_SERDESENENCDEC       0x02000000ULL
-#define INFINIPATH_EXTC_LED1SECPORT_ON       0x00000020ULL
-#define INFINIPATH_EXTC_LED2SECPORT_ON       0x00000010ULL
-#define INFINIPATH_EXTC_LED1PRIPORT_ON       0x00000008ULL
-#define INFINIPATH_EXTC_LED2PRIPORT_ON       0x00000004ULL
-#define INFINIPATH_EXTC_LEDGBLOK_ON          0x00000002ULL
-#define INFINIPATH_EXTC_LEDGBLERR_OFF        0x00000001ULL
-
-/* kr_partitionkey bits */
-#define INFINIPATH_PKEY_SIZE 16
-#define INFINIPATH_PKEY_MASK 0xFFFF
-#define INFINIPATH_PKEY_DEFAULT_PKEY 0xFFFF
-
-/* kr_serdesconfig0 bits */
-#define INFINIPATH_SERDC0_RESET_MASK  0xfULL   /* overall reset bits */
-#define INFINIPATH_SERDC0_RESET_PLL   0x10000000ULL    /* pll reset */
-/* tx idle enables (per lane) */
-#define INFINIPATH_SERDC0_TXIDLE      0xF000ULL
-/* rx detect enables (per lane) */
-#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
-/* L1 Power down; use with RXDETECT, otherwise not used on IB side */
-#define INFINIPATH_SERDC0_L1PWR_DN      0xF0ULL
-
-/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
-#define INFINIPATH_XGXS_RX_POL_SHIFT 19
-#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
-
-
-/*
- * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our
- * PIO send buffers.  This is well beyond anything currently
- * defined in the InfiniBand spec.
- */
-#define IPATH_PIO_MAXIBHDR 128
-
-typedef u64 ipath_err_t;
-
-/* The following change with the type of device, so
- * need to be part of the ipath_devdata struct, or
- * we could have problems plugging in devices of
- * different types (e.g. one HT, one PCIE)
- * in one system, to be managed by one driver.
- * On the other hand, this file may also be included
- * by other code, so leave the declarations here
- * temporarily. Minor footprint issue if common-model
- * linker used, none if C89+ linker used.
- */
-
-/* mask of defined bits for various registers */
-extern u64 infinipath_i_bitsextant;
-extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
-
-/* masks that are different in various chips, or only exist in some chips */
-extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
-
-/*
- * These are the infinipath general register numbers (not offsets).
- * The kernel registers are used directly, those beyond the kernel
- * registers are calculated from one of the base registers.  The use of
- * an integer type doesn't allow type-checking as thorough as, say,
- * an enum but allows for better hiding of chip differences.
- */
-typedef const u16 ipath_kreg,  /* infinipath general registers */
- ipath_creg,                   /* infinipath counter registers */
- ipath_sreg;                   /* kernel-only, infinipath send registers */
-
-/*
- * These are the chip registers common to all infinipath chips, and
- * used both by the kernel and the diagnostics or other user code.
- * They are all implemented such that 64 bit accesses work.
- * Some implement no more than 32 bits.  Because 64 bit reads
- * require 2 HT cmds on opteron, we access those with 32 bit
- * reads for efficiency (they are written as 64 bits, since
- * the extra 32 bits are nearly free on writes, and it slightly reduces
- * complexity).  The rest are all accessed as 64 bits.
- */
-struct ipath_kregs {
-       /* These are the 32 bit group */
-       ipath_kreg kr_control;
-       ipath_kreg kr_counterregbase;
-       ipath_kreg kr_intmask;
-       ipath_kreg kr_intstatus;
-       ipath_kreg kr_pagealign;
-       ipath_kreg kr_portcnt;
-       ipath_kreg kr_rcvtidbase;
-       ipath_kreg kr_rcvtidcnt;
-       ipath_kreg kr_rcvegrbase;
-       ipath_kreg kr_rcvegrcnt;
-       ipath_kreg kr_scratch;
-       ipath_kreg kr_sendctrl;
-       ipath_kreg kr_sendpiobufbase;
-       ipath_kreg kr_sendpiobufcnt;
-       ipath_kreg kr_sendpiosize;
-       ipath_kreg kr_sendregbase;
-       ipath_kreg kr_userregbase;
-       /* These are the 64 bit group */
-       ipath_kreg kr_debugport;
-       ipath_kreg kr_debugportselect;
-       ipath_kreg kr_errorclear;
-       ipath_kreg kr_errormask;
-       ipath_kreg kr_errorstatus;
-       ipath_kreg kr_extctrl;
-       ipath_kreg kr_extstatus;
-       ipath_kreg kr_gpio_clear;
-       ipath_kreg kr_gpio_mask;
-       ipath_kreg kr_gpio_out;
-       ipath_kreg kr_gpio_status;
-       ipath_kreg kr_hwdiagctrl;
-       ipath_kreg kr_hwerrclear;
-       ipath_kreg kr_hwerrmask;
-       ipath_kreg kr_hwerrstatus;
-       ipath_kreg kr_ibcctrl;
-       ipath_kreg kr_ibcstatus;
-       ipath_kreg kr_intblocked;
-       ipath_kreg kr_intclear;
-       ipath_kreg kr_interruptconfig;
-       ipath_kreg kr_mdio;
-       ipath_kreg kr_partitionkey;
-       ipath_kreg kr_rcvbthqp;
-       ipath_kreg kr_rcvbufbase;
-       ipath_kreg kr_rcvbufsize;
-       ipath_kreg kr_rcvctrl;
-       ipath_kreg kr_rcvhdrcnt;
-       ipath_kreg kr_rcvhdrentsize;
-       ipath_kreg kr_rcvhdrsize;
-       ipath_kreg kr_rcvintmembase;
-       ipath_kreg kr_rcvintmemsize;
-       ipath_kreg kr_revision;
-       ipath_kreg kr_sendbuffererror;
-       ipath_kreg kr_sendpioavailaddr;
-       ipath_kreg kr_serdesconfig0;
-       ipath_kreg kr_serdesconfig1;
-       ipath_kreg kr_serdesstatus;
-       ipath_kreg kr_txintmembase;
-       ipath_kreg kr_txintmemsize;
-       ipath_kreg kr_xgxsconfig;
-       ipath_kreg kr_ibpllcfg;
-       /* use these two (and the following N ports) only with
-        * ipath_k*_kreg64_port(); not *kreg64() */
-       ipath_kreg kr_rcvhdraddr;
-       ipath_kreg kr_rcvhdrtailaddr;
-
-       /* remaining registers are not present on all types of infinipath
-          chips  */
-       ipath_kreg kr_rcvpktledcnt;
-       ipath_kreg kr_pcierbuftestreg0;
-       ipath_kreg kr_pcierbuftestreg1;
-       ipath_kreg kr_pcieq0serdesconfig0;
-       ipath_kreg kr_pcieq0serdesconfig1;
-       ipath_kreg kr_pcieq0serdesstatus;
-       ipath_kreg kr_pcieq1serdesconfig0;
-       ipath_kreg kr_pcieq1serdesconfig1;
-       ipath_kreg kr_pcieq1serdesstatus;
-       ipath_kreg kr_hrtbt_guid;
-       ipath_kreg kr_ibcddrctrl;
-       ipath_kreg kr_ibcddrstatus;
-       ipath_kreg kr_jintreload;
-
-       /* send dma related regs */
-       ipath_kreg kr_senddmabase;
-       ipath_kreg kr_senddmalengen;
-       ipath_kreg kr_senddmatail;
-       ipath_kreg kr_senddmahead;
-       ipath_kreg kr_senddmaheadaddr;
-       ipath_kreg kr_senddmabufmask0;
-       ipath_kreg kr_senddmabufmask1;
-       ipath_kreg kr_senddmabufmask2;
-       ipath_kreg kr_senddmastatus;
-
-       /* SerDes related regs (IBA7220-only) */
-       ipath_kreg kr_ibserdesctrl;
-       ipath_kreg kr_ib_epbacc;
-       ipath_kreg kr_ib_epbtrans;
-       ipath_kreg kr_pcie_epbacc;
-       ipath_kreg kr_pcie_epbtrans;
-       ipath_kreg kr_ib_ddsrxeq;
-};
-
-struct ipath_cregs {
-       ipath_creg cr_badformatcnt;
-       ipath_creg cr_erricrccnt;
-       ipath_creg cr_errlinkcnt;
-       ipath_creg cr_errlpcrccnt;
-       ipath_creg cr_errpkey;
-       ipath_creg cr_errrcvflowctrlcnt;
-       ipath_creg cr_err_rlencnt;
-       ipath_creg cr_errslencnt;
-       ipath_creg cr_errtidfull;
-       ipath_creg cr_errtidvalid;
-       ipath_creg cr_errvcrccnt;
-       ipath_creg cr_ibstatuschange;
-       ipath_creg cr_intcnt;
-       ipath_creg cr_invalidrlencnt;
-       ipath_creg cr_invalidslencnt;
-       ipath_creg cr_lbflowstallcnt;
-       ipath_creg cr_iblinkdowncnt;
-       ipath_creg cr_iblinkerrrecovcnt;
-       ipath_creg cr_ibsymbolerrcnt;
-       ipath_creg cr_pktrcvcnt;
-       ipath_creg cr_pktrcvflowctrlcnt;
-       ipath_creg cr_pktsendcnt;
-       ipath_creg cr_pktsendflowcnt;
-       ipath_creg cr_portovflcnt;
-       ipath_creg cr_rcvebpcnt;
-       ipath_creg cr_rcvovflcnt;
-       ipath_creg cr_rxdroppktcnt;
-       ipath_creg cr_senddropped;
-       ipath_creg cr_sendstallcnt;
-       ipath_creg cr_sendunderruncnt;
-       ipath_creg cr_unsupvlcnt;
-       ipath_creg cr_wordrcvcnt;
-       ipath_creg cr_wordsendcnt;
-       ipath_creg cr_vl15droppedpktcnt;
-       ipath_creg cr_rxotherlocalphyerrcnt;
-       ipath_creg cr_excessbufferovflcnt;
-       ipath_creg cr_locallinkintegrityerrcnt;
-       ipath_creg cr_rxvlerrcnt;
-       ipath_creg cr_rxdlidfltrcnt;
-       ipath_creg cr_psstat;
-       ipath_creg cr_psstart;
-       ipath_creg cr_psinterval;
-       ipath_creg cr_psrcvdatacount;
-       ipath_creg cr_psrcvpktscount;
-       ipath_creg cr_psxmitdatacount;
-       ipath_creg cr_psxmitpktscount;
-       ipath_creg cr_psxmitwaitcount;
-};
-
-#endif                         /* _IPATH_REGISTERS_H */
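The register definitions above come as MASK/SHIFT pairs meant to be applied to raw 64-bit register values. Purely as an illustration (kreg_field and the usage below are hypothetical, assuming the register value has already been read from the chip):

#include <stdint.h>

static inline uint32_t kreg_field(uint64_t regval, uint64_t mask, unsigned int shift)
{
        return (uint32_t)((regval >> shift) & mask);
}

/*
 * e.g. the major chip revision out of kr_revision:
 *      kreg_field(rev, INFINIPATH_R_CHIPREVMAJOR_MASK,
 *                 INFINIPATH_R_CHIPREVMAJOR_SHIFT);
 */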
diff --git a/drivers/staging/rdma/ipath/ipath_ruc.c b/drivers/staging/rdma/ipath/ipath_ruc.c
deleted file mode 100644 (file)
index e541a01..0000000
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/*
- * Convert the AETH RNR timeout code into the number of milliseconds.
- */
-const u32 ib_ipath_rnr_table[32] = {
-       656,                    /* 0 */
-       1,                      /* 1 */
-       1,                      /* 2 */
-       1,                      /* 3 */
-       1,                      /* 4 */
-       1,                      /* 5 */
-       1,                      /* 6 */
-       1,                      /* 7 */
-       1,                      /* 8 */
-       1,                      /* 9 */
-       1,                      /* A */
-       1,                      /* B */
-       1,                      /* C */
-       1,                      /* D */
-       2,                      /* E */
-       2,                      /* F */
-       3,                      /* 10 */
-       4,                      /* 11 */
-       6,                      /* 12 */
-       8,                      /* 13 */
-       11,                     /* 14 */
-       16,                     /* 15 */
-       21,                     /* 16 */
-       31,                     /* 17 */
-       41,                     /* 18 */
-       62,                     /* 19 */
-       82,                     /* 1A */
-       123,                    /* 1B */
-       164,                    /* 1C */
-       246,                    /* 1D */
-       328,                    /* 1E */
-       492                     /* 1F */
-};
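The table above maps the 5-bit AETH RNR NAK timer code to milliseconds. A minimal lookup sketch, assuming the timer code occupies bits 28..24 of the AETH word (the helper name is illustrative and u32 comes from <linux/types.h>):

static inline u32 rnr_code_to_ms(u32 aeth)
{
        return ib_ipath_rnr_table[(aeth >> 24) & 0x1f];
}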
-
-/**
- * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
- * @qp: the QP
- *
- * Called with the QP s_lock held and interrupts disabled.
- * XXX Use a simple list for now.  We might need a priority
- * queue if we have lots of QPs waiting for RNR timeouts
- * but that should be rare.
- */
-void ipath_insert_rnr_queue(struct ipath_qp *qp)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-
-       /* We already did a spin_lock_irqsave(), so just use spin_lock */
-       spin_lock(&dev->pending_lock);
-       if (list_empty(&dev->rnrwait))
-               list_add(&qp->timerwait, &dev->rnrwait);
-       else {
-               struct list_head *l = &dev->rnrwait;
-               struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
-                                                 timerwait);
-
-               while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
-                       qp->s_rnr_timeout -= nqp->s_rnr_timeout;
-                       l = l->next;
-                       if (l->next == &dev->rnrwait) {
-                               nqp = NULL;
-                               break;
-                       }
-                       nqp = list_entry(l->next, struct ipath_qp,
-                                        timerwait);
-               }
-               if (nqp)
-                       nqp->s_rnr_timeout -= qp->s_rnr_timeout;
-               list_add(&qp->timerwait, l);
-       }
-       spin_unlock(&dev->pending_lock);
-}
-
-/**
- * ipath_init_sge - Validate a RWQE and fill in the SGE state
- * @qp: the QP
- *
- * Return 1 if OK.
- */
-int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
-                  u32 *lengthp, struct ipath_sge_state *ss)
-{
-       int i, j, ret;
-       struct ib_wc wc;
-
-       *lengthp = 0;
-       for (i = j = 0; i < wqe->num_sge; i++) {
-               if (wqe->sg_list[i].length == 0)
-                       continue;
-               /* Check LKEY */
-               if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
-                                  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
-                       goto bad_lkey;
-               *lengthp += wqe->sg_list[i].length;
-               j++;
-       }
-       ss->num_sge = j;
-       ret = 1;
-       goto bail;
-
-bad_lkey:
-       memset(&wc, 0, sizeof(wc));
-       wc.wr_id = wqe->wr_id;
-       wc.status = IB_WC_LOC_PROT_ERR;
-       wc.opcode = IB_WC_RECV;
-       wc.qp = &qp->ibqp;
-       /* Signal solicited completion event. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return 0 if no RWQE is available, otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
-{
-       unsigned long flags;
-       struct ipath_rq *rq;
-       struct ipath_rwq *wq;
-       struct ipath_srq *srq;
-       struct ipath_rwqe *wqe;
-       void (*handler)(struct ib_event *, void *);
-       u32 tail;
-       int ret;
-
-       if (qp->ibqp.srq) {
-               srq = to_isrq(qp->ibqp.srq);
-               handler = srq->ibsrq.event_handler;
-               rq = &srq->rq;
-       } else {
-               srq = NULL;
-               handler = NULL;
-               rq = &qp->r_rq;
-       }
-
-       spin_lock_irqsave(&rq->lock, flags);
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-               ret = 0;
-               goto unlock;
-       }
-
-       wq = rq->wq;
-       tail = wq->tail;
-       /* Validate tail before using it since it is user writable. */
-       if (tail >= rq->size)
-               tail = 0;
-       do {
-               if (unlikely(tail == wq->head)) {
-                       ret = 0;
-                       goto unlock;
-               }
-               /* Make sure entry is read after head index is read. */
-               smp_rmb();
-               wqe = get_rwqe_ptr(rq, tail);
-               if (++tail >= rq->size)
-                       tail = 0;
-               if (wr_id_only)
-                       break;
-               qp->r_sge.sg_list = qp->r_sg_list;
-       } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
-       qp->r_wr_id = wqe->wr_id;
-       wq->tail = tail;
-
-       ret = 1;
-       set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
-       if (handler) {
-               u32 n;
-
-               /*
-                * validate head pointer value and compute
-                * the number of remaining WQEs.
-                */
-               n = wq->head;
-               if (n >= rq->size)
-                       n = 0;
-               if (n < tail)
-                       n += rq->size - tail;
-               else
-                       n -= tail;
-               if (n < srq->limit) {
-                       struct ib_event ev;
-
-                       srq->limit = 0;
-                       spin_unlock_irqrestore(&rq->lock, flags);
-                       ev.device = qp->ibqp.device;
-                       ev.element.srq = qp->ibqp.srq;
-                       ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-                       handler(&ev, srq->ibsrq.srq_context);
-                       goto bail;
-               }
-       }
-unlock:
-       spin_unlock_irqrestore(&rq->lock, flags);
-bail:
-       return ret;
-}
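The SRQ limit check near the end of ipath_get_rwqe() reduces to counting the receive WQEs still queued between tail and head in a circular buffer of rq->size slots. A standalone sketch of that computation (hypothetical helper name, not driver code):

#include <stdint.h>

/* Entries still queued in a ring of 'size' slots, given head and tail < size. */
static inline uint32_t rq_entries_left(uint32_t head, uint32_t tail, uint32_t size)
{
        return (head >= tail) ? head - tail : head + size - tail;
}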
-
-/**
- * ipath_ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from ipath_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send().  We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ipath_ruc_loopback(struct ipath_qp *sqp)
-{
-       struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
-       struct ipath_qp *qp;
-       struct ipath_swqe *wqe;
-       struct ipath_sge *sge;
-       unsigned long flags;
-       struct ib_wc wc;
-       u64 sdata;
-       atomic64_t *maddr;
-       enum ib_wc_status send_status;
-
-       /*
-        * Note that we check the responder QP state after
-        * checking the requester's state.
-        */
-       qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
-
-       spin_lock_irqsave(&sqp->s_lock, flags);
-
-       /* Return if we are already busy processing a work request. */
-       if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
-           !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
-               goto unlock;
-
-       sqp->s_flags |= IPATH_S_BUSY;
-
-again:
-       if (sqp->s_last == sqp->s_head)
-               goto clr_busy;
-       wqe = get_swqe_ptr(sqp, sqp->s_last);
-
-       /* Return if it is not OK to start a new work request. */
-       if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
-               if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
-                       goto clr_busy;
-               /* We are in the error state, flush the work request. */
-               send_status = IB_WC_WR_FLUSH_ERR;
-               goto flush_send;
-       }
-
-       /*
-        * We can rely on the entry not changing without the s_lock
-        * being held until we update s_last.
-        * We increment s_cur to indicate s_last is in progress.
-        */
-       if (sqp->s_last == sqp->s_cur) {
-               if (++sqp->s_cur >= sqp->s_size)
-                       sqp->s_cur = 0;
-       }
-       spin_unlock_irqrestore(&sqp->s_lock, flags);
-
-       if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-               dev->n_pkt_drops++;
-               /*
-                * For RC, the requester would timeout and retry so
-                * shortcut the timeouts and just signal too many retries.
-                */
-               if (sqp->ibqp.qp_type == IB_QPT_RC)
-                       send_status = IB_WC_RETRY_EXC_ERR;
-               else
-                       send_status = IB_WC_SUCCESS;
-               goto serr;
-       }
-
-       memset(&wc, 0, sizeof wc);
-       send_status = IB_WC_SUCCESS;
-
-       sqp->s_sge.sge = wqe->sg_list[0];
-       sqp->s_sge.sg_list = wqe->sg_list + 1;
-       sqp->s_sge.num_sge = wqe->wr.num_sge;
-       sqp->s_len = wqe->length;
-       switch (wqe->wr.opcode) {
-       case IB_WR_SEND_WITH_IMM:
-               wc.wc_flags = IB_WC_WITH_IMM;
-               wc.ex.imm_data = wqe->wr.ex.imm_data;
-               /* FALLTHROUGH */
-       case IB_WR_SEND:
-               if (!ipath_get_rwqe(qp, 0))
-                       goto rnr_nak;
-               break;
-
-       case IB_WR_RDMA_WRITE_WITH_IMM:
-               if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-                       goto inv_err;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               wc.ex.imm_data = wqe->wr.ex.imm_data;
-               if (!ipath_get_rwqe(qp, 1))
-                       goto rnr_nak;
-               /* FALLTHROUGH */
-       case IB_WR_RDMA_WRITE:
-               if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-                       goto inv_err;
-               if (wqe->length == 0)
-                       break;
-               if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
-                                           wqe->rdma_wr.remote_addr,
-                                           wqe->rdma_wr.rkey,
-                                           IB_ACCESS_REMOTE_WRITE)))
-                       goto acc_err;
-               break;
-
-       case IB_WR_RDMA_READ:
-               if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
-                       goto inv_err;
-               if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
-                                           wqe->rdma_wr.remote_addr,
-                                           wqe->rdma_wr.rkey,
-                                           IB_ACCESS_REMOTE_READ)))
-                       goto acc_err;
-               qp->r_sge.sge = wqe->sg_list[0];
-               qp->r_sge.sg_list = wqe->sg_list + 1;
-               qp->r_sge.num_sge = wqe->wr.num_sge;
-               break;
-
-       case IB_WR_ATOMIC_CMP_AND_SWP:
-       case IB_WR_ATOMIC_FETCH_AND_ADD:
-               if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
-                       goto inv_err;
-               if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
-                                           wqe->atomic_wr.remote_addr,
-                                           wqe->atomic_wr.rkey,
-                                           IB_ACCESS_REMOTE_ATOMIC)))
-                       goto acc_err;
-               /* Perform atomic OP and save result. */
-               maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-               sdata = wqe->atomic_wr.compare_add;
-               *(u64 *) sqp->s_sge.sge.vaddr =
-                       (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
-                       (u64) atomic64_add_return(sdata, maddr) - sdata :
-                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-                                     sdata, wqe->atomic_wr.swap);
-               goto send_comp;
-
-       default:
-               send_status = IB_WC_LOC_QP_OP_ERR;
-               goto serr;
-       }
-
-       sge = &sqp->s_sge.sge;
-       while (sqp->s_len) {
-               u32 len = sqp->s_len;
-
-               if (len > sge->length)
-                       len = sge->length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--sqp->s_sge.num_sge)
-                               *sge = *sqp->s_sge.sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               sqp->s_len -= len;
-       }
-
-       if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-               goto send_comp;
-
-       if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-               wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-       else
-               wc.opcode = IB_WC_RECV;
-       wc.wr_id = qp->r_wr_id;
-       wc.status = IB_WC_SUCCESS;
-       wc.byte_len = wqe->length;
-       wc.qp = &qp->ibqp;
-       wc.src_qp = qp->remote_qpn;
-       wc.slid = qp->remote_ah_attr.dlid;
-       wc.sl = qp->remote_ah_attr.sl;
-       wc.port_num = 1;
-       /* Signal completion event if the solicited bit is set. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                      wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
-       spin_lock_irqsave(&sqp->s_lock, flags);
-flush_send:
-       sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
-       ipath_send_complete(sqp, wqe, send_status);
-       goto again;
-
-rnr_nak:
-       /* Handle RNR NAK */
-       if (qp->ibqp.qp_type == IB_QPT_UC)
-               goto send_comp;
-       /*
-        * Note: we don't need the s_lock held since the BUSY flag
-        * makes this single threaded.
-        */
-       if (sqp->s_rnr_retry == 0) {
-               send_status = IB_WC_RNR_RETRY_EXC_ERR;
-               goto serr;
-       }
-       if (sqp->s_rnr_retry_cnt < 7)
-               sqp->s_rnr_retry--;
-       spin_lock_irqsave(&sqp->s_lock, flags);
-       if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
-               goto clr_busy;
-       sqp->s_flags |= IPATH_S_WAITING;
-       dev->n_rnr_naks++;
-       sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
-       ipath_insert_rnr_queue(sqp);
-       goto clr_busy;
-
-inv_err:
-       send_status = IB_WC_REM_INV_REQ_ERR;
-       wc.status = IB_WC_LOC_QP_OP_ERR;
-       goto err;
-
-acc_err:
-       send_status = IB_WC_REM_ACCESS_ERR;
-       wc.status = IB_WC_LOC_PROT_ERR;
-err:
-       /* responder goes to error state */
-       ipath_rc_error(qp, wc.status);
-
-serr:
-       spin_lock_irqsave(&sqp->s_lock, flags);
-       ipath_send_complete(sqp, wqe, send_status);
-       if (sqp->ibqp.qp_type == IB_QPT_RC) {
-               int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
-               sqp->s_flags &= ~IPATH_S_BUSY;
-               spin_unlock_irqrestore(&sqp->s_lock, flags);
-               if (lastwqe) {
-                       struct ib_event ev;
-
-                       ev.device = sqp->ibqp.device;
-                       ev.element.qp = &sqp->ibqp;
-                       ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-                       sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
-               }
-               goto done;
-       }
-clr_busy:
-       sqp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-       spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
-       if (qp && atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-}
-
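The atomic opcodes in ipath_ruc_loopback() are emulated locally: fetch-and-add uses atomic64_add_return() and subtracts the addend back out, while compare-and-swap uses cmpxchg(); in both cases the value handed back to the requester is the original contents of the target address, as IB atomics require. A hedged sketch of just that mapping (the helper name and signature are hypothetical):

/* Sketch only: return the original 64-bit value at 'maddr' after either
 * adding 'compare_add' (fetch-and-add) or swapping in 'swap' when the old
 * value equals 'compare_add' (compare-and-swap). */
static u64 loopback_atomic_op(atomic64_t *maddr, bool fetch_add,
                              u64 compare_add, u64 swap)
{
        if (fetch_add)
                return (u64) atomic64_add_return(compare_add, maddr) - compare_add;
        return (u64) cmpxchg((u64 *) maddr, compare_add, swap);
}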
-static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
-{
-       if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
-           qp->ibqp.qp_type == IB_QPT_SMI) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                dd->ipath_sendctrl);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-       }
-}
-
-/**
- * ipath_no_bufs_available - tell the layer driver we need buffers
- * @qp: the QP that caused the problem
- * @dev: the device we ran out of buffers on
- *
- * Called when we run out of PIO buffers.
- * If we are now in the error state, return zero to flush the
- * send work request.
- */
-static int ipath_no_bufs_available(struct ipath_qp *qp,
-                                   struct ipath_ibdev *dev)
-{
-       unsigned long flags;
-       int ret = 1;
-
-       /*
-        * Note that as soon as want_buffer() is called and
-        * possibly before it returns, ipath_ib_piobufavail()
-        * could be called. Therefore, put QP on the piowait list before
-        * enabling the PIO avail interrupt.
-        */
-       spin_lock_irqsave(&qp->s_lock, flags);
-       if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
-               dev->n_piowait++;
-               qp->s_flags |= IPATH_S_WAITING;
-               qp->s_flags &= ~IPATH_S_BUSY;
-               spin_lock(&dev->pending_lock);
-               if (list_empty(&qp->piowait))
-                       list_add_tail(&qp->piowait, &dev->piowait);
-               spin_unlock(&dev->pending_lock);
-       } else
-               ret = 0;
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       if (ret)
-               want_buffer(dev->dd, qp);
-       return ret;
-}
-
-/**
- * ipath_make_grh - construct a GRH header
- * @dev: a pointer to the ipath device
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
-u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
-                  struct ib_global_route *grh, u32 hwords, u32 nwords)
-{
-       hdr->version_tclass_flow =
-               cpu_to_be32((6 << 28) |
-                           (grh->traffic_class << 20) |
-                           grh->flow_label);
-       hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
-       /* next_hdr is defined by C8-7 in ch. 8.4.1 */
-       hdr->next_hdr = 0x1B;
-       hdr->hop_limit = grh->hop_limit;
-       /* The SGID is 32-bit aligned. */
-       hdr->sgid.global.subnet_prefix = dev->gid_prefix;
-       hdr->sgid.global.interface_id = dev->dd->ipath_guid;
-       hdr->dgid = grh->dgid;
-
-       /* GRH header size in 32-bit words. */
-       return sizeof(struct ib_grh) / sizeof(u32);
-}
-
-void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
-                          struct ipath_other_headers *ohdr,
-                          u32 bth0, u32 bth2)
-{
-       u16 lrh0;
-       u32 nwords;
-       u32 extra_bytes;
-
-       /* Construct the header. */
-       extra_bytes = -qp->s_cur_size & 3;
-       nwords = (qp->s_cur_size + extra_bytes) >> 2;
-       lrh0 = IPATH_LRH_BTH;
-       if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-               qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
-                                                &qp->remote_ah_attr.grh,
-                                                qp->s_hdrwords, nwords);
-               lrh0 = IPATH_LRH_GRH;
-       }
-       lrh0 |= qp->remote_ah_attr.sl << 4;
-       qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-       qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-       qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
-       qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
-                                      qp->remote_ah_attr.src_path_bits);
-       bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
-       bth0 |= extra_bytes << 20;
-       ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
-       ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-       ohdr->bth[2] = cpu_to_be32(bth2);
-}
-
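The "extra_bytes = -qp->s_cur_size & 3" line in ipath_make_ruc_header() is the standard round-up-to-dword trick: it yields the 0-3 pad bytes needed to make the payload a whole number of 32-bit words. A small sketch of the arithmetic with illustrative names:

/* Illustrative only: pad a byte length up to a whole number of 32-bit words. */
static void round_to_dwords(u32 len, u32 *pad_bytes, u32 *dwords)
{
        *pad_bytes = -len & 3;              /* 0, 1, 2 or 3 pad bytes      */
        *dwords = (len + *pad_bytes) >> 2;  /* padded length in dwords     */
}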
-/**
- * ipath_do_send - perform a send on a QP
- * @data: contains a pointer to the QP
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, two threads could send packets out of order.
- */
-void ipath_do_send(unsigned long data)
-{
-       struct ipath_qp *qp = (struct ipath_qp *)data;
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       int (*make_req)(struct ipath_qp *qp);
-       unsigned long flags;
-
-       if ((qp->ibqp.qp_type == IB_QPT_RC ||
-            qp->ibqp.qp_type == IB_QPT_UC) &&
-           qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
-               ipath_ruc_loopback(qp);
-               goto bail;
-       }
-
-       if (qp->ibqp.qp_type == IB_QPT_RC)
-              make_req = ipath_make_rc_req;
-       else if (qp->ibqp.qp_type == IB_QPT_UC)
-              make_req = ipath_make_uc_req;
-       else
-              make_req = ipath_make_ud_req;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Return if we are already busy processing a work request. */
-       if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
-           !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-               goto bail;
-       }
-
-       qp->s_flags |= IPATH_S_BUSY;
-
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-
-again:
-       /* Check for a constructed packet to be sent. */
-       if (qp->s_hdrwords != 0) {
-               /*
-                * If no PIO bufs are available, return.  An interrupt will
-                * call ipath_ib_piobufavail() when one is available.
-                */
-               if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
-                                    qp->s_cur_sge, qp->s_cur_size)) {
-                       if (ipath_no_bufs_available(qp, dev))
-                               goto bail;
-               }
-               dev->n_unicast_xmit++;
-               /* Record that we sent the packet and s_hdr is empty. */
-               qp->s_hdrwords = 0;
-       }
-
-       if (make_req(qp))
-               goto again;
-
-bail:;
-}
-
-/*
- * This should be called with s_lock held.
- */
-void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
-                        enum ib_wc_status status)
-{
-       u32 old_last, last;
-
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
-               return;
-
-       /* See ch. 11.2.4.1 and 10.7.3.1 */
-       if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
-           (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-           status != IB_WC_SUCCESS) {
-               struct ib_wc wc;
-
-               memset(&wc, 0, sizeof wc);
-               wc.wr_id = wqe->wr.wr_id;
-               wc.status = status;
-               wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-               wc.qp = &qp->ibqp;
-               if (status == IB_WC_SUCCESS)
-                       wc.byte_len = wqe->length;
-               ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
-                              status != IB_WC_SUCCESS);
-       }
-
-       old_last = last = qp->s_last;
-       if (++last >= qp->s_size)
-               last = 0;
-       qp->s_last = last;
-       if (qp->s_cur == old_last)
-               qp->s_cur = last;
-       if (qp->s_tail == old_last)
-               qp->s_tail = last;
-       if (qp->state == IB_QPS_SQD && last == qp->s_cur)
-               qp->s_draining = 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_sdma.c b/drivers/staging/rdma/ipath/ipath_sdma.c
deleted file mode 100644 (file)
index 1ffc06a..0000000
+++ /dev/null
@@ -1,818 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
-
-static void vl15_watchdog_enq(struct ipath_devdata *dd)
-{
-       /* ipath_sdma_lock must already be held */
-       if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
-               unsigned long interval = (HZ + 19) / 20;
-               dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
-               add_timer(&dd->ipath_sdma_vl15_timer);
-       }
-}
-
-static void vl15_watchdog_deq(struct ipath_devdata *dd)
-{
-       /* ipath_sdma_lock must already be held */
-       if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
-               unsigned long interval = (HZ + 19) / 20;
-               mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
-       } else {
-               del_timer(&dd->ipath_sdma_vl15_timer);
-       }
-}
-
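The "(HZ + 19) / 20" expression used by both watchdog helpers above is HZ/20 rounded up, i.e. a roughly 50 ms period expressed in jiffies regardless of the configured HZ. An equivalent sketch:

/* Sketch: the VL15 watchdog period, ~50 ms in jiffies. */
static unsigned long vl15_watchdog_period(void)
{
        return DIV_ROUND_UP(HZ, 20);    /* same as (HZ + 19) / 20 */
}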
-static void vl15_watchdog_timeout(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-       if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
-               ipath_dbg("vl15 watchdog timeout - clearing\n");
-               ipath_cancel_sends(dd, 1);
-               ipath_hol_down(dd);
-       } else {
-               ipath_dbg("vl15 watchdog timeout - "
-                         "condition already cleared\n");
-       }
-}
-
-static void unmap_desc(struct ipath_devdata *dd, unsigned head)
-{
-       __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
-       u64 desc[2];
-       dma_addr_t addr;
-       size_t len;
-
-       desc[0] = le64_to_cpu(descqp[0]);
-       desc[1] = le64_to_cpu(descqp[1]);
-
-       addr = (desc[1] << 32) | (desc[0] >> 32);
-       len = (desc[0] >> 14) & (0x7ffULL << 2);
-       dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
-}
-
-/*
- * ipath_sdma_lock should be locked before calling this.
- */
-int ipath_sdma_make_progress(struct ipath_devdata *dd)
-{
-       struct list_head *lp = NULL;
-       struct ipath_sdma_txreq *txp = NULL;
-       u16 dmahead;
-       u16 start_idx = 0;
-       int progress = 0;
-
-       if (!list_empty(&dd->ipath_sdma_activelist)) {
-               lp = dd->ipath_sdma_activelist.next;
-               txp = list_entry(lp, struct ipath_sdma_txreq, list);
-               start_idx = txp->start_idx;
-       }
-
-       /*
-        * Read the SDMA head register in order to know that the
-        * interrupt clear has been written to the chip.
-        * Otherwise, we may not get an interrupt for the last
-        * descriptor in the queue.
-        */
-       dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
-       /* sanity check return value for error handling (chip reset, etc.) */
-       if (dmahead >= dd->ipath_sdma_descq_cnt)
-               goto done;
-
-       while (dd->ipath_sdma_descq_head != dmahead) {
-               if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
-                   dd->ipath_sdma_descq_head == start_idx) {
-                       unmap_desc(dd, dd->ipath_sdma_descq_head);
-                       start_idx++;
-                       if (start_idx == dd->ipath_sdma_descq_cnt)
-                               start_idx = 0;
-               }
-
-               /* increment free count and head */
-               dd->ipath_sdma_descq_removed++;
-               if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
-                       dd->ipath_sdma_descq_head = 0;
-
-               if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
-                       /* move to notify list */
-                       if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-                               vl15_watchdog_deq(dd);
-                       list_move_tail(lp, &dd->ipath_sdma_notifylist);
-                       if (!list_empty(&dd->ipath_sdma_activelist)) {
-                               lp = dd->ipath_sdma_activelist.next;
-                               txp = list_entry(lp, struct ipath_sdma_txreq,
-                                                list);
-                               start_idx = txp->start_idx;
-                       } else {
-                               lp = NULL;
-                               txp = NULL;
-                       }
-               }
-               progress = 1;
-       }
-
-       if (progress)
-               tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
-
-done:
-       return progress;
-}
-
-static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
-{
-       struct ipath_sdma_txreq *txp, *txp_next;
-
-       list_for_each_entry_safe(txp, txp_next, list, list) {
-               list_del_init(&txp->list);
-
-               if (txp->callback)
-                       (*txp->callback)(txp->callback_cookie,
-                                        txp->callback_status);
-       }
-}
-
-static void sdma_notify_taskbody(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-       struct list_head list;
-
-       INIT_LIST_HEAD(&list);
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       list_splice_init(&dd->ipath_sdma_notifylist, &list);
-
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       ipath_sdma_notify(dd, &list);
-
-       /*
-        * The IB verbs layer needs to see the callback before getting
-        * the call to ipath_ib_piobufavail() because the callback
-        * handles releasing resources the next send will need.
-        * Otherwise, we could do these calls in
-        * ipath_sdma_make_progress().
-        */
-       ipath_ib_piobufavail(dd->verbs_dev);
-}
-
-static void sdma_notify_task(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-       if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-               sdma_notify_taskbody(dd);
-}
-
-static void dump_sdma_state(struct ipath_devdata *dd)
-{
-       unsigned long reg;
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
-       ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
-       ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
-       ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
-       ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
-       ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
-       ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
-       ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
-}
-
-static void sdma_abort_task(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-       u64 status;
-       unsigned long flags;
-
-       if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-               return;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
-
-       /* nothing to do */
-       if (status == IPATH_SDMA_ABORT_NONE)
-               goto unlock;
-
-       /* ipath_sdma_abort() is done, waiting for interrupt */
-       if (status == IPATH_SDMA_ABORT_DISARMED) {
-               if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
-                       goto resched_noprint;
-               /* give up, intr got lost somewhere */
-               ipath_dbg("give up waiting for SDMADISABLED intr\n");
-               __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-               status = IPATH_SDMA_ABORT_ABORTED;
-       }
-
-       /* everything is stopped, time to clean up and restart */
-       if (status == IPATH_SDMA_ABORT_ABORTED) {
-               struct ipath_sdma_txreq *txp, *txpnext;
-               u64 hwstatus;
-               int notify = 0;
-
-               hwstatus = ipath_read_kreg64(dd,
-                               dd->ipath_kregs->kr_senddmastatus);
-
-               if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
-                                IPATH_SDMA_STATUS_ABORT_IN_PROG             |
-                                IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
-                   !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
-                       if (dd->ipath_sdma_reset_wait > 0) {
-                               /* not done shutting down sdma */
-                               --dd->ipath_sdma_reset_wait;
-                               goto resched;
-                       }
-                       ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
-                               "status after SDMA reset, continuing\n");
-                       dump_sdma_state(dd);
-               }
-
-               /* dequeue all "sent" requests */
-               list_for_each_entry_safe(txp, txpnext,
-                                        &dd->ipath_sdma_activelist, list) {
-                       txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
-                       if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-                               vl15_watchdog_deq(dd);
-                       list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
-                       notify = 1;
-               }
-               if (notify)
-                       tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
-
-               /* reset our notion of head and tail */
-               dd->ipath_sdma_descq_tail = 0;
-               dd->ipath_sdma_descq_head = 0;
-               dd->ipath_sdma_head_dma[0] = 0;
-               dd->ipath_sdma_generation = 0;
-               dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
-
-               /* Reset SendDmaLenGen */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
-                       (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
-
-               /* done with sdma state for a bit */
-               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-               /*
-                * Don't restart sdma here (with the exception
-                * below). Wait until link is up to ACTIVE.  VL15 MADs
-                * used to bring the link up use PIO, and multiple link
-                * transitions otherwise cause the sdma engine to be
-                * stopped and started multiple times.
-                * The disable is done here, including the shadow,
-                * so the state is kept consistent.
-                * See ipath_restart_sdma() for the actual starting
-                * of sdma.
-                */
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                dd->ipath_sendctrl);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-               /* make sure I see next message */
-               dd->ipath_sdma_abort_jiffies = 0;
-
-               /*
-                * Not everything that takes SDMA offline is a link
-                * status change.  If the link was up, restart SDMA.
-                */
-               if (dd->ipath_flags & IPATH_LINKACTIVE)
-                       ipath_restart_sdma(dd);
-
-               goto done;
-       }
-
-resched:
-       /*
-        * for now, keep spinning
-        * JAG - this is bad to just have default be a loop without
-        * state change
-        */
-       if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
-               ipath_dbg("looping with status 0x%08lx\n",
-                         dd->ipath_sdma_status);
-               dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
-       }
-resched_noprint:
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-       if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-               tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-       return;
-
-unlock:
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-done:
-       return;
-}
-
-/*
- * This is called from interrupt context.
- */
-void ipath_sdma_intr(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       (void) ipath_sdma_make_progress(dd);
-
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-}
-
-static int alloc_sdma(struct ipath_devdata *dd)
-{
-       int ret = 0;
-
-       /* Allocate memory for SendDMA descriptor FIFO */
-       dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
-               SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
-
-       if (!dd->ipath_sdma_descq) {
-               ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
-                       "FIFO memory\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       dd->ipath_sdma_descq_cnt =
-               SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
-
-       /* Allocate memory for DMA of head register to memory */
-       dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
-               PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
-       if (!dd->ipath_sdma_head_dma) {
-               ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
-               ret = -ENOMEM;
-               goto cleanup_descq;
-       }
-       dd->ipath_sdma_head_dma[0] = 0;
-
-       setup_timer(&dd->ipath_sdma_vl15_timer, vl15_watchdog_timeout,
-                       (unsigned long)dd);
-
-       atomic_set(&dd->ipath_sdma_vl15_count, 0);
-
-       goto done;
-
-cleanup_descq:
-       dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
-               (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
-       dd->ipath_sdma_descq = NULL;
-       dd->ipath_sdma_descq_phys = 0;
-done:
-       return ret;
-}
-
-int setup_sdma(struct ipath_devdata *dd)
-{
-       int ret = 0;
-       unsigned i, n;
-       u64 tmp64;
-       u64 senddmabufmask[3] = { 0 };
-       unsigned long flags;
-
-       ret = alloc_sdma(dd);
-       if (ret)
-               goto done;
-
-       if (!dd->ipath_sdma_descq) {
-               ipath_dev_err(dd, "SendDMA memory not allocated\n");
-               goto done;
-       }
-
-       /*
-        * Set initial status as if we had been up, then gone down.
-        * This lets initial start on transition to ACTIVE be the
-        * same as restart after link flap.
-        */
-       dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
-       dd->ipath_sdma_abort_jiffies = 0;
-       dd->ipath_sdma_generation = 0;
-       dd->ipath_sdma_descq_tail = 0;
-       dd->ipath_sdma_descq_head = 0;
-       dd->ipath_sdma_descq_removed = 0;
-       dd->ipath_sdma_descq_added = 0;
-
-       /* Set SendDmaBase */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
-                        dd->ipath_sdma_descq_phys);
-       /* Set SendDmaLenGen */
-       tmp64 = dd->ipath_sdma_descq_cnt;
-       tmp64 |= 1<<18; /* enable generation checking */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
-       /* Set SendDmaTail */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
-                        dd->ipath_sdma_descq_tail);
-       /* Set SendDmaHeadAddr */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
-                        dd->ipath_sdma_head_phys);
-
-       /*
-        * Reserve all the former "kernel" piobufs, using high number range
-        * so we get as many 4K buffers as possible
-        */
-       n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-       i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
-       ipath_chg_pioavailkernel(dd, i, n - i, 0);
-       for (; i < n; ++i) {
-               unsigned word = i / 64;
-               unsigned bit = i & 63;
-               BUG_ON(word >= 3);
-               senddmabufmask[word] |= 1ULL << bit;
-       }
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
-                        senddmabufmask[0]);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
-                        senddmabufmask[1]);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
-                        senddmabufmask[2]);
-
-       INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
-       INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
-
-       tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
-                    (unsigned long) dd);
-       tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
-                    (unsigned long) dd);
-
-       /*
-        * No use to turn on SDMA here, as link is probably not ACTIVE
-        * Just mark it RUNNING and enable the interrupt, and let the
-        * ipath_restart_sdma() on link transition to ACTIVE actually
-        * enable it.
-        */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-done:
-       return ret;
-}
-
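The reservation loop in setup_sdma() spreads one bit per reserved PIO buffer across the three 64-bit SendDmaBufMask registers (hence the BUG_ON for word >= 3). A minimal sketch of the indexing, assuming a hypothetical 3-word mask array:

/* Sketch only: mark buffer 'i' in a 3 x 64-bit mask (valid for i < 192). */
static void mark_sdma_buf(u64 mask[3], unsigned int i)
{
        mask[i / 64] |= 1ULL << (i % 64);
}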
-void teardown_sdma(struct ipath_devdata *dd)
-{
-       struct ipath_sdma_txreq *txp, *txpnext;
-       unsigned long flags;
-       dma_addr_t sdma_head_phys = 0;
-       dma_addr_t sdma_descq_phys = 0;
-       void *sdma_descq = NULL;
-       void *sdma_head_dma = NULL;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
-       __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-       __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       tasklet_kill(&dd->ipath_sdma_abort_task);
-       tasklet_kill(&dd->ipath_sdma_notify_task);
-
-       /* turn off sdma */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-               dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       /* dequeue all "sent" requests */
-       list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
-                                list) {
-               txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
-               if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-                       vl15_watchdog_deq(dd);
-               list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
-       }
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       sdma_notify_taskbody(dd);
-
-       del_timer_sync(&dd->ipath_sdma_vl15_timer);
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       dd->ipath_sdma_abort_jiffies = 0;
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
-
-       if (dd->ipath_sdma_head_dma) {
-               sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
-               sdma_head_phys = dd->ipath_sdma_head_phys;
-               dd->ipath_sdma_head_dma = NULL;
-               dd->ipath_sdma_head_phys = 0;
-       }
-
-       if (dd->ipath_sdma_descq) {
-               sdma_descq = dd->ipath_sdma_descq;
-               sdma_descq_phys = dd->ipath_sdma_descq_phys;
-               dd->ipath_sdma_descq = NULL;
-               dd->ipath_sdma_descq_phys = 0;
-       }
-
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       if (sdma_head_dma)
-               dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-                                 sdma_head_dma, sdma_head_phys);
-
-       if (sdma_descq)
-               dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
-                                 sdma_descq, sdma_descq_phys);
-}
-
-/*
- * [Re]start SDMA, if we use it, and it's not already OK.
- * This is called on transition to link ACTIVE, either the first or
- * subsequent times.
- */
-void ipath_restart_sdma(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-       int needed = 1;
-
-       if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
-               goto bail;
-
-       /*
-        * First, make sure we should, which is to say,
-        * check that we are "RUNNING" (not in teardown)
-        * and not "SHUTDOWN"
-        */
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
-               || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-                       needed = 0;
-       else {
-               __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-               __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
-               __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-       }
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-       if (!needed) {
-               ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
-                       dd->ipath_sdma_status);
-               goto bail;
-       }
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       /*
-        * First clear, just to be safe. Enable is only done
-        * in chip on 0->1 transition
-        */
-       dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       /* notify upper layers */
-       ipath_ib_piobufavail(dd->verbs_dev);
-
-bail:
-       return;
-}
-
-static inline void make_sdma_desc(struct ipath_devdata *dd,
-       u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
-{
-       WARN_ON(addr & 3);
-       /* SDmaPhyAddr[47:32] */
-       sdmadesc[1] = addr >> 32;
-       /* SDmaPhyAddr[31:0] */
-       sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
-       /* SDmaGeneration[1:0] */
-       sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
-       /* SDmaDwordCount[10:0] */
-       sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
-       /* SDmaBufOffset[12:2] */
-       sdmadesc[0] |= dwoffset & 0x7ffULL;
-}
-
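make_sdma_desc() packs the physical address, generation, dword length and buffer offset into the two 64-bit descriptor words, and unmap_desc() earlier in this file recovers the address and byte length from them. A round-trip sketch of just the address/length fields (illustrative only; generation and offset bits omitted):

/* Sketch only: pack and recover the address/length fields of an SDMA
 * descriptor; 'addr' must be 4-byte aligned, 'dwlen' is a dword count. */
static void sdma_desc_roundtrip(u64 addr, u64 dwlen)
{
        u64 qw0 = ((addr & 0xfffffffcULL) << 32) | ((dwlen & 0x7ffULL) << 16);
        u64 qw1 = addr >> 32;

        u64 got_addr = (qw1 << 32) | (qw0 >> 32);       /* == addr            */
        u64 got_len  = (qw0 >> 14) & (0x7ffULL << 2);   /* == dwlen * 4 bytes */

        (void)got_addr;
        (void)got_len;
}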
-/*
- * This function queues one IB packet onto the send DMA queue per call.
- * The caller is responsible for checking:
- * 1) The number of send DMA descriptor entries is less than the size of
- *    the descriptor queue.
- * 2) The IB SGE addresses and lengths are 32-bit aligned
- *    (except possibly the last SGE's length)
- * 3) The SGE addresses are suitable for passing to dma_map_single().
- */
-int ipath_sdma_verbs_send(struct ipath_devdata *dd,
-       struct ipath_sge_state *ss, u32 dwords,
-       struct ipath_verbs_txreq *tx)
-{
-
-       unsigned long flags;
-       struct ipath_sge *sge;
-       int ret = 0;
-       u16 tail;
-       __le64 *descqp;
-       u64 sdmadesc[2];
-       u32 dwoffset;
-       dma_addr_t addr;
-
-       if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
-               ipath_dbg("packet size %X > ibmax %X, fail\n",
-                       tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
-               ret = -EMSGSIZE;
-               goto fail;
-       }
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-retry:
-       if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
-               ret = -EBUSY;
-               goto unlock;
-       }
-
-       if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
-               if (ipath_sdma_make_progress(dd))
-                       goto retry;
-               ret = -ENOBUFS;
-               goto unlock;
-       }
-
-       addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
-                             tx->map_len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&dd->pcidev->dev, addr))
-               goto ioerr;
-
-       dwoffset = tx->map_len >> 2;
-       make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
-
-       /* SDmaFirstDesc */
-       sdmadesc[0] |= 1ULL << 12;
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
-               sdmadesc[0] |= 1ULL << 14;      /* SDmaUseLargeBuf */
-
-       /* write to the descq */
-       tail = dd->ipath_sdma_descq_tail;
-       descqp = &dd->ipath_sdma_descq[tail].qw[0];
-       *descqp++ = cpu_to_le64(sdmadesc[0]);
-       *descqp++ = cpu_to_le64(sdmadesc[1]);
-
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
-               tx->txreq.start_idx = tail;
-
-       /* increment the tail */
-       if (++tail == dd->ipath_sdma_descq_cnt) {
-               tail = 0;
-               descqp = &dd->ipath_sdma_descq[0].qw[0];
-               ++dd->ipath_sdma_generation;
-       }
-
-       sge = &ss->sge;
-       while (dwords) {
-               u32 dw;
-               u32 len;
-
-               len = dwords << 2;
-               if (len > sge->length)
-                       len = sge->length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               dw = (len + 3) >> 2;
-               addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
-                                     DMA_TO_DEVICE);
-               if (dma_mapping_error(&dd->pcidev->dev, addr))
-                       goto unmap;
-               make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
-               /* SDmaUseLargeBuf has to be set in every descriptor */
-               if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
-                       sdmadesc[0] |= 1ULL << 14;
-               /* write to the descq */
-               *descqp++ = cpu_to_le64(sdmadesc[0]);
-               *descqp++ = cpu_to_le64(sdmadesc[1]);
-
-               /* increment the tail */
-               if (++tail == dd->ipath_sdma_descq_cnt) {
-                       tail = 0;
-                       descqp = &dd->ipath_sdma_descq[0].qw[0];
-                       ++dd->ipath_sdma_generation;
-               }
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-
-               dwoffset += dw;
-               dwords -= dw;
-       }
-
-       if (!tail)
-               descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
-       descqp -= 2;
-       /* SDmaLastDesc */
-       descqp[0] |= cpu_to_le64(1ULL << 11);
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
-               /* SDmaIntReq */
-               descqp[0] |= cpu_to_le64(1ULL << 15);
-       }
-
-       /* Commit writes to memory and advance the tail on the chip */
-       wmb();
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
-
-       tx->txreq.next_descq_idx = tail;
-       tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
-       dd->ipath_sdma_descq_tail = tail;
-       dd->ipath_sdma_descq_added += tx->txreq.sg_count;
-       list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
-               vl15_watchdog_enq(dd);
-       goto unlock;
-
-unmap:
-       while (tail != dd->ipath_sdma_descq_tail) {
-               if (!tail)
-                       tail = dd->ipath_sdma_descq_cnt - 1;
-               else
-                       tail--;
-               unmap_desc(dd, tail);
-       }
-ioerr:
-       ret = -EIO;
-unlock:
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-fail:
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_srq.c b/drivers/staging/rdma/ipath/ipath_srq.c
deleted file mode 100644 (file)
index 2627198..0000000
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: the first WR to cause a problem is put here
- *
- * This may be called from interrupt context.
- */
-int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-                          struct ib_recv_wr **bad_wr)
-{
-       struct ipath_srq *srq = to_isrq(ibsrq);
-       struct ipath_rwq *wq;
-       unsigned long flags;
-       int ret;
-
-       for (; wr; wr = wr->next) {
-               struct ipath_rwqe *wqe;
-               u32 next;
-               int i;
-
-               if ((unsigned) wr->num_sge > srq->rq.max_sge) {
-                       *bad_wr = wr;
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               spin_lock_irqsave(&srq->rq.lock, flags);
-               wq = srq->rq.wq;
-               next = wq->head + 1;
-               if (next >= srq->rq.size)
-                       next = 0;
-               if (next == wq->tail) {
-                       spin_unlock_irqrestore(&srq->rq.lock, flags);
-                       *bad_wr = wr;
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-
-               wqe = get_rwqe_ptr(&srq->rq, wq->head);
-               wqe->wr_id = wr->wr_id;
-               wqe->num_sge = wr->num_sge;
-               for (i = 0; i < wr->num_sge; i++)
-                       wqe->sg_list[i] = wr->sg_list[i];
-               /* Make sure queue entry is written before the head index. */
-               smp_wmb();
-               wq->head = next;
-               spin_unlock_irqrestore(&srq->rq.lock, flags);
-       }
-       ret = 0;
-
-bail:
-       return ret;
-}
-
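The overflow test in ipath_post_srq_receive() leaves one slot unused: the queue is declared full when advancing the head would land on the tail, so a ring of 'size' slots holds at most size - 1 WQEs (which is also why ipath_query_srq() later in this file reports max_wr as size - 1). A sketch of just that test:

/* Sketch only: 'full' test for a circular queue that keeps one slot free. */
static int rq_would_overflow(u32 head, u32 tail, u32 size)
{
        u32 next = head + 1;

        if (next >= size)
                next = 0;
        return next == tail;
}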
-/**
- * ipath_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
- * @srq_init_attr: the attributes of the SRQ
- * @udata: data from libipathverbs when creating a user SRQ
- */
-struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
-                               struct ib_srq_init_attr *srq_init_attr,
-                               struct ib_udata *udata)
-{
-       struct ipath_ibdev *dev = to_idev(ibpd->device);
-       struct ipath_srq *srq;
-       u32 sz;
-       struct ib_srq *ret;
-
-       if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
-               ret = ERR_PTR(-ENOSYS);
-               goto done;
-       }
-
-       if (srq_init_attr->attr.max_wr == 0) {
-               ret = ERR_PTR(-EINVAL);
-               goto done;
-       }
-
-       if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
-           (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
-               ret = ERR_PTR(-EINVAL);
-               goto done;
-       }
-
-       srq = kmalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq) {
-               ret = ERR_PTR(-ENOMEM);
-               goto done;
-       }
-
-       /*
-        * Need to use vmalloc() if we want to support large #s of entries.
-        */
-       srq->rq.size = srq_init_attr->attr.max_wr + 1;
-       srq->rq.max_sge = srq_init_attr->attr.max_sge;
-       sz = sizeof(struct ib_sge) * srq->rq.max_sge +
-               sizeof(struct ipath_rwqe);
-       srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
-       if (!srq->rq.wq) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_srq;
-       }
-
-       /*
-        * Return the address of the RWQ as the offset to mmap.
-        * See ipath_mmap() for details.
-        */
-       if (udata && udata->outlen >= sizeof(__u64)) {
-               int err;
-               u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
-
-               srq->ip =
-                   ipath_create_mmap_info(dev, s,
-                                          ibpd->uobject->context,
-                                          srq->rq.wq);
-               if (!srq->ip) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail_wq;
-               }
-
-               err = ib_copy_to_udata(udata, &srq->ip->offset,
-                                      sizeof(srq->ip->offset));
-               if (err) {
-                       ret = ERR_PTR(err);
-                       goto bail_ip;
-               }
-       } else
-               srq->ip = NULL;
-
-       /*
-        * ib_create_srq() will initialize srq->ibsrq.
-        */
-       spin_lock_init(&srq->rq.lock);
-       srq->rq.wq->head = 0;
-       srq->rq.wq->tail = 0;
-       srq->limit = srq_init_attr->attr.srq_limit;
-
-       spin_lock(&dev->n_srqs_lock);
-       if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
-               spin_unlock(&dev->n_srqs_lock);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_ip;
-       }
-
-       dev->n_srqs_allocated++;
-       spin_unlock(&dev->n_srqs_lock);
-
-       if (srq->ip) {
-               spin_lock_irq(&dev->pending_lock);
-               list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-       }
-
-       ret = &srq->ibsrq;
-       goto done;
-
-bail_ip:
-       kfree(srq->ip);
-bail_wq:
-       vfree(srq->rq.wq);
-bail_srq:
-       kfree(srq);
-done:
-       return ret;
-}
-
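The sz computation in ipath_create_srq() sizes one receive-queue slot as a fixed ipath_rwqe header plus max_sge scatter/gather entries; slots are then located by multiplying the slot index by that per-slot size, which is essentially what the get_rwqe_ptr() helper used below does. A generic sketch with hypothetical names:

/* Sketch only: locate slot 'n' in a ring whose slots are 'slot_sz' bytes,
 * where slot_sz = sizeof(header) + max_sge * sizeof(struct ib_sge). */
static void *rq_slot(void *ring_base, u32 n, size_t slot_sz)
{
        return (char *)ring_base + (size_t)n * slot_sz;
}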
-/**
- * ipath_modify_srq - modify a shared receive queue
- * @ibsrq: the SRQ to modify
- * @attr: the new attributes of the SRQ
- * @attr_mask: indicates which attributes to modify
- * @udata: user data for ipathverbs.so
- */
-int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-                    enum ib_srq_attr_mask attr_mask,
-                    struct ib_udata *udata)
-{
-       struct ipath_srq *srq = to_isrq(ibsrq);
-       struct ipath_rwq *wq;
-       int ret = 0;
-
-       if (attr_mask & IB_SRQ_MAX_WR) {
-               struct ipath_rwq *owq;
-               struct ipath_rwqe *p;
-               u32 sz, size, n, head, tail;
-
-               /* Check that the requested sizes are below the limits. */
-               if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
-                   ((attr_mask & IB_SRQ_LIMIT) ?
-                    attr->srq_limit : srq->limit) > attr->max_wr) {
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               sz = sizeof(struct ipath_rwqe) +
-                       srq->rq.max_sge * sizeof(struct ib_sge);
-               size = attr->max_wr + 1;
-               wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
-               if (!wq) {
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-
-               /* Check that we can write the offset to mmap. */
-               if (udata && udata->inlen >= sizeof(__u64)) {
-                       __u64 offset_addr;
-                       __u64 offset = 0;
-
-                       ret = ib_copy_from_udata(&offset_addr, udata,
-                                                sizeof(offset_addr));
-                       if (ret)
-                               goto bail_free;
-                       udata->outbuf =
-                               (void __user *) (unsigned long) offset_addr;
-                       ret = ib_copy_to_udata(udata, &offset,
-                                              sizeof(offset));
-                       if (ret)
-                               goto bail_free;
-               }
-
-               spin_lock_irq(&srq->rq.lock);
-               /*
-                * validate head pointer value and compute
-                * the number of remaining WQEs.
-                */
-               owq = srq->rq.wq;
-               head = owq->head;
-               if (head >= srq->rq.size)
-                       head = 0;
-               tail = owq->tail;
-               if (tail >= srq->rq.size)
-                       tail = 0;
-               n = head;
-               if (n < tail)
-                       n += srq->rq.size - tail;
-               else
-                       n -= tail;
-               if (size <= n) {
-                       ret = -EINVAL;
-                       goto bail_unlock;
-               }
-               n = 0;
-               p = wq->wq;
-               while (tail != head) {
-                       struct ipath_rwqe *wqe;
-                       int i;
-
-                       wqe = get_rwqe_ptr(&srq->rq, tail);
-                       p->wr_id = wqe->wr_id;
-                       p->num_sge = wqe->num_sge;
-                       for (i = 0; i < wqe->num_sge; i++)
-                               p->sg_list[i] = wqe->sg_list[i];
-                       n++;
-                       p = (struct ipath_rwqe *)((char *) p + sz);
-                       if (++tail >= srq->rq.size)
-                               tail = 0;
-               }
-               srq->rq.wq = wq;
-               srq->rq.size = size;
-               wq->head = n;
-               wq->tail = 0;
-               if (attr_mask & IB_SRQ_LIMIT)
-                       srq->limit = attr->srq_limit;
-               spin_unlock_irq(&srq->rq.lock);
-
-               vfree(owq);
-
-               if (srq->ip) {
-                       struct ipath_mmap_info *ip = srq->ip;
-                       struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
-                       u32 s = sizeof(struct ipath_rwq) + size * sz;
-
-                       ipath_update_mmap_info(dev, ip, s, wq);
-
-                       /*
-                        * Return the offset to mmap.
-                        * See ipath_mmap() for details.
-                        */
-                       if (udata && udata->inlen >= sizeof(__u64)) {
-                               ret = ib_copy_to_udata(udata, &ip->offset,
-                                                      sizeof(ip->offset));
-                               if (ret)
-                                       goto bail;
-                       }
-
-                       spin_lock_irq(&dev->pending_lock);
-                       if (list_empty(&ip->pending_mmaps))
-                               list_add(&ip->pending_mmaps,
-                                        &dev->pending_mmaps);
-                       spin_unlock_irq(&dev->pending_lock);
-               }
-       } else if (attr_mask & IB_SRQ_LIMIT) {
-               spin_lock_irq(&srq->rq.lock);
-               if (attr->srq_limit >= srq->rq.size)
-                       ret = -EINVAL;
-               else
-                       srq->limit = attr->srq_limit;
-               spin_unlock_irq(&srq->rq.lock);
-       }
-       goto bail;
-
-bail_unlock:
-       spin_unlock_irq(&srq->rq.lock);
-bail_free:
-       vfree(wq);
-bail:
-       return ret;
-}
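For reference, the resize path in ipath_modify_srq() above allocates a new ring, copies the still-outstanding WQEs from the old ring (from tail up to, but not including, head) into the start of the new ring, and only then swaps the pointers and frees the old ring. Below is a minimal standalone sketch of that copy step, assuming fixed-size entries rather than the driver's variable-size ipath_rwqe records; the names are hypothetical.

    #include <stddef.h>

    struct entry { unsigned long wr_id; };

    /*
     * Copy the live entries of a circular ring (tail .. head, head excluded)
     * into the start of a freshly allocated ring.  Returns the number of
     * entries copied; that count becomes the new head and the new tail is 0.
     */
    static size_t ring_copy_live(const struct entry *old, size_t old_size,
                                 size_t head, size_t tail,
                                 struct entry *new_ring)
    {
            size_t n = 0;

            while (tail != head) {
                    new_ring[n++] = old[tail];
                    if (++tail >= old_size)
                            tail = 0;
            }
            return n;
    }

The driver additionally rejects the resize with -EINVAL when the requested size cannot hold the outstanding entries, which is the "size <= n" check it performs before its copy loop.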
-
-int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
-       struct ipath_srq *srq = to_isrq(ibsrq);
-
-       attr->max_wr = srq->rq.size - 1;
-       attr->max_sge = srq->rq.max_sge;
-       attr->srq_limit = srq->limit;
-       return 0;
-}
-
-/**
- * ipath_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
- */
-int ipath_destroy_srq(struct ib_srq *ibsrq)
-{
-       struct ipath_srq *srq = to_isrq(ibsrq);
-       struct ipath_ibdev *dev = to_idev(ibsrq->device);
-
-       spin_lock(&dev->n_srqs_lock);
-       dev->n_srqs_allocated--;
-       spin_unlock(&dev->n_srqs_lock);
-       if (srq->ip)
-               kref_put(&srq->ip->ref, ipath_release_mmap_info);
-       else
-               vfree(srq->rq.wq);
-       kfree(srq);
-
-       return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_stats.c b/drivers/staging/rdma/ipath/ipath_stats.c
deleted file mode 100644 (file)
index f63e143..0000000
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_kernel.h"
-
-struct infinipath_stats ipath_stats;
-
-/**
- * ipath_snap_cntr - snapshot a chip counter
- * @dd: the infinipath device
- * @creg: the counter to snapshot
- *
- * called from add_timer and user counter read calls, to deal with
- * counters that wrap in "human time".  The words sent and received, and
- * the packets sent and received are all that we worry about.  For now,
- * at least, we don't worry about error counters, because if they wrap
- * that quickly, we probably don't care.  We may eventually just make this
- * handle all the counters.  Word counters can wrap in about 20 seconds
- * of full bandwidth traffic, packet counters in a few hours.
- */
-
-u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
-{
-       u32 val, reg64 = 0;
-       u64 val64;
-       unsigned long t0, t1;
-       u64 ret;
-
-       t0 = jiffies;
-       /* If fast increment counters are only 32 bits, snapshot them,
-        * and maintain them as 64bit values in the driver */
-       if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
-           (creg == dd->ipath_cregs->cr_wordsendcnt ||
-            creg == dd->ipath_cregs->cr_wordrcvcnt ||
-            creg == dd->ipath_cregs->cr_pktsendcnt ||
-            creg == dd->ipath_cregs->cr_pktrcvcnt)) {
-               val64 = ipath_read_creg(dd, creg);
-               val = val64 == ~0ULL ? ~0U : 0;
-               reg64 = 1;
-       } else                  /* val64 just to keep gcc quiet... */
-               val64 = val = ipath_read_creg32(dd, creg);
-       /*
-        * See if a second has passed.  This is just a way to detect things
-        * that are quite broken.  Normally this should take just a few
-        * cycles (the check is for long enough that we don't care if we get
-        * pre-empted.)  An Opteron HT O read timeout is 4 seconds with
-        * normal NB values
-        */
-       t1 = jiffies;
-       if (time_before(t0 + HZ, t1) && val == -1) {
-               ipath_dev_err(dd, "Error!  Read counter 0x%x timed out\n",
-                             creg);
-               ret = 0ULL;
-               goto bail;
-       }
-       if (reg64) {
-               ret = val64;
-               goto bail;
-       }
-
-       if (creg == dd->ipath_cregs->cr_wordsendcnt) {
-               if (val != dd->ipath_lastsword) {
-                       dd->ipath_sword += val - dd->ipath_lastsword;
-                       dd->ipath_lastsword = val;
-               }
-               val64 = dd->ipath_sword;
-       } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
-               if (val != dd->ipath_lastrword) {
-                       dd->ipath_rword += val - dd->ipath_lastrword;
-                       dd->ipath_lastrword = val;
-               }
-               val64 = dd->ipath_rword;
-       } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
-               if (val != dd->ipath_lastspkts) {
-                       dd->ipath_spkts += val - dd->ipath_lastspkts;
-                       dd->ipath_lastspkts = val;
-               }
-               val64 = dd->ipath_spkts;
-       } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
-               if (val != dd->ipath_lastrpkts) {
-                       dd->ipath_rpkts += val - dd->ipath_lastrpkts;
-                       dd->ipath_lastrpkts = val;
-               }
-               val64 = dd->ipath_rpkts;
-       } else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) {
-               if (dd->ibdeltainprog)
-                       val64 -= val64 - dd->ibsymsnap;
-               val64 -= dd->ibsymdelta;
-       } else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) {
-               if (dd->ibdeltainprog)
-                       val64 -= val64 - dd->iblnkerrsnap;
-               val64 -= dd->iblnkerrdelta;
-       } else
-               val64 = (u64) val;
-
-       ret = val64;
-
-bail:
-       return ret;
-}
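The comment above ipath_snap_cntr() describes the underlying technique: fast 32-bit hardware counters are snapshotted periodically and folded into 64-bit totals kept in the driver, so a wrap between two snapshots is still counted correctly. A minimal sketch of that accumulation pattern follows, with hypothetical names rather than the driver's own fields.

    #include <stdint.h>

    struct wrap_counter {
            uint64_t total; /* 64-bit running total kept in software */
            uint32_t last;  /* last raw 32-bit value read from hardware */
    };

    /*
     * Fold a new 32-bit hardware reading into the 64-bit total.  The delta
     * is computed in 32-bit unsigned arithmetic, so a single wrap between
     * two readings still produces the correct increment.
     */
    static uint64_t wrap_counter_update(struct wrap_counter *c, uint32_t hw)
    {
            c->total += (uint32_t)(hw - c->last);
            c->last = hw;
            return c->total;
    }

The snapshot has to run more often than the fastest counter can wrap (about 20 seconds at full bandwidth for the word counters, per the comment above), which is why ipath_get_faststats() below re-arms its timer every 5 seconds.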
-
-/**
- * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
- * @dd: the infinipath device
- *
- * print the delta of egrfull/hdrqfull errors for kernel ports no more than
- * every 5 seconds.  User processes are printed at close, but kernel doesn't
- * close, so...  Separate routine so it may be called from other places
- * someday, and so the function name is meaningful when printed by _IPATH_INFO
- */
-static void ipath_qcheck(struct ipath_devdata *dd)
-{
-       static u64 last_tot_hdrqfull;
-       struct ipath_portdata *pd = dd->ipath_pd[0];
-       size_t blen = 0;
-       char buf[128];
-       u32 hdrqtail;
-
-       *buf = 0;
-       if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
-               blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
-                               pd->port_hdrqfull -
-                               dd->ipath_p0_hdrqfull);
-               dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
-       }
-       if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
-               blen += snprintf(buf + blen, sizeof buf - blen,
-                                "%srcvegrfull %llu",
-                                blen ? ", " : "",
-                                (unsigned long long)
-                                (ipath_stats.sps_etidfull -
-                                 dd->ipath_last_tidfull));
-               dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
-       }
-
-       /*
-        * this is actually the number of hdrq full interrupts, not actual
-        * events, but at the moment that's mostly what I'm interested in.
-        * Actual count, etc. is in the counters, if needed.  For production
-        * users this won't ordinarily be printed.
-        */
-
-       if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
-           ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
-               blen += snprintf(buf + blen, sizeof buf - blen,
-                                "%shdrqfull %llu (all ports)",
-                                blen ? ", " : "",
-                                (unsigned long long)
-                                (ipath_stats.sps_hdrqfull -
-                                 last_tot_hdrqfull));
-               last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
-       }
-       if (blen)
-               ipath_dbg("%s\n", buf);
-
-       hdrqtail = ipath_get_hdrqtail(pd);
-       if (pd->port_head != hdrqtail) {
-               if (dd->ipath_lastport0rcv_cnt ==
-                   ipath_stats.sps_port0pkts) {
-                       ipath_cdbg(PKT, "missing rcv interrupts? "
-                                  "port0 hd=%x tl=%x; port0pkts %llx; write"
-                                  " hd (w/intr)\n",
-                                  pd->port_head, hdrqtail,
-                                  (unsigned long long)
-                                  ipath_stats.sps_port0pkts);
-                       ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
-                               dd->ipath_rhdrhead_intr_off, pd->port_port);
-               }
-               dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
-       }
-}
-
-static void ipath_chk_errormask(struct ipath_devdata *dd)
-{
-       static u32 fixed;
-       u32 ctrl;
-       unsigned long errormask;
-       unsigned long hwerrs;
-
-       if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
-               return;
-
-       errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
-
-       if (errormask == dd->ipath_errormask)
-               return;
-       fixed++;
-
-       hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-       ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-               dd->ipath_errormask);
-
-       if ((hwerrs & dd->ipath_hwerrmask) ||
-               (ctrl & INFINIPATH_C_FREEZEMODE)) {
-               /* force re-interrupt of pending events, just in case */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-               dev_info(&dd->pcidev->dev,
-                       "errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
-                       fixed, errormask, (unsigned long)dd->ipath_errormask,
-                       ctrl, hwerrs);
-       } else
-               ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
-                       fixed, errormask,
-                       (unsigned long)dd->ipath_errormask);
-}
-
-
-/**
- * ipath_get_faststats - get word counters from chip before they overflow
- * @opaque: a pointer to the infinipath device's struct ipath_devdata
- *
- * called from add_timer
- */
-void ipath_get_faststats(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-       int i;
-       static unsigned cnt;
-       unsigned long flags;
-       u64 traffic_wds;
-
-       /*
-        * don't access the chip while running diags, or memory diags can
-        * fail
-        */
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
-           ipath_diag_inuse)
-               /* but re-arm the timer, for diags case; won't hurt other */
-               goto done;
-
-       /*
-        * We now try to maintain an "active timer", based on traffic
-        * exceeding a threshold, so we need to check the word-counts
-        * even if they are 64-bit.
-        */
-       traffic_wds = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt) +
-               ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-       spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-       traffic_wds -= dd->ipath_traffic_wds;
-       dd->ipath_traffic_wds += traffic_wds;
-       if (traffic_wds  >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->ipath_active_time); /* S/B #define */
-       spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-
-       if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
-               ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-               ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-       }
-
-       ipath_qcheck(dd);
-
-       /*
-        * deal with repeat error suppression.  Doesn't really matter if
-        * last error was almost a full interval ago, or just a few usecs
-        * ago; still won't get more than 2 per interval.  We may want
-        * longer intervals for this eventually, could do with mod, counter
-        * or separate timer.  Also see code in ipath_handle_errors() and
-        * ipath_handle_hwerrors().
-        */
-
-       if (dd->ipath_lasterror)
-               dd->ipath_lasterror = 0;
-       if (dd->ipath_lasthwerror)
-               dd->ipath_lasthwerror = 0;
-       if (dd->ipath_maskederrs
-           && time_after(jiffies, dd->ipath_unmasktime)) {
-               char ebuf[256];
-               int iserr;
-               iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
-                                        dd->ipath_maskederrs);
-               if (dd->ipath_maskederrs &
-                   ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-                     INFINIPATH_E_PKTERRS))
-                       ipath_dev_err(dd, "Re-enabling masked errors "
-                                     "(%s)\n", ebuf);
-               else {
-                       /*
-                        * rcvegrfull and rcvhdrqfull are "normal", for some
-                        * types of processes (mostly benchmarks) that send
-                        * huge numbers of messages, while not processing
-                        * them.  So only complain about these at debug
-                        * level.
-                        */
-                       if (iserr)
-                               ipath_dbg(
-                                       "Re-enabling queue full errors (%s)\n",
-                                       ebuf);
-                       else
-                               ipath_cdbg(ERRPKT, "Re-enabling packet"
-                                       " problem interrupt (%s)\n", ebuf);
-               }
-
-               /* re-enable masked errors */
-               dd->ipath_errormask |= dd->ipath_maskederrs;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-                                dd->ipath_errormask);
-               dd->ipath_maskederrs = 0;
-       }
-
-       /* limit qfull messages to ~one per minute per port */
-       if ((++cnt & 0x10)) {
-               for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
-                       struct ipath_portdata *pd = dd->ipath_pd[i];
-
-                       if (pd && pd->port_lastrcvhdrqtail != -1)
-                               pd->port_lastrcvhdrqtail = -1;
-               }
-       }
-
-       ipath_chk_errormask(dd);
-done:
-       mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_sysfs.c b/drivers/staging/rdma/ipath/ipath_sysfs.c
deleted file mode 100644 (file)
index b12b1f6..0000000
+++ /dev/null
@@ -1,1237 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/ctype.h>
-#include <linux/stat.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-/**
- * ipath_parse_ushort - parse an unsigned short value in an arbitrary base
- * @str: the string containing the number
- * @valp: where to put the result
- *
- * returns the number of bytes consumed, or negative value on error
- */
-int ipath_parse_ushort(const char *str, unsigned short *valp)
-{
-       unsigned long val;
-       char *end;
-       int ret;
-
-       if (!isdigit(str[0])) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       val = simple_strtoul(str, &end, 0);
-
-       if (val > 0xffff) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       *valp = val;
-
-       ret = end + 1 - str;
-       if (ret == 0)
-               ret = -EINVAL;
-
-bail:
-       return ret;
-}
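ipath_parse_ushort() is the shared parsing helper for the sysfs store handlers in this file: on success it returns the number of bytes consumed (always positive), on failure a negative errno, so callers only need a "< 0" check plus their own range check. A small hypothetical store handler showing the intended usage (not one of the file's real attributes):

    static ssize_t store_example(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
            u16 val = 0;
            int ret;

            ret = ipath_parse_ushort(buf, &val);    /* bytes consumed, or < 0 */
            if (ret < 0 || val > 7)                 /* range checking is the caller's job */
                    return -EINVAL;

            /* ... apply val to the device here ... */
            return count;                           /* accept the whole write */
    }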
-
-static ssize_t show_version(struct device_driver *dev, char *buf)
-{
-       /* The string printed here is already newline-terminated. */
-       return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version);
-}
-
-static ssize_t show_num_units(struct device_driver *dev, char *buf)
-{
-       return scnprintf(buf, PAGE_SIZE, "%d\n",
-                        ipath_count_units(NULL, NULL, NULL));
-}
-
-static ssize_t show_status(struct device *dev,
-                          struct device_attribute *attr,
-                          char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       ssize_t ret;
-
-       if (!dd->ipath_statusp) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
-                       (unsigned long long) *(dd->ipath_statusp));
-
-bail:
-       return ret;
-}
-
-static const char *ipath_status_str[] = {
-       "Initted",
-       "Disabled",
-       "Admin_Disabled",
-       "", /* This used to be the old "OIB_SMA" status. */
-       "", /* This used to be the old "SMA" status. */
-       "Present",
-       "IB_link_up",
-       "IB_configured",
-       "NoIBcable",
-       "Fatal_Hardware_Error",
-       NULL,
-};
-
-static ssize_t show_status_str(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int i, any;
-       u64 s;
-       ssize_t ret;
-
-       if (!dd->ipath_statusp) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       s = *(dd->ipath_statusp);
-       *buf = '\0';
-       for (any = i = 0; s && ipath_status_str[i]; i++) {
-               if (s & 1) {
-                       if (any && strlcat(buf, " ", PAGE_SIZE) >=
-                           PAGE_SIZE)
-                               /* overflow */
-                               break;
-                       if (strlcat(buf, ipath_status_str[i],
-                                   PAGE_SIZE) >= PAGE_SIZE)
-                               break;
-                       any = 1;
-               }
-               s >>= 1;
-       }
-       if (any)
-               strlcat(buf, "\n", PAGE_SIZE);
-
-       ret = strlen(buf);
-
-bail:
-       return ret;
-}
-
-static ssize_t show_boardversion(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       /* The string printed here is already newline-terminated. */
-       return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
-}
-
-static ssize_t show_localbus_info(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       /* The string printed here is already newline-terminated. */
-       return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
-}
-
-static ssize_t show_lmc(struct device *dev,
-                       struct device_attribute *attr,
-                       char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_lmc);
-}
-
-static ssize_t store_lmc(struct device *dev,
-                        struct device_attribute *attr,
-                        const char *buf,
-                        size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 lmc = 0;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &lmc);
-       if (ret < 0)
-               goto invalid;
-
-       if (lmc > 7) {
-               ret = -EINVAL;
-               goto invalid;
-       }
-
-       ipath_set_lid(dd, dd->ipath_lid, lmc);
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid LMC %u\n", lmc);
-bail:
-       return ret;
-}
-
-static ssize_t show_lid(struct device *dev,
-                       struct device_attribute *attr,
-                       char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_lid);
-}
-
-static ssize_t store_lid(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 lid = 0;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &lid);
-       if (ret < 0)
-               goto invalid;
-
-       if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) {
-               ret = -EINVAL;
-               goto invalid;
-       }
-
-       ipath_set_lid(dd, lid, dd->ipath_lmc);
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid LID 0x%x\n", lid);
-bail:
-       return ret;
-}
-
-static ssize_t show_mlid(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_mlid);
-}
-
-static ssize_t store_mlid(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 mlid;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &mlid);
-       if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
-               goto invalid;
-
-       dd->ipath_mlid = mlid;
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid MLID\n");
-bail:
-       return ret;
-}
-
-static ssize_t show_guid(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u8 *guid;
-
-       guid = (u8 *) & (dd->ipath_guid);
-
-       return scnprintf(buf, PAGE_SIZE,
-                        "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
-                        guid[0], guid[1], guid[2], guid[3],
-                        guid[4], guid[5], guid[6], guid[7]);
-}
-
-static ssize_t store_guid(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       ssize_t ret;
-       unsigned short guid[8];
-       __be64 new_guid;
-       u8 *ng;
-       int i;
-
-       if (sscanf(buf, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx",
-                  &guid[0], &guid[1], &guid[2], &guid[3],
-                  &guid[4], &guid[5], &guid[6], &guid[7]) != 8)
-               goto invalid;
-
-       ng = (u8 *) &new_guid;
-
-       for (i = 0; i < 8; i++) {
-               if (guid[i] > 0xff)
-                       goto invalid;
-               ng[i] = guid[i];
-       }
-
-       if (new_guid == 0)
-               goto invalid;
-
-       dd->ipath_guid = new_guid;
-       dd->ipath_nguid = 1;
-       if (dd->verbs_dev)
-               dd->verbs_dev->ibdev.node_guid = new_guid;
-
-       ret = strlen(buf);
-       goto bail;
-
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid GUID\n");
-       ret = -EINVAL;
-
-bail:
-       return ret;
-}
-
-static ssize_t show_nguid(struct device *dev,
-                         struct device_attribute *attr,
-                         char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid);
-}
-
-static ssize_t show_nports(struct device *dev,
-                          struct device_attribute *attr,
-                          char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       /* Return the number of user ports available. */
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_cfgports - 1);
-}
-
-static ssize_t show_serial(struct device *dev,
-                          struct device_attribute *attr,
-                          char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       buf[sizeof dd->ipath_serial] = '\0';
-       memcpy(buf, dd->ipath_serial, sizeof dd->ipath_serial);
-       strcat(buf, "\n");
-       return strlen(buf);
-}
-
-static ssize_t show_unit(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
-}
-
-static ssize_t show_jint_max_packets(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets);
-}
-
-static ssize_t store_jint_max_packets(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf,
-                                     size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 v = 0;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &v);
-       if (ret < 0)
-               ipath_dev_err(dd, "invalid jint_max_packets.\n");
-       else
-               dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v);
-
-       return ret;
-}
-
-static ssize_t show_jint_idle_ticks(struct device *dev,
-                                   struct device_attribute *attr,
-                                   char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks);
-}
-
-static ssize_t store_jint_idle_ticks(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf,
-                                    size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 v = 0;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &v);
-       if (ret < 0)
-               ipath_dev_err(dd, "invalid jint_idle_ticks.\n");
-       else
-               dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets);
-
-       return ret;
-}
-
-#define DEVICE_COUNTER(name, attr) \
-       static ssize_t show_counter_##name(struct device *dev, \
-                                          struct device_attribute *attr, \
-                                          char *buf) \
-       { \
-               struct ipath_devdata *dd = dev_get_drvdata(dev); \
-               return scnprintf(\
-                       buf, PAGE_SIZE, "%llu\n", (unsigned long long) \
-                       ipath_snap_cntr( \
-                               dd, offsetof(struct infinipath_counters, \
-                                            attr) / sizeof(u64)));     \
-       } \
-       static DEVICE_ATTR(name, S_IRUGO, show_counter_##name, NULL);
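The DEVICE_COUNTER() macro above generates one sysfs show function per chip counter; the only non-obvious part is how a field name of struct infinipath_counters becomes a counter index: offsetof() divided by sizeof(u64) works because the counters are laid out as consecutive 64-bit words. A tiny standalone illustration of the same trick, using a made-up layout:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical layout of consecutive 64-bit counters. */
    struct counters {
            uint64_t LBIntCnt;              /* index 0 */
            uint64_t LBFlowStallCnt;        /* index 1 */
            uint64_t TxDataPktCnt;          /* index 2 */
    };

    /* Field name -> counter index, the same offsetof()/sizeof() idea. */
    #define COUNTER_INDEX(field) \
            (offsetof(struct counters, field) / sizeof(uint64_t))

    _Static_assert(COUNTER_INDEX(TxDataPktCnt) == 2, "layout is word-indexed");

The DEVICE_COUNTER() invocations that follow then only have to name the field; the macro supplies both the show function and the read-only DEVICE_ATTR.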
-
-DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt);
-DEVICE_COUNTER(ib_link_err_recoveries, IBLinkErrRecoveryCnt);
-DEVICE_COUNTER(ib_status_changes, IBStatusChangeCnt);
-DEVICE_COUNTER(ib_symbol_errs, IBSymbolErrCnt);
-DEVICE_COUNTER(lb_flow_stalls, LBFlowStallCnt);
-DEVICE_COUNTER(lb_ints, LBIntCnt);
-DEVICE_COUNTER(rx_bad_formats, RxBadFormatCnt);
-DEVICE_COUNTER(rx_buf_ovfls, RxBufOvflCnt);
-DEVICE_COUNTER(rx_data_pkts, RxDataPktCnt);
-DEVICE_COUNTER(rx_dropped_pkts, RxDroppedPktCnt);
-DEVICE_COUNTER(rx_dwords, RxDwordCnt);
-DEVICE_COUNTER(rx_ebps, RxEBPCnt);
-DEVICE_COUNTER(rx_flow_ctrl_errs, RxFlowCtrlErrCnt);
-DEVICE_COUNTER(rx_flow_pkts, RxFlowPktCnt);
-DEVICE_COUNTER(rx_icrc_errs, RxICRCErrCnt);
-DEVICE_COUNTER(rx_len_errs, RxLenErrCnt);
-DEVICE_COUNTER(rx_link_problems, RxLinkProblemCnt);
-DEVICE_COUNTER(rx_lpcrc_errs, RxLPCRCErrCnt);
-DEVICE_COUNTER(rx_max_min_len_errs, RxMaxMinLenErrCnt);
-DEVICE_COUNTER(rx_p0_hdr_egr_ovfls, RxP0HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p1_hdr_egr_ovfls, RxP1HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p2_hdr_egr_ovfls, RxP2HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p3_hdr_egr_ovfls, RxP3HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p4_hdr_egr_ovfls, RxP4HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p5_hdr_egr_ovfls, RxP5HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p6_hdr_egr_ovfls, RxP6HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p7_hdr_egr_ovfls, RxP7HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p8_hdr_egr_ovfls, RxP8HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_pkey_mismatches, RxPKeyMismatchCnt);
-DEVICE_COUNTER(rx_tid_full_errs, RxTIDFullErrCnt);
-DEVICE_COUNTER(rx_tid_valid_errs, RxTIDValidErrCnt);
-DEVICE_COUNTER(rx_vcrc_errs, RxVCRCErrCnt);
-DEVICE_COUNTER(tx_data_pkts, TxDataPktCnt);
-DEVICE_COUNTER(tx_dropped_pkts, TxDroppedPktCnt);
-DEVICE_COUNTER(tx_dwords, TxDwordCnt);
-DEVICE_COUNTER(tx_flow_pkts, TxFlowPktCnt);
-DEVICE_COUNTER(tx_flow_stalls, TxFlowStallCnt);
-DEVICE_COUNTER(tx_len_errs, TxLenErrCnt);
-DEVICE_COUNTER(tx_max_min_len_errs, TxMaxMinLenErrCnt);
-DEVICE_COUNTER(tx_underruns, TxUnderrunCnt);
-DEVICE_COUNTER(tx_unsup_vl_errs, TxUnsupVLErrCnt);
-
-static struct attribute *dev_counter_attributes[] = {
-       &dev_attr_ib_link_downeds.attr,
-       &dev_attr_ib_link_err_recoveries.attr,
-       &dev_attr_ib_status_changes.attr,
-       &dev_attr_ib_symbol_errs.attr,
-       &dev_attr_lb_flow_stalls.attr,
-       &dev_attr_lb_ints.attr,
-       &dev_attr_rx_bad_formats.attr,
-       &dev_attr_rx_buf_ovfls.attr,
-       &dev_attr_rx_data_pkts.attr,
-       &dev_attr_rx_dropped_pkts.attr,
-       &dev_attr_rx_dwords.attr,
-       &dev_attr_rx_ebps.attr,
-       &dev_attr_rx_flow_ctrl_errs.attr,
-       &dev_attr_rx_flow_pkts.attr,
-       &dev_attr_rx_icrc_errs.attr,
-       &dev_attr_rx_len_errs.attr,
-       &dev_attr_rx_link_problems.attr,
-       &dev_attr_rx_lpcrc_errs.attr,
-       &dev_attr_rx_max_min_len_errs.attr,
-       &dev_attr_rx_p0_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p1_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p2_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p3_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p4_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p5_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p6_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p7_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p8_hdr_egr_ovfls.attr,
-       &dev_attr_rx_pkey_mismatches.attr,
-       &dev_attr_rx_tid_full_errs.attr,
-       &dev_attr_rx_tid_valid_errs.attr,
-       &dev_attr_rx_vcrc_errs.attr,
-       &dev_attr_tx_data_pkts.attr,
-       &dev_attr_tx_dropped_pkts.attr,
-       &dev_attr_tx_dwords.attr,
-       &dev_attr_tx_flow_pkts.attr,
-       &dev_attr_tx_flow_stalls.attr,
-       &dev_attr_tx_len_errs.attr,
-       &dev_attr_tx_max_min_len_errs.attr,
-       &dev_attr_tx_underruns.attr,
-       &dev_attr_tx_unsup_vl_errs.attr,
-       NULL
-};
-
-static struct attribute_group dev_counter_attr_group = {
-       .name = "counters",
-       .attrs = dev_counter_attributes
-};
-
-static ssize_t store_reset(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       if (count < 5 || memcmp(buf, "reset", 5)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (dd->ipath_flags & IPATH_DISABLED) {
-               /*
-                * post-reset init would re-enable interrupts, etc.
-                * so don't allow reset on disabled devices.  Not a
-                * perfect error code, but about the best choice.
-                */
-               dev_info(dev,"Unit %d is disabled, can't reset\n",
-                        dd->ipath_unit);
-               ret = -EINVAL;
-               goto bail;
-       }
-       ret = ipath_reset_device(dd->ipath_unit);
-bail:
-       return ret<0 ? ret : count;
-}
-
-static ssize_t store_link_state(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 state;
-
-       ret = ipath_parse_ushort(buf, &state);
-       if (ret < 0)
-               goto invalid;
-
-       r = ipath_set_linkstate(dd, state);
-       if (r < 0) {
-               ret = r;
-               goto bail;
-       }
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid link state\n");
-bail:
-       return ret;
-}
-
-static ssize_t show_mtu(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_ibmtu);
-}
-
-static ssize_t store_mtu(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       ssize_t ret;
-       u16 mtu = 0;
-       int r;
-
-       ret = ipath_parse_ushort(buf, &mtu);
-       if (ret < 0)
-               goto invalid;
-
-       r = ipath_set_mtu(dd, mtu);
-       if (r < 0)
-               ret = r;
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid MTU\n");
-bail:
-       return ret;
-}
-
-static ssize_t show_enabled(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       return scnprintf(buf, PAGE_SIZE, "%u\n",
-                        (dd->ipath_flags & IPATH_DISABLED) ? 0 : 1);
-}
-
-static ssize_t store_enabled(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       ssize_t ret;
-       u16 enable = 0;
-
-       ret = ipath_parse_ushort(buf, &enable);
-       if (ret < 0) {
-               ipath_dev_err(dd, "attempt to use non-numeric on enable\n");
-               goto bail;
-       }
-
-       if (enable) {
-               if (!(dd->ipath_flags & IPATH_DISABLED))
-                       goto bail;
-
-               dev_info(dev, "Enabling unit %d\n", dd->ipath_unit);
-               /* same as post-reset */
-               ret = ipath_init_chip(dd, 1);
-               if (ret)
-                       ipath_dev_err(dd, "Failed to enable unit %d\n",
-                                     dd->ipath_unit);
-               else {
-                       dd->ipath_flags &= ~IPATH_DISABLED;
-                       *dd->ipath_statusp &= ~IPATH_STATUS_ADMIN_DISABLED;
-               }
-       } else if (!(dd->ipath_flags & IPATH_DISABLED)) {
-               dev_info(dev, "Disabling unit %d\n", dd->ipath_unit);
-               ipath_shutdown_device(dd);
-               dd->ipath_flags |= IPATH_DISABLED;
-               *dd->ipath_statusp |= IPATH_STATUS_ADMIN_DISABLED;
-       }
-
-bail:
-       return ret;
-}
-
-static ssize_t store_rx_pol_inv(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret < 0)
-               goto invalid;
-
-       r = ipath_set_rx_pol_inv(dd, val);
-       if (r < 0) {
-               ret = r;
-               goto bail;
-       }
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n");
-bail:
-       return ret;
-}
-
-static ssize_t store_led_override(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret > 0)
-               ipath_set_led_override(dd, val);
-       else
-               ipath_dev_err(dd, "attempt to set invalid LED override\n");
-       return ret;
-}
-
-static ssize_t show_logged_errs(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int idx, count;
-
-       /* force consistency with actual EEPROM */
-       if (ipath_update_eeprom_log(dd) != 0)
-               return -ENXIO;
-
-       count = 0;
-       for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-               count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-                       dd->ipath_eep_st_errs[idx],
-                       idx == (IPATH_EEP_LOG_CNT - 1) ? '\n' : ' ');
-       }
-
-       return count;
-}
-
-/*
- * New sysfs entries to control various IB config. These all turn into
- * accesses via ipath_f_get/set_ib_cfg.
- *
- * Get/Set heartbeat enable. Or of 1=enabled, 2=auto
- */
-static ssize_t show_hrtbt_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_hrtbt_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && val > 3)
-               ret = -EINVAL;
-       if (ret < 0) {
-               ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
-               goto bail;
-       }
-
-       /*
-        * Set the "intentional" heartbeat enable per either of
-        * "Enable" and "Auto", as these are normally set together.
-        * This bit is consulted when leaving loopback mode,
-        * because entering loopback mode overrides it and automatically
-        * disables heartbeat.
-        */
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val);
-       if (r < 0)
-               ret = r;
-       else if (val == IPATH_IB_HRTBT_OFF)
-               dd->ipath_flags |= IPATH_NO_HRTBT;
-       else
-               dd->ipath_flags &= ~IPATH_NO_HRTBT;
-
-bail:
-       return ret;
-}
-
-/*
- * Get/Set Link-widths enabled. Or of 1=1x, 2=4x (this is human/IB centric,
- * _not_ the particular encoding of any given chip)
- */
-static ssize_t show_lwid_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_lwid_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && (val == 0 || val > 3))
-               ret = -EINVAL;
-       if (ret < 0) {
-               ipath_dev_err(dd,
-                       "attempt to set invalid Link Width (enable)\n");
-               goto bail;
-       }
-
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
-       if (r < 0)
-               ret = r;
-
-bail:
-       return ret;
-}
-
-/* Get current link width */
-static ssize_t show_lwid(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-/*
- * Get/Set Link-speeds enabled. Or of 1=SDR 2=DDR.
- */
-static ssize_t show_spd_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_spd_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR)))
-               ret = -EINVAL;
-       if (ret < 0) {
-               ipath_dev_err(dd,
-                       "attempt to set invalid Link Speed (enable)\n");
-               goto bail;
-       }
-
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val);
-       if (r < 0)
-               ret = r;
-
-bail:
-       return ret;
-}
-
-/* Get current link speed */
-static ssize_t show_spd(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-/*
- * Get/Set RX polarity-invert enable. 0=no, 1=yes.
- */
-static ssize_t show_rx_polinv_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_rx_polinv_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && val > 1) {
-               ipath_dev_err(dd,
-                       "attempt to set invalid Rx Polarity (enable)\n");
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
-       if (r < 0)
-               ret = r;
-
-bail:
-       return ret;
-}
-
-/*
- * Get/Set RX lane-reversal enable. 0=no, 1=yes.
- */
-static ssize_t show_lanerev_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_lanerev_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && val > 1) {
-               ret = -EINVAL;
-               ipath_dev_err(dd,
-                       "attempt to set invalid Lane reversal (enable)\n");
-               goto bail;
-       }
-
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val);
-       if (r < 0)
-               ret = r;
-
-bail:
-       return ret;
-}
-
-static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
-static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
-
-static struct attribute *driver_attributes[] = {
-       &driver_attr_num_units.attr,
-       &driver_attr_version.attr,
-       NULL
-};
-
-static struct attribute_group driver_attr_group = {
-       .attrs = driver_attributes
-};
-
-static ssize_t store_tempsense(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf,
-                              size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, stat;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret <= 0) {
-               ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
-               goto bail;
-       }
-       /* If anything but the highest limit, enable T_CRIT_A "interrupt" */
-       stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
-       if (stat) {
-               ipath_dev_err(dd, "Unable to set tempsense config\n");
-               ret = -1;
-               goto bail;
-       }
-       stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
-       if (stat) {
-               ipath_dev_err(dd, "Unable to set local Tcrit\n");
-               ret = -1;
-               goto bail;
-       }
-       stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
-       if (stat) {
-               ipath_dev_err(dd, "Unable to set remote Tcrit\n");
-               ret = -1;
-               goto bail;
-       }
-
-bail:
-       return ret;
-}
-
-/*
- * dump tempsense regs. in decimal, to ease shell-scripts.
- */
-static ssize_t show_tempsense(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-       int idx;
-       u8 regvals[8];
-
-       ret = -ENXIO;
-       for (idx = 0; idx < 8; ++idx) {
-               if (idx == 6)
-                       continue;
-               ret = ipath_tempsense_read(dd, idx);
-               if (ret < 0)
-                       break;
-               regvals[idx] = ret;
-       }
-       if (idx == 8)
-               ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
-                       *(signed char *)(regvals),
-                       *(signed char *)(regvals + 1),
-                       regvals[2], regvals[3],
-                       *(signed char *)(regvals + 5),
-                       *(signed char *)(regvals + 7));
-       return ret;
-}
-
-const struct attribute_group *ipath_driver_attr_groups[] = {
-       &driver_attr_group,
-       NULL,
-};
-
-static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
-static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
-static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
-static DEVICE_ATTR(link_state, S_IWUSR, NULL, store_link_state);
-static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid);
-static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu);
-static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled);
-static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL);
-static DEVICE_ATTR(nports, S_IRUGO, show_nports, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
-static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
-static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
-static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
-static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
-static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
-                  show_jint_max_packets, store_jint_max_packets);
-static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
-                  show_jint_idle_ticks, store_jint_idle_ticks);
-static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
-                  show_tempsense, store_tempsense);
-
-static struct attribute *dev_attributes[] = {
-       &dev_attr_guid.attr,
-       &dev_attr_lmc.attr,
-       &dev_attr_lid.attr,
-       &dev_attr_link_state.attr,
-       &dev_attr_mlid.attr,
-       &dev_attr_mtu.attr,
-       &dev_attr_nguid.attr,
-       &dev_attr_nports.attr,
-       &dev_attr_serial.attr,
-       &dev_attr_status.attr,
-       &dev_attr_status_str.attr,
-       &dev_attr_boardversion.attr,
-       &dev_attr_unit.attr,
-       &dev_attr_enabled.attr,
-       &dev_attr_rx_pol_inv.attr,
-       &dev_attr_led_override.attr,
-       &dev_attr_logged_errors.attr,
-       &dev_attr_tempsense.attr,
-       &dev_attr_localbus_info.attr,
-       NULL
-};
-
-static struct attribute_group dev_attr_group = {
-       .attrs = dev_attributes
-};
-
-static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
-                  store_hrtbt_enb);
-static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb,
-                  store_lwid_enb);
-static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL);
-static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb,
-                  store_spd_enb);
-static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL);
-static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb,
-                  store_rx_polinv_enb);
-static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb,
-                  store_lanerev_enb);
-
-static struct attribute *dev_ibcfg_attributes[] = {
-       &dev_attr_hrtbt_enable.attr,
-       &dev_attr_link_width_enable.attr,
-       &dev_attr_link_width.attr,
-       &dev_attr_link_speed_enable.attr,
-       &dev_attr_link_speed.attr,
-       &dev_attr_rx_pol_inv_enable.attr,
-       &dev_attr_rx_lane_rev_enable.attr,
-       NULL
-};
-
-static struct attribute_group dev_ibcfg_attr_group = {
-       .attrs = dev_ibcfg_attributes
-};
-
-/**
- * ipath_expose_reset - create a device reset file
- * @dev: the device structure
- *
- * Only expose a file that lets us reset the device after someone
- * enters diag mode.  A device reset is quite likely to crash the
- * machine entirely, so we don't normally want to make it
- * available.
- *
- * Called with ipath_mutex held.
- */
-int ipath_expose_reset(struct device *dev)
-{
-       static int exposed;
-       int ret;
-
-       if (!exposed) {
-               ret = device_create_file(dev, &dev_attr_reset);
-               exposed = 1;
-       } else {
-               ret = 0;
-       }
-
-       return ret;
-}
-
-int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
-{
-       int ret;
-
-       ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
-       if (ret)
-               goto bail;
-
-       ret = sysfs_create_group(&dev->kobj, &dev_counter_attr_group);
-       if (ret)
-               goto bail_attrs;
-
-       if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
-               ret = device_create_file(dev, &dev_attr_jint_idle_ticks);
-               if (ret)
-                       goto bail_counter;
-               ret = device_create_file(dev, &dev_attr_jint_max_packets);
-               if (ret)
-                       goto bail_idle;
-
-               ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group);
-               if (ret)
-                       goto bail_max;
-       }
-
-       return 0;
-
-bail_max:
-       device_remove_file(dev, &dev_attr_jint_max_packets);
-bail_idle:
-       device_remove_file(dev, &dev_attr_jint_idle_ticks);
-bail_counter:
-       sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
-bail_attrs:
-       sysfs_remove_group(&dev->kobj, &dev_attr_group);
-bail:
-       return ret;
-}
-
-void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
-{
-       sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
-
-       if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
-               sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group);
-               device_remove_file(dev, &dev_attr_jint_idle_ticks);
-               device_remove_file(dev, &dev_attr_jint_max_packets);
-       }
-
-       sysfs_remove_group(&dev->kobj, &dev_attr_group);
-
-       device_remove_file(dev, &dev_attr_reset);
-}
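
The ipath_device_create_group()/ipath_device_remove_group() pair deleted above follows the usual sysfs error-unwind pattern: each attribute group or file is created in turn, and a failure part-way through jumps into a chain of labels that tears down only what was already created, in reverse order. Below is a minimal, self-contained sketch of that pattern, not the driver's code: setup_a()/setup_b()/setup_c() and the teardown helpers are hypothetical stand-ins for sysfs_create_group()/device_create_file() and their removal counterparts.

#include <stdio.h>

/* Hypothetical stand-ins for sysfs_create_group()/device_create_file():
 * each returns 0 on success or a negative value on failure. */
static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return 0; }
static int setup_c(void) { puts("setup c"); return -1; /* forced failure */ }

static void teardown_a(void) { puts("teardown a"); }
static void teardown_b(void) { puts("teardown b"); }

/* Same shape as the deleted ipath_device_create_group(): a failure at any
 * step unwinds every earlier step, in reverse order, via the goto chain. */
static int create_all(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                goto bail;

        ret = setup_b();
        if (ret)
                goto bail_a;

        ret = setup_c();
        if (ret)
                goto bail_b;

        return 0;

bail_b:
        teardown_b();
bail_a:
        teardown_a();
bail:
        return ret;
}

int main(void)
{
        printf("create_all() returned %d\n", create_all());
        return 0;
}

Built as plain userspace C, this prints the setups in order and then the teardowns in reverse once setup_c() fails, which is the same shape as the bail_max/bail_idle/bail_counter/bail_attrs chain above.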
diff --git a/drivers/staging/rdma/ipath/ipath_uc.c b/drivers/staging/rdma/ipath/ipath_uc.c
deleted file mode 100644 (file)
index 0246b30..0000000
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
-
-/**
- * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_uc_req(struct ipath_qp *qp)
-{
-       struct ipath_other_headers *ohdr;
-       struct ipath_swqe *wqe;
-       unsigned long flags;
-       u32 hwords;
-       u32 bth0;
-       u32 len;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-       int ret = 0;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == qp->s_head)
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (atomic_read(&qp->s_dma_busy)) {
-                       qp->s_flags |= IPATH_S_WAIT_DMA;
-                       goto bail;
-               }
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-               goto done;
-       }
-
-       ohdr = &qp->s_hdr.u.oth;
-       if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-               ohdr = &qp->s_hdr.u.l.oth;
-
-       /* header size in 32-bit words LRH+BTH = (8+12)/4. */
-       hwords = 5;
-       bth0 = 1 << 22; /* Set M bit */
-
-       /* Get the next send request. */
-       wqe = get_swqe_ptr(qp, qp->s_cur);
-       qp->s_wqe = NULL;
-       switch (qp->s_state) {
-       default:
-               if (!(ib_ipath_state_ops[qp->state] &
-                   IPATH_PROCESS_NEXT_SEND_OK))
-                       goto bail;
-               /* Check if send work queue is empty. */
-               if (qp->s_cur == qp->s_head)
-                       goto bail;
-               /*
-                * Start a new request.
-                */
-               qp->s_psn = wqe->psn = qp->s_next_psn;
-               qp->s_sge.sge = wqe->sg_list[0];
-               qp->s_sge.sg_list = wqe->sg_list + 1;
-               qp->s_sge.num_sge = wqe->wr.num_sge;
-               qp->s_len = len = wqe->length;
-               switch (wqe->wr.opcode) {
-               case IB_WR_SEND:
-               case IB_WR_SEND_WITH_IMM:
-                       if (len > pmtu) {
-                               qp->s_state = OP(SEND_FIRST);
-                               len = pmtu;
-                               break;
-                       }
-                       if (wqe->wr.opcode == IB_WR_SEND)
-                               qp->s_state = OP(SEND_ONLY);
-                       else {
-                               qp->s_state =
-                                       OP(SEND_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes after the BTH */
-                               ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                               hwords += 1;
-                       }
-                       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                               bth0 |= 1 << 23;
-                       qp->s_wqe = wqe;
-                       if (++qp->s_cur >= qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               case IB_WR_RDMA_WRITE:
-               case IB_WR_RDMA_WRITE_WITH_IMM:
-                       ohdr->u.rc.reth.vaddr =
-                               cpu_to_be64(wqe->rdma_wr.remote_addr);
-                       ohdr->u.rc.reth.rkey =
-                               cpu_to_be32(wqe->rdma_wr.rkey);
-                       ohdr->u.rc.reth.length = cpu_to_be32(len);
-                       hwords += sizeof(struct ib_reth) / 4;
-                       if (len > pmtu) {
-                               qp->s_state = OP(RDMA_WRITE_FIRST);
-                               len = pmtu;
-                               break;
-                       }
-                       if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-                               qp->s_state = OP(RDMA_WRITE_ONLY);
-                       else {
-                               qp->s_state =
-                                       OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes after the RETH */
-                               ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
-                               hwords += 1;
-                               if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                                       bth0 |= 1 << 23;
-                       }
-                       qp->s_wqe = wqe;
-                       if (++qp->s_cur >= qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               default:
-                       goto bail;
-               }
-               break;
-
-       case OP(SEND_FIRST):
-               qp->s_state = OP(SEND_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(SEND_MIDDLE):
-               len = qp->s_len;
-               if (len > pmtu) {
-                       len = pmtu;
-                       break;
-               }
-               if (wqe->wr.opcode == IB_WR_SEND)
-                       qp->s_state = OP(SEND_LAST);
-               else {
-                       qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
-                       /* Immediate data comes after the BTH */
-                       ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                       hwords += 1;
-               }
-               if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                       bth0 |= 1 << 23;
-               qp->s_wqe = wqe;
-               if (++qp->s_cur >= qp->s_size)
-                       qp->s_cur = 0;
-               break;
-
-       case OP(RDMA_WRITE_FIRST):
-               qp->s_state = OP(RDMA_WRITE_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(RDMA_WRITE_MIDDLE):
-               len = qp->s_len;
-               if (len > pmtu) {
-                       len = pmtu;
-                       break;
-               }
-               if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-                       qp->s_state = OP(RDMA_WRITE_LAST);
-               else {
-                       qp->s_state =
-                               OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
-                       /* Immediate data comes after the BTH */
-                       ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                       hwords += 1;
-                       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                               bth0 |= 1 << 23;
-               }
-               qp->s_wqe = wqe;
-               if (++qp->s_cur >= qp->s_size)
-                       qp->s_cur = 0;
-               break;
-       }
-       qp->s_len -= len;
-       qp->s_hdrwords = hwords;
-       qp->s_cur_sge = &qp->s_sge;
-       qp->s_cur_size = len;
-       ipath_make_ruc_header(to_idev(qp->ibqp.device),
-                             qp, ohdr, bth0 | (qp->s_state << 24),
-                             qp->s_next_psn++ & IPATH_PSN_MASK);
-done:
-       ret = 1;
-       goto unlock;
-
-bail:
-       qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       return ret;
-}
-
-/**
- * ipath_uc_rcv - handle an incoming UC packet
- * @dev: the device the packet came in on
- * @hdr: the header of the packet
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
- *
- * This is called from ipath_qp_rcv() to process an incoming UC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-       struct ipath_other_headers *ohdr;
-       int opcode;
-       u32 hdrsize;
-       u32 psn;
-       u32 pad;
-       struct ib_wc wc;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-       struct ib_reth *reth;
-       int header_in_data;
-
-       /* Validate the SLID. See Ch. 9.6.1.5 */
-       if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
-               goto done;
-
-       /* Check for GRH */
-       if (!has_grh) {
-               ohdr = &hdr->u.oth;
-               hdrsize = 8 + 12;       /* LRH + BTH */
-               psn = be32_to_cpu(ohdr->bth[2]);
-               header_in_data = 0;
-       } else {
-               ohdr = &hdr->u.l.oth;
-               hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
-               /*
-                * The header with GRH is 60 bytes and the
-                * core driver sets the eager header buffer
-                * size to 56 bytes so the last 4 bytes of
-                * the BTH header (PSN) is in the data buffer.
-                */
-               header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-               if (header_in_data) {
-                       psn = be32_to_cpu(((__be32 *) data)[0]);
-                       data += sizeof(__be32);
-               } else
-                       psn = be32_to_cpu(ohdr->bth[2]);
-       }
-       /*
-        * The opcode is in the low byte when it's in network order
-        * (top byte when in host order).
-        */
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-
-       memset(&wc, 0, sizeof wc);
-
-       /* Compare the PSN versus the expected PSN. */
-       if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
-               /*
-                * Handle a sequence error.
-                * Silently drop any current message.
-                */
-               qp->r_psn = psn;
-       inv:
-               qp->r_state = OP(SEND_LAST);
-               switch (opcode) {
-               case OP(SEND_FIRST):
-               case OP(SEND_ONLY):
-               case OP(SEND_ONLY_WITH_IMMEDIATE):
-                       goto send_first;
-
-               case OP(RDMA_WRITE_FIRST):
-               case OP(RDMA_WRITE_ONLY):
-               case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
-                       goto rdma_first;
-
-               default:
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-       }
-
-       /* Check for opcode sequence errors. */
-       switch (qp->r_state) {
-       case OP(SEND_FIRST):
-       case OP(SEND_MIDDLE):
-               if (opcode == OP(SEND_MIDDLE) ||
-                   opcode == OP(SEND_LAST) ||
-                   opcode == OP(SEND_LAST_WITH_IMMEDIATE))
-                       break;
-               goto inv;
-
-       case OP(RDMA_WRITE_FIRST):
-       case OP(RDMA_WRITE_MIDDLE):
-               if (opcode == OP(RDMA_WRITE_MIDDLE) ||
-                   opcode == OP(RDMA_WRITE_LAST) ||
-                   opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-                       break;
-               goto inv;
-
-       default:
-               if (opcode == OP(SEND_FIRST) ||
-                   opcode == OP(SEND_ONLY) ||
-                   opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
-                   opcode == OP(RDMA_WRITE_FIRST) ||
-                   opcode == OP(RDMA_WRITE_ONLY) ||
-                   opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-                       break;
-               goto inv;
-       }
-
-       /* OK, process the packet. */
-       switch (opcode) {
-       case OP(SEND_FIRST):
-       case OP(SEND_ONLY):
-       case OP(SEND_ONLY_WITH_IMMEDIATE):
-       send_first:
-               if (qp->r_flags & IPATH_R_REUSE_SGE) {
-                       qp->r_flags &= ~IPATH_R_REUSE_SGE;
-                       qp->r_sge = qp->s_rdma_read_sge;
-               } else if (!ipath_get_rwqe(qp, 0)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               /* Save the WQE so we can reuse it in case of an error. */
-               qp->s_rdma_read_sge = qp->r_sge;
-               qp->r_rcv_len = 0;
-               if (opcode == OP(SEND_ONLY))
-                       goto send_last;
-               else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
-                       goto send_last_imm;
-               /* FALLTHROUGH */
-       case OP(SEND_MIDDLE):
-               /* Check for invalid length PMTU or posted rwqe len. */
-               if (unlikely(tlen != (hdrsize + pmtu + 4))) {
-                       qp->r_flags |= IPATH_R_REUSE_SGE;
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               qp->r_rcv_len += pmtu;
-               if (unlikely(qp->r_rcv_len > qp->r_len)) {
-                       qp->r_flags |= IPATH_R_REUSE_SGE;
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               ipath_copy_sge(&qp->r_sge, data, pmtu);
-               break;
-
-       case OP(SEND_LAST_WITH_IMMEDIATE):
-       send_last_imm:
-               if (header_in_data) {
-                       wc.ex.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else {
-                       /* Immediate data comes after BTH */
-                       wc.ex.imm_data = ohdr->u.imm_data;
-               }
-               hdrsize += 4;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               /* FALLTHROUGH */
-       case OP(SEND_LAST):
-       send_last:
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /* Check for invalid length. */
-               /* XXX LAST len should be >= 1 */
-               if (unlikely(tlen < (hdrsize + pad + 4))) {
-                       qp->r_flags |= IPATH_R_REUSE_SGE;
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               /* Don't count the CRC. */
-               tlen -= (hdrsize + pad + 4);
-               wc.byte_len = tlen + qp->r_rcv_len;
-               if (unlikely(wc.byte_len > qp->r_len)) {
-                       qp->r_flags |= IPATH_R_REUSE_SGE;
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               wc.opcode = IB_WC_RECV;
-       last_imm:
-               ipath_copy_sge(&qp->r_sge, data, tlen);
-               wc.wr_id = qp->r_wr_id;
-               wc.status = IB_WC_SUCCESS;
-               wc.qp = &qp->ibqp;
-               wc.src_qp = qp->remote_qpn;
-               wc.slid = qp->remote_ah_attr.dlid;
-               wc.sl = qp->remote_ah_attr.sl;
-               /* Signal completion event if the solicited bit is set. */
-               ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                              (ohdr->bth[0] &
-                               cpu_to_be32(1 << 23)) != 0);
-               break;
-
-       case OP(RDMA_WRITE_FIRST):
-       case OP(RDMA_WRITE_ONLY):
-       case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
-       rdma_first:
-               /* RETH comes after BTH */
-               if (!header_in_data)
-                       reth = &ohdr->u.rc.reth;
-               else {
-                       reth = (struct ib_reth *)data;
-                       data += sizeof(*reth);
-               }
-               hdrsize += sizeof(*reth);
-               qp->r_len = be32_to_cpu(reth->length);
-               qp->r_rcv_len = 0;
-               if (qp->r_len != 0) {
-                       u32 rkey = be32_to_cpu(reth->rkey);
-                       u64 vaddr = be64_to_cpu(reth->vaddr);
-                       int ok;
-
-                       /* Check rkey */
-                       ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
-                                          vaddr, rkey,
-                                          IB_ACCESS_REMOTE_WRITE);
-                       if (unlikely(!ok)) {
-                               dev->n_pkt_drops++;
-                               goto done;
-                       }
-               } else {
-                       qp->r_sge.sg_list = NULL;
-                       qp->r_sge.sge.mr = NULL;
-                       qp->r_sge.sge.vaddr = NULL;
-                       qp->r_sge.sge.length = 0;
-                       qp->r_sge.sge.sge_length = 0;
-               }
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_WRITE))) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               if (opcode == OP(RDMA_WRITE_ONLY))
-                       goto rdma_last;
-               else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-                       goto rdma_last_imm;
-               /* FALLTHROUGH */
-       case OP(RDMA_WRITE_MIDDLE):
-               /* Check for invalid length PMTU or posted rwqe len. */
-               if (unlikely(tlen != (hdrsize + pmtu + 4))) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               qp->r_rcv_len += pmtu;
-               if (unlikely(qp->r_rcv_len > qp->r_len)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               ipath_copy_sge(&qp->r_sge, data, pmtu);
-               break;
-
-       case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-       rdma_last_imm:
-               if (header_in_data) {
-                       wc.ex.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else {
-                       /* Immediate data comes after BTH */
-                       wc.ex.imm_data = ohdr->u.imm_data;
-               }
-               hdrsize += 4;
-               wc.wc_flags = IB_WC_WITH_IMM;
-
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /* Check for invalid length. */
-               /* XXX LAST len should be >= 1 */
-               if (unlikely(tlen < (hdrsize + pad + 4))) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               /* Don't count the CRC. */
-               tlen -= (hdrsize + pad + 4);
-               if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               if (qp->r_flags & IPATH_R_REUSE_SGE)
-                       qp->r_flags &= ~IPATH_R_REUSE_SGE;
-               else if (!ipath_get_rwqe(qp, 1)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               wc.byte_len = qp->r_len;
-               wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-               goto last_imm;
-
-       case OP(RDMA_WRITE_LAST):
-       rdma_last:
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /* Check for invalid length. */
-               /* XXX LAST len should be >= 1 */
-               if (unlikely(tlen < (hdrsize + pad + 4))) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               /* Don't count the CRC. */
-               tlen -= (hdrsize + pad + 4);
-               if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               ipath_copy_sge(&qp->r_sge, data, tlen);
-               break;
-
-       default:
-               /* Drop packet for unknown opcodes. */
-               dev->n_pkt_drops++;
-               goto done;
-       }
-       qp->r_psn++;
-       qp->r_state = opcode;
-done:
-       return;
-}
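
In the deleted ipath_make_uc_req() above, a send or RDMA write that fits within the path MTU goes out as a single *_ONLY packet, while anything longer is segmented into *_FIRST, zero or more *_MIDDLE packets, and a *_LAST packet, with every packet before the last carrying one PMTU of payload. The toy, userspace-only model below shows just that segmentation decision; the 2048-byte PMTU and the message lengths are made-up values, and none of this is the driver's code.

#include <stdio.h>

/* Toy model of the segmentation choice in the deleted ipath_make_uc_req():
 * a message no longer than the path MTU is a single ONLY packet; anything
 * longer becomes FIRST + some MIDDLEs + LAST, each carrying at most pmtu. */
int main(void)
{
        const unsigned int pmtu = 2048;            /* assumed path MTU */
        const unsigned int lengths[] = { 512, 2048, 4096, 5000 };

        for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
                unsigned int len = lengths[i];

                if (len <= pmtu) {
                        printf("%4u bytes -> 1 packet (ONLY)\n", len);
                        continue;
                }
                /* FIRST and each MIDDLE carry pmtu bytes; LAST carries
                 * whatever remains (at most pmtu). */
                unsigned int middle = (len - 1) / pmtu - 1;
                printf("%4u bytes -> FIRST + %u MIDDLE + LAST\n", len, middle);
        }
        return 0;
}

With these numbers, 5000 bytes comes out as FIRST + 1 MIDDLE + LAST, which matches how qp->s_len is reduced by pmtu per packet in the state machine above.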
diff --git a/drivers/staging/rdma/ipath/ipath_ud.c b/drivers/staging/rdma/ipath/ipath_ud.c
deleted file mode 100644 (file)
index 385d941..0000000
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/**
- * ipath_ud_loopback - handle send on loopback QPs
- * @sqp: the sending QP
- * @swqe: the send work request
- *
- * This is called from ipath_make_ud_req() to forward a WQE addressed
- * to the same HCA.
- * Note that the receive interrupt handler may be calling ipath_ud_rcv()
- * while this is being called.
- */
-static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
-{
-       struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
-       struct ipath_qp *qp;
-       struct ib_ah_attr *ah_attr;
-       unsigned long flags;
-       struct ipath_rq *rq;
-       struct ipath_srq *srq;
-       struct ipath_sge_state rsge;
-       struct ipath_sge *sge;
-       struct ipath_rwq *wq;
-       struct ipath_rwqe *wqe;
-       void (*handler)(struct ib_event *, void *);
-       struct ib_wc wc;
-       u32 tail;
-       u32 rlen;
-       u32 length;
-
-       qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn);
-       if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-               dev->n_pkt_drops++;
-               goto done;
-       }
-
-       /*
-        * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
-        * Qkeys with the high order bit set mean use the
-        * qkey from the QP context instead of the WR (see 10.2.5).
-        */
-       if (unlikely(qp->ibqp.qp_num &&
-                    ((int) swqe->ud_wr.remote_qkey < 0 ?
-                     sqp->qkey : swqe->ud_wr.remote_qkey) != qp->qkey)) {
-               /* XXX OK to lose a count once in a while. */
-               dev->qkey_violations++;
-               dev->n_pkt_drops++;
-               goto drop;
-       }
-
-       /*
-        * A GRH is expected to precede the data even if not
-        * present on the wire.
-        */
-       length = swqe->length;
-       memset(&wc, 0, sizeof wc);
-       wc.byte_len = length + sizeof(struct ib_grh);
-
-       if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
-               wc.wc_flags = IB_WC_WITH_IMM;
-               wc.ex.imm_data = swqe->wr.ex.imm_data;
-       }
-
-       /*
-        * This would be a lot simpler if we could call ipath_get_rwqe()
-        * but that uses state that the receive interrupt handler uses
-        * so we would need to lock out receive interrupts while doing
-        * local loopback.
-        */
-       if (qp->ibqp.srq) {
-               srq = to_isrq(qp->ibqp.srq);
-               handler = srq->ibsrq.event_handler;
-               rq = &srq->rq;
-       } else {
-               srq = NULL;
-               handler = NULL;
-               rq = &qp->r_rq;
-       }
-
-       /*
-        * Get the next work request entry to find where to put the data.
-        * Note that it is safe to drop the lock after changing rq->tail
-        * since ipath_post_receive() won't fill the empty slot.
-        */
-       spin_lock_irqsave(&rq->lock, flags);
-       wq = rq->wq;
-       tail = wq->tail;
-       /* Validate tail before using it since it is user writable. */
-       if (tail >= rq->size)
-               tail = 0;
-       if (unlikely(tail == wq->head)) {
-               spin_unlock_irqrestore(&rq->lock, flags);
-               dev->n_pkt_drops++;
-               goto drop;
-       }
-       wqe = get_rwqe_ptr(rq, tail);
-       rsge.sg_list = qp->r_ud_sg_list;
-       if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
-               spin_unlock_irqrestore(&rq->lock, flags);
-               dev->n_pkt_drops++;
-               goto drop;
-       }
-       /* Silently drop packets which are too big. */
-       if (wc.byte_len > rlen) {
-               spin_unlock_irqrestore(&rq->lock, flags);
-               dev->n_pkt_drops++;
-               goto drop;
-       }
-       if (++tail >= rq->size)
-               tail = 0;
-       wq->tail = tail;
-       wc.wr_id = wqe->wr_id;
-       if (handler) {
-               u32 n;
-
-               /*
-                * validate head pointer value and compute
-                * the number of remaining WQEs.
-                */
-               n = wq->head;
-               if (n >= rq->size)
-                       n = 0;
-               if (n < tail)
-                       n += rq->size - tail;
-               else
-                       n -= tail;
-               if (n < srq->limit) {
-                       struct ib_event ev;
-
-                       srq->limit = 0;
-                       spin_unlock_irqrestore(&rq->lock, flags);
-                       ev.device = qp->ibqp.device;
-                       ev.element.srq = qp->ibqp.srq;
-                       ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-                       handler(&ev, srq->ibsrq.srq_context);
-               } else
-                       spin_unlock_irqrestore(&rq->lock, flags);
-       } else
-               spin_unlock_irqrestore(&rq->lock, flags);
-
-       ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
-       if (ah_attr->ah_flags & IB_AH_GRH) {
-               ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
-               wc.wc_flags |= IB_WC_GRH;
-       } else
-               ipath_skip_sge(&rsge, sizeof(struct ib_grh));
-       sge = swqe->sg_list;
-       while (length) {
-               u32 len = sge->length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               ipath_copy_sge(&rsge, sge->vaddr, len);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--swqe->wr.num_sge)
-                               sge++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               length -= len;
-       }
-       wc.status = IB_WC_SUCCESS;
-       wc.opcode = IB_WC_RECV;
-       wc.qp = &qp->ibqp;
-       wc.src_qp = sqp->ibqp.qp_num;
-       /* XXX do we know which pkey matched? Only needed for GSI. */
-       wc.pkey_index = 0;
-       wc.slid = dev->dd->ipath_lid |
-               (ah_attr->src_path_bits &
-                ((1 << dev->dd->ipath_lmc) - 1));
-       wc.sl = ah_attr->sl;
-       wc.dlid_path_bits =
-               ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
-       wc.port_num = 1;
-       /* Signal completion event if the solicited bit is set. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                      swqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED);
-drop:
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-done:;
-}
-
-/**
- * ipath_make_ud_req - construct a UD request packet
- * @qp: the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_ud_req(struct ipath_qp *qp)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_other_headers *ohdr;
-       struct ib_ah_attr *ah_attr;
-       struct ipath_swqe *wqe;
-       unsigned long flags;
-       u32 nwords;
-       u32 extra_bytes;
-       u32 bth0;
-       u16 lrh0;
-       u16 lid;
-       int ret = 0;
-       int next_cur;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == qp->s_head)
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (atomic_read(&qp->s_dma_busy)) {
-                       qp->s_flags |= IPATH_S_WAIT_DMA;
-                       goto bail;
-               }
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-               goto done;
-       }
-
-       if (qp->s_cur == qp->s_head)
-               goto bail;
-
-       wqe = get_swqe_ptr(qp, qp->s_cur);
-       next_cur = qp->s_cur + 1;
-       if (next_cur >= qp->s_size)
-               next_cur = 0;
-
-       /* Construct the header. */
-       ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
-       if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
-               if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
-                       dev->n_multicast_xmit++;
-               else
-                       dev->n_unicast_xmit++;
-       } else {
-               dev->n_unicast_xmit++;
-               lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
-               if (unlikely(lid == dev->dd->ipath_lid)) {
-                       /*
-                        * If DMAs are in progress, we can't generate
-                        * a completion for the loopback packet since
-                        * it would be out of order.
-                        * XXX Instead of waiting, we could queue a
-                        * zero length descriptor so we get a callback.
-                        */
-                       if (atomic_read(&qp->s_dma_busy)) {
-                               qp->s_flags |= IPATH_S_WAIT_DMA;
-                               goto bail;
-                       }
-                       qp->s_cur = next_cur;
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
-                       ipath_ud_loopback(qp, wqe);
-                       spin_lock_irqsave(&qp->s_lock, flags);
-                       ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
-                       goto done;
-               }
-       }
-
-       qp->s_cur = next_cur;
-       extra_bytes = -wqe->length & 3;
-       nwords = (wqe->length + extra_bytes) >> 2;
-
-       /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
-       qp->s_hdrwords = 7;
-       qp->s_cur_size = wqe->length;
-       qp->s_cur_sge = &qp->s_sge;
-       qp->s_dmult = ah_attr->static_rate;
-       qp->s_wqe = wqe;
-       qp->s_sge.sge = wqe->sg_list[0];
-       qp->s_sge.sg_list = wqe->sg_list + 1;
-       qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge;
-
-       if (ah_attr->ah_flags & IB_AH_GRH) {
-               /* Header size in 32-bit words. */
-               qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
-                                                &ah_attr->grh,
-                                                qp->s_hdrwords, nwords);
-               lrh0 = IPATH_LRH_GRH;
-               ohdr = &qp->s_hdr.u.l.oth;
-               /*
-                * Don't worry about sending to locally attached multicast
-                * QPs.  It is unspecified by the spec. what happens.
-                */
-       } else {
-               /* Header size in 32-bit words. */
-               lrh0 = IPATH_LRH_BTH;
-               ohdr = &qp->s_hdr.u.oth;
-       }
-       if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) {
-               qp->s_hdrwords++;
-               ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data;
-               bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
-       } else
-               bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
-       lrh0 |= ah_attr->sl << 4;
-       if (qp->ibqp.qp_type == IB_QPT_SMI)
-               lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
-       qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-       qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
-       qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
-                                          SIZE_OF_CRC);
-       lid = dev->dd->ipath_lid;
-       if (lid) {
-               lid |= ah_attr->src_path_bits &
-                       ((1 << dev->dd->ipath_lmc) - 1);
-               qp->s_hdr.lrh[3] = cpu_to_be16(lid);
-       } else
-               qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
-       if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED)
-               bth0 |= 1 << 23;
-       bth0 |= extra_bytes << 20;
-       bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
-               ipath_get_pkey(dev->dd, qp->s_pkey_index);
-       ohdr->bth[0] = cpu_to_be32(bth0);
-       /*
-        * Use the multicast QP if the destination LID is a multicast LID.
-        */
-       ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
-               ah_attr->dlid != IPATH_PERMISSIVE_LID ?
-               cpu_to_be32(IPATH_MULTICAST_QPN) :
-               cpu_to_be32(wqe->ud_wr.remote_qpn);
-       ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
-       /*
-        * Qkeys with the high order bit set mean use the
-        * qkey from the QP context instead of the WR (see 10.2.5).
-        */
-       ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
-                                        qp->qkey : wqe->ud_wr.remote_qkey);
-       ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
-
-done:
-       ret = 1;
-       goto unlock;
-
-bail:
-       qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       return ret;
-}
-
-/**
- * ipath_ud_rcv - receive an incoming UD packet
- * @dev: the device the packet came in on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from ipath_qp_rcv() to process an incoming UD packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-       struct ipath_other_headers *ohdr;
-       int opcode;
-       u32 hdrsize;
-       u32 pad;
-       struct ib_wc wc;
-       u32 qkey;
-       u32 src_qp;
-       u16 dlid;
-       int header_in_data;
-
-       /* Check for GRH */
-       if (!has_grh) {
-               ohdr = &hdr->u.oth;
-               hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
-               qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
-               src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
-               header_in_data = 0;
-       } else {
-               ohdr = &hdr->u.l.oth;
-               hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
-               /*
-                * The header with GRH is 68 bytes and the core driver sets
-                * the eager header buffer size to 56 bytes so the last 12
-                * bytes of the IB header is in the data buffer.
-                */
-               header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-               if (header_in_data) {
-                       qkey = be32_to_cpu(((__be32 *) data)[1]);
-                       src_qp = be32_to_cpu(((__be32 *) data)[2]);
-                       data += 12;
-               } else {
-                       qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
-                       src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
-               }
-       }
-       src_qp &= IPATH_QPN_MASK;
-
-       /*
-        * Check that the permissive LID is only used on QP0
-        * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
-        */
-       if (qp->ibqp.qp_num) {
-               if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
-                            hdr->lrh[3] == IB_LID_PERMISSIVE)) {
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-               if (unlikely(qkey != qp->qkey)) {
-                       /* XXX OK to lose a count once in a while. */
-                       dev->qkey_violations++;
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-       } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
-                  hdr->lrh[3] == IB_LID_PERMISSIVE) {
-               struct ib_smp *smp = (struct ib_smp *) data;
-
-               if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-       }
-
-       /*
-        * The opcode is in the low byte when it's in network order
-        * (top byte when in host order).
-        */
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-       if (qp->ibqp.qp_num > 1 &&
-           opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
-               if (header_in_data) {
-                       wc.ex.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else
-                       wc.ex.imm_data = ohdr->u.ud.imm_data;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               hdrsize += sizeof(u32);
-       } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
-               wc.ex.imm_data = 0;
-               wc.wc_flags = 0;
-       } else {
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-
-       /* Get the number of bytes the message was padded by. */
-       pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-       if (unlikely(tlen < (hdrsize + pad + 4))) {
-               /* Drop incomplete packets. */
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-       tlen -= hdrsize + pad + 4;
-
-       /* Drop invalid MAD packets (see 13.5.3.1). */
-       if (unlikely((qp->ibqp.qp_num == 0 &&
-                     (tlen != 256 ||
-                      (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
-                    (qp->ibqp.qp_num == 1 &&
-                     (tlen != 256 ||
-                      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-
-       /*
-        * A GRH is expected to precede the data even if not
-        * present on the wire.
-        */
-       wc.byte_len = tlen + sizeof(struct ib_grh);
-
-       /*
-        * Get the next work request entry to find where to put the data.
-        */
-       if (qp->r_flags & IPATH_R_REUSE_SGE)
-               qp->r_flags &= ~IPATH_R_REUSE_SGE;
-       else if (!ipath_get_rwqe(qp, 0)) {
-               /*
-                * Count VL15 packets dropped due to no receive buffer.
-                * Otherwise, count them as buffer overruns since usually,
-                * the HW will be able to receive packets even if there are
-                * no QPs with posted receive buffers.
-                */
-               if (qp->ibqp.qp_num == 0)
-                       dev->n_vl15_dropped++;
-               else
-                       dev->rcv_errors++;
-               goto bail;
-       }
-       /* Silently drop packets which are too big. */
-       if (wc.byte_len > qp->r_len) {
-               qp->r_flags |= IPATH_R_REUSE_SGE;
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-       if (has_grh) {
-               ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
-                              sizeof(struct ib_grh));
-               wc.wc_flags |= IB_WC_GRH;
-       } else
-               ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
-       ipath_copy_sge(&qp->r_sge, data,
-                      wc.byte_len - sizeof(struct ib_grh));
-       if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-               goto bail;
-       wc.wr_id = qp->r_wr_id;
-       wc.status = IB_WC_SUCCESS;
-       wc.opcode = IB_WC_RECV;
-       wc.vendor_err = 0;
-       wc.qp = &qp->ibqp;
-       wc.src_qp = src_qp;
-       /* XXX do we know which pkey matched? Only needed for GSI. */
-       wc.pkey_index = 0;
-       wc.slid = be16_to_cpu(hdr->lrh[3]);
-       wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
-       dlid = be16_to_cpu(hdr->lrh[1]);
-       /*
-        * Save the LMC lower bits if the destination LID is a unicast LID.
-        */
-       wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
-               dlid & ((1 << dev->dd->ipath_lmc) - 1);
-       wc.port_num = 1;
-       /* Signal completion event if the solicited bit is set. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                      (ohdr->bth[0] &
-                       cpu_to_be32(1 << 23)) != 0);
-
-bail:;
-}
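
Two details of the deleted ipath_make_ud_req() above are worth spelling out: the payload is padded up to a 4-byte boundary with extra_bytes = -wqe->length & 3 (the pad count lands in BTH bits 20-21 and is stripped again on receive), and a work-request qkey with the high-order bit set means "use the QP's own qkey instead", which the driver expresses as a signed comparison against zero (see the 10.2.5 comments). The self-contained check below reproduces both calculations with made-up values; pad_to_dword() and effective_qkey() are hypothetical names, not driver functions.

#include <stdio.h>
#include <stdint.h>

/* Pad a payload length up to the next 4-byte boundary, the same arithmetic
 * as extra_bytes = -wqe->length & 3 in the deleted ipath_make_ud_req(). */
static unsigned int pad_to_dword(unsigned int len)
{
        return -len & 3;
}

/* A qkey with the high-order bit set means "use the QP's qkey instead of
 * the work request's" (IBTA 10.2.5); the driver writes this as a signed
 * compare against zero. */
static uint32_t effective_qkey(uint32_t wr_qkey, uint32_t qp_qkey)
{
        return (int32_t)wr_qkey < 0 ? qp_qkey : wr_qkey;
}

int main(void)
{
        for (unsigned int len = 61; len <= 64; len++)
                printf("length %u -> %u pad byte(s)\n", len, pad_to_dword(len));

        printf("wr qkey 0x%08x -> 0x%08x\n", 0x12345u, effective_qkey(0x12345u, 0xdeadu));
        printf("wr qkey 0x%08x -> 0x%08x\n", 0x80000000u, effective_qkey(0x80000000u, 0xdeadu));
        return 0;
}

Lengths 61 through 64 pad by 3, 2, 1 and 0 bytes, and the 0x80000000 qkey falls back to the QP's 0xdead while 0x12345 is used as-is.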
diff --git a/drivers/staging/rdma/ipath/ipath_user_pages.c b/drivers/staging/rdma/ipath/ipath_user_pages.c
deleted file mode 100644 (file)
index d29b4da..0000000
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-
-#include "ipath_kernel.h"
-
-static void __ipath_release_user_pages(struct page **p, size_t num_pages,
-                                  int dirty)
-{
-       size_t i;
-
-       for (i = 0; i < num_pages; i++) {
-               ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
-                          (unsigned long) num_pages, p[i]);
-               if (dirty)
-                       set_page_dirty_lock(p[i]);
-               put_page(p[i]);
-       }
-}
-
-/* call with current->mm->mmap_sem held */
-static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                                 struct page **p)
-{
-       unsigned long lock_limit;
-       size_t got;
-       int ret;
-
-       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-       if (num_pages > lock_limit) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
-                  (unsigned long) num_pages, start_page);
-
-       for (got = 0; got < num_pages; got += ret) {
-               ret = get_user_pages(current, current->mm,
-                                    start_page + got * PAGE_SIZE,
-                                    num_pages - got, 1, 1,
-                                    p + got, NULL);
-               if (ret < 0)
-                       goto bail_release;
-       }
-
-       current->mm->pinned_vm += num_pages;
-
-       ret = 0;
-       goto bail;
-
-bail_release:
-       __ipath_release_user_pages(p, got, 0);
-bail:
-       return ret;
-}
-
-/**
- * ipath_map_page - a safety wrapper around pci_map_page()
- *
- * A dma_addr of all 0's is interpreted by the chip as "disabled".
- * Unfortunately, it can also be a valid dma_addr returned on some
- * architectures.
- *
- * The powerpc iommu assigns dma_addrs in ascending order, so we don't
- * have to bother with retries or mapping a dummy page to ensure we
- * don't just get the same mapping again.
- *
- * I'm sure we won't be so lucky with other iommu's, so FIXME.
- */
-dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
-       unsigned long offset, size_t size, int direction)
-{
-       dma_addr_t phys;
-
-       phys = pci_map_page(hwdev, page, offset, size, direction);
-
-       if (phys == 0) {
-               pci_unmap_page(hwdev, phys, size, direction);
-               phys = pci_map_page(hwdev, page, offset, size, direction);
-               /*
-                * FIXME: If we get 0 again, we should keep this page,
-                * map another, then free the 0 page.
-                */
-       }
-
-       return phys;
-}
-
-/**
- * ipath_map_single - a safety wrapper around pci_map_single()
- *
- * Same idea as ipath_map_page().
- */
-dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-       int direction)
-{
-       dma_addr_t phys;
-
-       phys = pci_map_single(hwdev, ptr, size, direction);
-
-       if (phys == 0) {
-               pci_unmap_single(hwdev, phys, size, direction);
-               phys = pci_map_single(hwdev, ptr, size, direction);
-               /*
-                * FIXME: If we get 0 again, we should keep this page,
-                * map another, then free the 0 page.
-                */
-       }
-
-       return phys;
-}
-
-/**
- * ipath_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (page aligned user virtual
- * address) and pins it and the following specified number of pages.  For
- * now, num_pages is always 1, but that will probably change at some point
- * (because caller is doing expected sends on a single virtually contiguous
- * buffer, so we can do all pages at once).
- */
-int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                        struct page **p)
-{
-       int ret;
-
-       down_write(&current->mm->mmap_sem);
-
-       ret = __ipath_get_user_pages(start_page, num_pages, p);
-
-       up_write(&current->mm->mmap_sem);
-
-       return ret;
-}
-
-void ipath_release_user_pages(struct page **p, size_t num_pages)
-{
-       down_write(&current->mm->mmap_sem);
-
-       __ipath_release_user_pages(p, num_pages, 1);
-
-       current->mm->pinned_vm -= num_pages;
-
-       up_write(&current->mm->mmap_sem);
-}
-
-struct ipath_user_pages_work {
-       struct work_struct work;
-       struct mm_struct *mm;
-       unsigned long num_pages;
-};
-
-static void user_pages_account(struct work_struct *_work)
-{
-       struct ipath_user_pages_work *work =
-               container_of(_work, struct ipath_user_pages_work, work);
-
-       down_write(&work->mm->mmap_sem);
-       work->mm->pinned_vm -= work->num_pages;
-       up_write(&work->mm->mmap_sem);
-       mmput(work->mm);
-       kfree(work);
-}
-
-void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
-{
-       struct ipath_user_pages_work *work;
-       struct mm_struct *mm;
-
-       __ipath_release_user_pages(p, num_pages, 1);
-
-       mm = get_task_mm(current);
-       if (!mm)
-               return;
-
-       work = kmalloc(sizeof(*work), GFP_KERNEL);
-       if (!work)
-               goto bail_mm;
-
-       INIT_WORK(&work->work, user_pages_account);
-       work->mm = mm;
-       work->num_pages = num_pages;
-
-       queue_work(ib_wq, &work->work);
-       return;
-
-bail_mm:
-       mmput(mm);
-       return;
-}
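
The deleted ipath_user_pages.c pins user buffers with get_user_pages() while holding mmap_sem, rejects requests larger than the RLIMIT_MEMLOCK page limit, and charges the pinned pages to mm->pinned_vm; ipath_release_user_pages_on_close() pushes the un-accounting to a workqueue that holds its own reference on the mm. The toy userspace model below covers only the limit check and the accounting; the mutex stands in for mmap_sem, the counter for mm->pinned_vm, and the 8-page limit is a made-up value.

#include <stdio.h>
#include <pthread.h>

/* Toy model of the RLIMIT_MEMLOCK check and pinned_vm accounting in the
 * deleted __ipath_get_user_pages()/ipath_release_user_pages().  The mutex
 * stands in for mmap_sem and the counter for mm->pinned_vm. */
static pthread_mutex_t mm_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pinned_vm;                 /* pages currently pinned */
static const unsigned long lock_limit = 8;      /* pretend RLIMIT_MEMLOCK, in pages */

static int pin_pages(unsigned long num_pages)
{
        int ret = 0;

        pthread_mutex_lock(&mm_lock);
        if (num_pages > lock_limit)
                ret = -1;                       /* request exceeds the memlock limit */
        else
                pinned_vm += num_pages;         /* pinned and accounted */
        pthread_mutex_unlock(&mm_lock);
        return ret;
}

static void unpin_pages(unsigned long num_pages)
{
        pthread_mutex_lock(&mm_lock);
        pinned_vm -= num_pages;
        pthread_mutex_unlock(&mm_lock);
}

int main(void)
{
        int ret;

        ret = pin_pages(4);
        printf("pin 4 pages:  ret=%d pinned_vm=%lu\n", ret, pinned_vm);

        ret = pin_pages(16);
        printf("pin 16 pages: ret=%d pinned_vm=%lu\n", ret, pinned_vm);

        unpin_pages(4);
        printf("after unpin:  pinned_vm=%lu\n", pinned_vm);
        return 0;
}

Like the original check, this only compares the size of the new request against the limit, not what is already pinned; build with -lpthread.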
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.c b/drivers/staging/rdma/ipath/ipath_user_sdma.c
deleted file mode 100644 (file)
index 8c12e3c..0000000
+++ /dev/null
@@ -1,874 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/uio.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_user_sdma.h"
-
-/* minimum size of header */
-#define IPATH_USER_SDMA_MIN_HEADER_LENGTH      64
-/* expected size of headers (for dma_pool) */
-#define IPATH_USER_SDMA_EXP_HEADER_LENGTH      64
-/* length mask in PBC (lower 11 bits) */
-#define IPATH_PBC_LENGTH_MASK                  ((1 << 11) - 1)
-
-struct ipath_user_sdma_pkt {
-       u8 naddr;               /* dimension of addr (1..3) ... */
-       u32 counter;            /* sdma pkts queued counter for this entry */
-       u64 added;              /* global descq number of entries */
-
-       struct {
-               u32 offset;                     /* offset for kvaddr, addr */
-               u32 length;                     /* length in page */
-               u8  put_page;                   /* should we put_page? */
-               u8  dma_mapped;                 /* is page dma_mapped? */
-               struct page *page;              /* may be NULL (coherent mem) */
-               void *kvaddr;                   /* FIXME: only for pio hack */
-               dma_addr_t addr;
-       } addr[4];   /* max pages, any more and we coalesce */
-       struct list_head list;  /* list element */
-};
-
-struct ipath_user_sdma_queue {
-       /*
-        * pkts sent to dma engine are queued on this
-        * list head.  the type of the elements of this
-        * list are struct ipath_user_sdma_pkt...
-        */
-       struct list_head sent;
-
-       /* headers with expected length are allocated from here... */
-       char header_cache_name[64];
-       struct dma_pool *header_cache;
-
-       /* packets are allocated from the slab cache... */
-       char pkt_slab_name[64];
-       struct kmem_cache *pkt_slab;
-
-       /* as packets go on the queued queue, they are counted... */
-       u32 counter;
-       u32 sent_counter;
-
-       /* dma page table */
-       struct rb_root dma_pages_root;
-
-       /* protect everything above... */
-       struct mutex lock;
-};
-
-struct ipath_user_sdma_queue *
-ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
-{
-       struct ipath_user_sdma_queue *pq =
-               kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
-
-       if (!pq)
-               goto done;
-
-       pq->counter = 0;
-       pq->sent_counter = 0;
-       INIT_LIST_HEAD(&pq->sent);
-
-       mutex_init(&pq->lock);
-
-       snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
-                "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
-       pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
-                                        sizeof(struct ipath_user_sdma_pkt),
-                                        0, 0, NULL);
-
-       if (!pq->pkt_slab)
-               goto err_kfree;
-
-       snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
-                "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
-       pq->header_cache = dma_pool_create(pq->header_cache_name,
-                                          dev,
-                                          IPATH_USER_SDMA_EXP_HEADER_LENGTH,
-                                          4, 0);
-       if (!pq->header_cache)
-               goto err_slab;
-
-       pq->dma_pages_root = RB_ROOT;
-
-       goto done;
-
-err_slab:
-       kmem_cache_destroy(pq->pkt_slab);
-err_kfree:
-       kfree(pq);
-       pq = NULL;
-
-done:
-       return pq;
-}
-
-static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
-                                     int i, size_t offset, size_t len,
-                                     int put_page, int dma_mapped,
-                                     struct page *page,
-                                     void *kvaddr, dma_addr_t dma_addr)
-{
-       pkt->addr[i].offset = offset;
-       pkt->addr[i].length = len;
-       pkt->addr[i].put_page = put_page;
-       pkt->addr[i].dma_mapped = dma_mapped;
-       pkt->addr[i].page = page;
-       pkt->addr[i].kvaddr = kvaddr;
-       pkt->addr[i].addr = dma_addr;
-}
-
-static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
-                                       u32 counter, size_t offset,
-                                       size_t len, int dma_mapped,
-                                       struct page *page,
-                                       void *kvaddr, dma_addr_t dma_addr)
-{
-       pkt->naddr = 1;
-       pkt->counter = counter;
-       ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
-                                 kvaddr, dma_addr);
-}
-
-/* we have too many pages in the iovec, coalesce to a single page */
-static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
-                                   struct ipath_user_sdma_pkt *pkt,
-                                   const struct iovec *iov,
-                                   unsigned long niov) {
-       int ret = 0;
-       struct page *page = alloc_page(GFP_KERNEL);
-       void *mpage_save;
-       char *mpage;
-       int i;
-       int len = 0;
-       dma_addr_t dma_addr;
-
-       if (!page) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       mpage = kmap(page);
-       mpage_save = mpage;
-       for (i = 0; i < niov; i++) {
-               int cfur;
-
-               cfur = copy_from_user(mpage,
-                                     iov[i].iov_base, iov[i].iov_len);
-               if (cfur) {
-                       ret = -EFAULT;
-                       goto free_unmap;
-               }
-
-               mpage += iov[i].iov_len;
-               len += iov[i].iov_len;
-       }
-
-       dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
-                               DMA_TO_DEVICE);
-       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-               ret = -ENOMEM;
-               goto free_unmap;
-       }
-
-       ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
-                                 dma_addr);
-       pkt->naddr = 2;
-
-       goto done;
-
-free_unmap:
-       kunmap(page);
-       __free_page(page);
-done:
-       return ret;
-}
-
-/* how many pages in this iovec element? */
-static int ipath_user_sdma_num_pages(const struct iovec *iov)
-{
-       const unsigned long addr  = (unsigned long) iov->iov_base;
-       const unsigned long  len  = iov->iov_len;
-       const unsigned long spage = addr & PAGE_MASK;
-       const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
-       return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
-
-/* truncate length to page boundary */
-static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
-{
-       const unsigned long offset = offset_in_page(addr);
-
-       return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
-}
-
-static void ipath_user_sdma_free_pkt_frag(struct device *dev,
-                                         struct ipath_user_sdma_queue *pq,
-                                         struct ipath_user_sdma_pkt *pkt,
-                                         int frag)
-{
-       const int i = frag;
-
-       if (pkt->addr[i].page) {
-               if (pkt->addr[i].dma_mapped)
-                       dma_unmap_page(dev,
-                                      pkt->addr[i].addr,
-                                      pkt->addr[i].length,
-                                      DMA_TO_DEVICE);
-
-               if (pkt->addr[i].kvaddr)
-                       kunmap(pkt->addr[i].page);
-
-               if (pkt->addr[i].put_page)
-                       put_page(pkt->addr[i].page);
-               else
-                       __free_page(pkt->addr[i].page);
-       } else if (pkt->addr[i].kvaddr)
-               /* free coherent mem from cache... */
-               dma_pool_free(pq->header_cache,
-                             pkt->addr[i].kvaddr, pkt->addr[i].addr);
-}
-
-/* return number of pages pinned... */
-static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
-                                    struct ipath_user_sdma_pkt *pkt,
-                                    unsigned long addr, int tlen, int npages)
-{
-       struct page *pages[2];
-       int j;
-       int ret;
-
-       ret = get_user_pages_fast(addr, npages, 0, pages);
-       if (ret != npages) {
-               int i;
-
-               for (i = 0; i < ret; i++)
-                       put_page(pages[i]);
-
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       for (j = 0; j < npages; j++) {
-               /* map the pages... */
-               const int flen =
-                       ipath_user_sdma_page_length(addr, tlen);
-               dma_addr_t dma_addr =
-                       dma_map_page(&dd->pcidev->dev,
-                                    pages[j], 0, flen, DMA_TO_DEVICE);
-               unsigned long fofs = offset_in_page(addr);
-
-               if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
-
-               ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
-                                         pages[j], kmap(pages[j]),
-                                         dma_addr);
-
-               pkt->naddr++;
-               addr += flen;
-               tlen -= flen;
-       }
-
-done:
-       return ret;
-}
-
-static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
-                                  struct ipath_user_sdma_queue *pq,
-                                  struct ipath_user_sdma_pkt *pkt,
-                                  const struct iovec *iov,
-                                  unsigned long niov)
-{
-       int ret = 0;
-       unsigned long idx;
-
-       for (idx = 0; idx < niov; idx++) {
-               const int npages = ipath_user_sdma_num_pages(iov + idx);
-               const unsigned long addr = (unsigned long) iov[idx].iov_base;
-
-               ret = ipath_user_sdma_pin_pages(dd, pkt,
-                                               addr, iov[idx].iov_len,
-                                               npages);
-               if (ret < 0)
-                       goto free_pkt;
-       }
-
-       goto done;
-
-free_pkt:
-       for (idx = 0; idx < pkt->naddr; idx++)
-               ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
-
-done:
-       return ret;
-}
-
-static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
-                                       struct ipath_user_sdma_queue *pq,
-                                       struct ipath_user_sdma_pkt *pkt,
-                                       const struct iovec *iov,
-                                       unsigned long niov, int npages)
-{
-       int ret = 0;
-
-       if (npages >= ARRAY_SIZE(pkt->addr))
-               ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
-       else
-               ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
-
-       return ret;
-}
-
-/* free a packet list -- free each packet's fragments and the packet itself */
-static void ipath_user_sdma_free_pkt_list(struct device *dev,
-                                         struct ipath_user_sdma_queue *pq,
-                                         struct list_head *list)
-{
-       struct ipath_user_sdma_pkt *pkt, *pkt_next;
-
-       list_for_each_entry_safe(pkt, pkt_next, list, list) {
-               int i;
-
-               for (i = 0; i < pkt->naddr; i++)
-                       ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
-
-               kmem_cache_free(pq->pkt_slab, pkt);
-       }
-}
-
-/*
- * copy headers, coalesce etc -- pq->lock must be held
- *
- * we queue all the packets to list, returning the
- * number of iovec entries consumed.  list must be empty
- * initially, as we clean it up if there is an error...
- */
-static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
-                                     struct ipath_user_sdma_queue *pq,
-                                     struct list_head *list,
-                                     const struct iovec *iov,
-                                     unsigned long niov,
-                                     int maxpkts)
-{
-       unsigned long idx = 0;
-       int ret = 0;
-       int npkts = 0;
-       struct page *page = NULL;
-       __le32 *pbc;
-       dma_addr_t dma_addr;
-       struct ipath_user_sdma_pkt *pkt = NULL;
-       size_t len;
-       size_t nw;
-       u32 counter = pq->counter;
-       int dma_mapped = 0;
-
-       while (idx < niov && npkts < maxpkts) {
-               const unsigned long addr = (unsigned long) iov[idx].iov_base;
-               const unsigned long idx_save = idx;
-               unsigned pktnw;
-               unsigned pktnwc;
-               int nfrags = 0;
-               int npages = 0;
-               int cfur;
-
-               dma_mapped = 0;
-               len = iov[idx].iov_len;
-               nw = len >> 2;
-               page = NULL;
-
-               pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
-               if (!pkt) {
-                       ret = -ENOMEM;
-                       goto free_list;
-               }
-
-               if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
-                   len > PAGE_SIZE || len & 3 || addr & 3) {
-                       ret = -EINVAL;
-                       goto free_pkt;
-               }
-
-               if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
-                       pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
-                                            &dma_addr);
-               else
-                       pbc = NULL;
-
-               if (!pbc) {
-                       page = alloc_page(GFP_KERNEL);
-                       if (!page) {
-                               ret = -ENOMEM;
-                               goto free_pkt;
-                       }
-                       pbc = kmap(page);
-               }
-
-               cfur = copy_from_user(pbc, iov[idx].iov_base, len);
-               if (cfur) {
-                       ret = -EFAULT;
-                       goto free_pbc;
-               }
-
-               /*
-                * this assignment is a bit strange.  it's because the
-                * pbc counts the number of 32 bit words in the full
-                * packet _except_ the first word of the pbc itself...
-                */
-               pktnwc = nw - 1;
-
-               /*
-                * pktnw computation yields the number of 32 bit words
-                * that the caller has indicated in the PBC.  note that
-                * this is one less than the total number of words that
-                * goes to the send DMA engine as the first 32 bit word
-                * of the PBC itself is not counted.  Armed with this count,
-                * we can verify that the packet is consistent with the
-                * iovec lengths.
-                */
-               pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
-               if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
-                       ret = -EINVAL;
-                       goto free_pbc;
-               }
-
-
-               idx++;
-               while (pktnwc < pktnw && idx < niov) {
-                       const size_t slen = iov[idx].iov_len;
-                       const unsigned long faddr =
-                               (unsigned long) iov[idx].iov_base;
-
-                       if (slen & 3 || faddr & 3 || !slen ||
-                           slen > PAGE_SIZE) {
-                               ret = -EINVAL;
-                               goto free_pbc;
-                       }
-
-                       npages++;
-                       if ((faddr & PAGE_MASK) !=
-                           ((faddr + slen - 1) & PAGE_MASK))
-                               npages++;
-
-                       pktnwc += slen >> 2;
-                       idx++;
-                       nfrags++;
-               }
-
-               if (pktnwc != pktnw) {
-                       ret = -EINVAL;
-                       goto free_pbc;
-               }
-
-               if (page) {
-                       dma_addr = dma_map_page(&dd->pcidev->dev,
-                                               page, 0, len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-                               ret = -ENOMEM;
-                               goto free_pbc;
-                       }
-
-                       dma_mapped = 1;
-               }
-
-               ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
-                                           page, pbc, dma_addr);
-
-               if (nfrags) {
-                       ret = ipath_user_sdma_init_payload(dd, pq, pkt,
-                                                          iov + idx_save + 1,
-                                                          nfrags, npages);
-                       if (ret < 0)
-                               goto free_pbc_dma;
-               }
-
-               counter++;
-               npkts++;
-
-               list_add_tail(&pkt->list, list);
-       }
-
-       ret = idx;
-       goto done;
-
-free_pbc_dma:
-       if (dma_mapped)
-               dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
-free_pbc:
-       if (page) {
-               kunmap(page);
-               __free_page(page);
-       } else
-               dma_pool_free(pq->header_cache, pbc, dma_addr);
-free_pkt:
-       kmem_cache_free(pq->pkt_slab, pkt);
-free_list:
-       ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
-done:
-       return ret;
-}
-
-static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
-                                                u32 c)
-{
-       pq->sent_counter = c;
-}
-
-/* try to clean out queue -- needs pq->lock */
-static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
-                                      struct ipath_user_sdma_queue *pq)
-{
-       struct list_head free_list;
-       struct ipath_user_sdma_pkt *pkt;
-       struct ipath_user_sdma_pkt *pkt_prev;
-       int ret = 0;
-
-       INIT_LIST_HEAD(&free_list);
-
-       list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
-               s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
-
-               if (descd < 0)
-                       break;
-
-               list_move_tail(&pkt->list, &free_list);
-
-               /* one more packet cleaned */
-               ret++;
-       }
-
-       if (!list_empty(&free_list)) {
-               u32 counter;
-
-               pkt = list_entry(free_list.prev,
-                                struct ipath_user_sdma_pkt, list);
-               counter = pkt->counter;
-
-               ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
-               ipath_user_sdma_set_complete_counter(pq, counter);
-       }
-
-       return ret;
-}
-
-void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
-{
-       if (!pq)
-               return;
-
-       kmem_cache_destroy(pq->pkt_slab);
-       dma_pool_destroy(pq->header_cache);
-       kfree(pq);
-}
-
-/* clean descriptor queue, returns > 0 if some elements cleaned */
-static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
-{
-       int ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       ret = ipath_sdma_make_progress(dd);
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       return ret;
-}
-
-/* we're in close, drain packets so that we can clean up successfully... */
-void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
-                                struct ipath_user_sdma_queue *pq)
-{
-       int i;
-
-       if (!pq)
-               return;
-
-       for (i = 0; i < 100; i++) {
-               mutex_lock(&pq->lock);
-               if (list_empty(&pq->sent)) {
-                       mutex_unlock(&pq->lock);
-                       break;
-               }
-               ipath_user_sdma_hwqueue_clean(dd);
-               ipath_user_sdma_queue_clean(dd, pq);
-               mutex_unlock(&pq->lock);
-               msleep(10);
-       }
-
-       if (!list_empty(&pq->sent)) {
-               struct list_head free_list;
-
-               printk(KERN_INFO "drain: lists not empty: forcing!\n");
-               INIT_LIST_HEAD(&free_list);
-               mutex_lock(&pq->lock);
-               list_splice_init(&pq->sent, &free_list);
-               ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
-               mutex_unlock(&pq->lock);
-       }
-}
-
-static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
-                                          u64 addr, u64 dwlen, u64 dwoffset)
-{
-       return cpu_to_le64(/* SDmaPhyAddr[31:0] */
-                          ((addr & 0xfffffffcULL) << 32) |
-                          /* SDmaGeneration[1:0] */
-                          ((dd->ipath_sdma_generation & 3ULL) << 30) |
-                          /* SDmaDwordCount[10:0] */
-                          ((dwlen & 0x7ffULL) << 16) |
-                          /* SDmaBufOffset[12:2] */
-                          (dwoffset & 0x7ffULL));
-}
-
-static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
-{
-       return descq | cpu_to_le64(1ULL << 12);
-}
-
-static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
-{
-                                             /* last */  /* dma head */
-       return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
-}
-
-static inline __le64 ipath_sdma_make_desc1(u64 addr)
-{
-       /* SDmaPhyAddr[47:32] */
-       return cpu_to_le64(addr >> 32);
-}
-
-static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
-                                     struct ipath_user_sdma_pkt *pkt, int idx,
-                                     unsigned ofs, u16 tail)
-{
-       const u64 addr = (u64) pkt->addr[idx].addr +
-               (u64) pkt->addr[idx].offset;
-       const u64 dwlen = (u64) pkt->addr[idx].length / 4;
-       __le64 *descqp;
-       __le64 descq0;
-
-       descqp = &dd->ipath_sdma_descq[tail].qw[0];
-
-       descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
-       if (idx == 0)
-               descq0 = ipath_sdma_make_first_desc0(descq0);
-       if (idx == pkt->naddr - 1)
-               descq0 = ipath_sdma_make_last_desc0(descq0);
-
-       descqp[0] = descq0;
-       descqp[1] = ipath_sdma_make_desc1(addr);
-}
-
-/* pq->lock must be held, get packets on the wire... */
-static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
-                                    struct ipath_user_sdma_queue *pq,
-                                    struct list_head *pktlist)
-{
-       int ret = 0;
-       unsigned long flags;
-       u16 tail;
-
-       if (list_empty(pktlist))
-               return 0;
-
-       if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
-               return -ECOMM;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
-               ret = -ECOMM;
-               goto unlock;
-       }
-
-       tail = dd->ipath_sdma_descq_tail;
-       while (!list_empty(pktlist)) {
-               struct ipath_user_sdma_pkt *pkt =
-                       list_entry(pktlist->next, struct ipath_user_sdma_pkt,
-                                  list);
-               int i;
-               unsigned ofs = 0;
-               u16 dtail = tail;
-
-               if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
-                       goto unlock_check_tail;
-
-               for (i = 0; i < pkt->naddr; i++) {
-                       ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
-                       ofs += pkt->addr[i].length >> 2;
-
-                       if (++tail == dd->ipath_sdma_descq_cnt) {
-                               tail = 0;
-                               ++dd->ipath_sdma_generation;
-                       }
-               }
-
-               if ((ofs<<2) > dd->ipath_ibmaxlen) {
-                       ipath_dbg("packet size %X > ibmax %X, fail\n",
-                               ofs<<2, dd->ipath_ibmaxlen);
-                       ret = -EMSGSIZE;
-                       goto unlock;
-               }
-
-               /*
-                * if the packet is >= 2KB mtu equivalent, we have to use
-                * the large buffers, and have to mark each descriptor as
-                * part of a large buffer packet.
-                */
-               if (ofs >= IPATH_SMALLBUF_DWORDS) {
-                       for (i = 0; i < pkt->naddr; i++) {
-                               dd->ipath_sdma_descq[dtail].qw[0] |=
-                                       cpu_to_le64(1ULL << 14);
-                               if (++dtail == dd->ipath_sdma_descq_cnt)
-                                       dtail = 0;
-                       }
-               }
-
-               dd->ipath_sdma_descq_added += pkt->naddr;
-               pkt->added = dd->ipath_sdma_descq_added;
-               list_move_tail(&pkt->list, &pq->sent);
-               ret++;
-       }
-
-unlock_check_tail:
-       /* advance the tail on the chip if necessary */
-       if (dd->ipath_sdma_descq_tail != tail) {
-               wmb();
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
-               dd->ipath_sdma_descq_tail = tail;
-       }
-
-unlock:
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       return ret;
-}
-
-int ipath_user_sdma_writev(struct ipath_devdata *dd,
-                          struct ipath_user_sdma_queue *pq,
-                          const struct iovec *iov,
-                          unsigned long dim)
-{
-       int ret = 0;
-       struct list_head list;
-       int npkts = 0;
-
-       INIT_LIST_HEAD(&list);
-
-       mutex_lock(&pq->lock);
-
-       if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
-               ipath_user_sdma_hwqueue_clean(dd);
-               ipath_user_sdma_queue_clean(dd, pq);
-       }
-
-       while (dim) {
-               const int mxp = 8;
-
-               ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
-               if (ret <= 0)
-                       goto done_unlock;
-               else {
-                       dim -= ret;
-                       iov += ret;
-               }
-
-               /* force packets onto the sdma hw queue... */
-               if (!list_empty(&list)) {
-                       /*
-                        * lazily clean hw queue.  the 4 is a guess of about
-                        * how many sdma descriptors a packet will take (it
-                        * doesn't have to be perfect).
-                        */
-                       if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
-                               ipath_user_sdma_hwqueue_clean(dd);
-                               ipath_user_sdma_queue_clean(dd, pq);
-                       }
-
-                       ret = ipath_user_sdma_push_pkts(dd, pq, &list);
-                       if (ret < 0)
-                               goto done_unlock;
-                       else {
-                               npkts += ret;
-                               pq->counter += ret;
-
-                               if (!list_empty(&list))
-                                       goto done_unlock;
-                       }
-               }
-       }
-
-done_unlock:
-       if (!list_empty(&list))
-               ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
-       mutex_unlock(&pq->lock);
-
-       return (ret < 0) ? ret : npkts;
-}
-
-int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
-                                 struct ipath_user_sdma_queue *pq)
-{
-       int ret = 0;
-
-       mutex_lock(&pq->lock);
-       ipath_user_sdma_hwqueue_clean(dd);
-       ret = ipath_user_sdma_queue_clean(dd, pq);
-       mutex_unlock(&pq->lock);
-
-       return ret;
-}
-
-u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
-{
-       return pq->sent_counter;
-}
-
-u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
-{
-       return pq->counter;
-}
-
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.h b/drivers/staging/rdma/ipath/ipath_user_sdma.h
deleted file mode 100644 (file)
index fc76316..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/device.h>
-
-struct ipath_user_sdma_queue;
-
-struct ipath_user_sdma_queue *
-ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
-void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
-
-int ipath_user_sdma_writev(struct ipath_devdata *dd,
-                          struct ipath_user_sdma_queue *pq,
-                          const struct iovec *iov,
-                          unsigned long dim);
-
-int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
-                                 struct ipath_user_sdma_queue *pq);
-
-void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
-                                struct ipath_user_sdma_queue *pq);
-
-u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
-u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c
deleted file mode 100644 (file)
index 53f9dca..0000000
+++ /dev/null
@@ -1,2376 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_mad.h>
-#include <rdma/ib_user_verbs.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/rculist.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-static unsigned int ib_ipath_qp_table_size = 251;
-module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(qp_table_size, "QP table size");
-
-unsigned int ib_ipath_lkey_table_size = 12;
-module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
-                  S_IRUGO);
-MODULE_PARM_DESC(lkey_table_size,
-                "LKEY table size in bits (2^n, 1 <= n <= 23)");
-
-static unsigned int ib_ipath_max_pds = 0xFFFF;
-module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_pds,
-                "Maximum number of protection domains to support");
-
-static unsigned int ib_ipath_max_ahs = 0xFFFF;
-module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-
-unsigned int ib_ipath_max_cqes = 0x2FFFF;
-module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_cqes,
-                "Maximum number of completion queue entries to support");
-
-unsigned int ib_ipath_max_cqs = 0x1FFFF;
-module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
-
-unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
-module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
-                  S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-
-unsigned int ib_ipath_max_qps = 16384;
-module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
-
-unsigned int ib_ipath_max_sges = 0x60;
-module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
-
-unsigned int ib_ipath_max_mcast_grps = 16384;
-module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
-                  S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_mcast_grps,
-                "Maximum number of multicast groups to support");
-
-unsigned int ib_ipath_max_mcast_qp_attached = 16;
-module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
-                  uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_mcast_qp_attached,
-                "Maximum number of attached QPs to support");
-
-unsigned int ib_ipath_max_srqs = 1024;
-module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
-
-unsigned int ib_ipath_max_srq_sges = 128;
-module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
-                  uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
-
-unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
-module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
-                  uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
-
-static unsigned int ib_ipath_disable_sma;
-module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(disable_sma, "Disable the SMA");
-
-/*
- * Note that it is OK to post send work requests in the SQE and ERR
- * states; ipath_do_send() will process them and generate error
- * completions as per IB 1.2 C10-96.
- */
-const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
-       [IB_QPS_RESET] = 0,
-       [IB_QPS_INIT] = IPATH_POST_RECV_OK,
-       [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
-       [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-           IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
-           IPATH_PROCESS_NEXT_SEND_OK,
-       [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-           IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
-       [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-           IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
-       [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
-           IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
-};
-
-struct ipath_ucontext {
-       struct ib_ucontext ibucontext;
-};
-
-static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
-                                                 *ibucontext)
-{
-       return container_of(ibucontext, struct ipath_ucontext, ibucontext);
-}
-
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
-       [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-       [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-       [IB_WR_SEND] = IB_WC_SEND,
-       [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-       [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-       [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
- * System image GUID.
- */
-static __be64 sys_image_guid;
-
-/**
- * ipath_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
-{
-       struct ipath_sge *sge = &ss->sge;
-
-       while (length) {
-               u32 len = sge->length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               memcpy(sge->vaddr, data, len);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               data += len;
-               length -= len;
-       }
-}
-
-/**
- * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
- * @ss: the SGE state
- * @length: the number of bytes to skip
- */
-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
-{
-       struct ipath_sge *sge = &ss->sge;
-
-       while (length) {
-               u32 len = sge->length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               length -= len;
-       }
-}
-
-/*
- * Count the number of DMA descriptors needed to send length bytes of data.
- * Don't modify the ipath_sge_state to get the count.
- * Return zero if any of the segments is not aligned.
- */
-static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
-{
-       struct ipath_sge *sg_list = ss->sg_list;
-       struct ipath_sge sge = ss->sge;
-       u8 num_sge = ss->num_sge;
-       u32 ndesc = 1;  /* count the header */
-
-       while (length) {
-               u32 len = sge.length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge.sge_length)
-                       len = sge.sge_length;
-               BUG_ON(len == 0);
-               if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
-                   (len != length && (len & (sizeof(u32) - 1)))) {
-                       ndesc = 0;
-                       break;
-               }
-               ndesc++;
-               sge.vaddr += len;
-               sge.length -= len;
-               sge.sge_length -= len;
-               if (sge.sge_length == 0) {
-                       if (--num_sge)
-                               sge = *sg_list++;
-               } else if (sge.length == 0 && sge.mr != NULL) {
-                       if (++sge.n >= IPATH_SEGSZ) {
-                               if (++sge.m >= sge.mr->mapsz)
-                                       break;
-                               sge.n = 0;
-                       }
-                       sge.vaddr =
-                               sge.mr->map[sge.m]->segs[sge.n].vaddr;
-                       sge.length =
-                               sge.mr->map[sge.m]->segs[sge.n].length;
-               }
-               length -= len;
-       }
-       return ndesc;
-}
-
-/*
- * Copy from the SGEs to the data buffer.
- */
-static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
-                               u32 length)
-{
-       struct ipath_sge *sge = &ss->sge;
-
-       while (length) {
-               u32 len = sge->length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               memcpy(data, sge->vaddr, len);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               data += len;
-               length -= len;
-       }
-}
-
-/**
- * ipath_post_one_send - post one RC, UC, or UD send work request
- * @qp: the QP to post on
- * @wr: the work request to send
- */
-static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
-{
-       struct ipath_swqe *wqe;
-       u32 next;
-       int i;
-       int j;
-       int acc;
-       int ret;
-       unsigned long flags;
-       struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       if (qp->ibqp.qp_type != IB_QPT_SMI &&
-           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-               ret = -ENETDOWN;
-               goto bail;
-       }
-
-       /* Check that state is OK to post send. */
-       if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
-               goto bail_inval;
-
-       /* IB spec says that num_sge == 0 is OK. */
-       if (wr->num_sge > qp->s_max_sge)
-               goto bail_inval;
-
-       /*
-        * Don't allow RDMA reads or atomic operations on UC, or
-        * undefined operations.
-        * Make sure buffer is large enough to hold the result for atomics.
-        */
-       if (qp->ibqp.qp_type == IB_QPT_UC) {
-               if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
-                       goto bail_inval;
-       } else if (qp->ibqp.qp_type == IB_QPT_UD) {
-               /* Check UD opcode */
-               if (wr->opcode != IB_WR_SEND &&
-                   wr->opcode != IB_WR_SEND_WITH_IMM)
-                       goto bail_inval;
-               /* Check UD destination address PD */
-               if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
-                       goto bail_inval;
-       } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
-               goto bail_inval;
-       else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
-                  (wr->num_sge == 0 ||
-                   wr->sg_list[0].length < sizeof(u64) ||
-                   wr->sg_list[0].addr & (sizeof(u64) - 1)))
-               goto bail_inval;
-       else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
-               goto bail_inval;
-
-       next = qp->s_head + 1;
-       if (next >= qp->s_size)
-               next = 0;
-       if (next == qp->s_last) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       wqe = get_swqe_ptr(qp, qp->s_head);
-
-       if (qp->ibqp.qp_type != IB_QPT_UC &&
-           qp->ibqp.qp_type != IB_QPT_RC)
-               memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
-       else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
-                wr->opcode == IB_WR_RDMA_WRITE ||
-                wr->opcode == IB_WR_RDMA_READ)
-               memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
-       else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-                wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-               memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
-       else
-               memcpy(&wqe->wr, wr, sizeof(wqe->wr));
-
-       wqe->length = 0;
-       if (wr->num_sge) {
-               acc = wr->opcode >= IB_WR_RDMA_READ ?
-                       IB_ACCESS_LOCAL_WRITE : 0;
-               for (i = 0, j = 0; i < wr->num_sge; i++) {
-                       u32 length = wr->sg_list[i].length;
-                       int ok;
-
-                       if (length == 0)
-                               continue;
-                       ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
-                                          &wr->sg_list[i], acc);
-                       if (!ok)
-                               goto bail_inval;
-                       wqe->length += length;
-                       j++;
-               }
-               wqe->wr.num_sge = j;
-       }
-       if (qp->ibqp.qp_type == IB_QPT_UC ||
-           qp->ibqp.qp_type == IB_QPT_RC) {
-               if (wqe->length > 0x80000000U)
-                       goto bail_inval;
-       } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
-               goto bail_inval;
-       wqe->ssn = qp->s_ssn++;
-       qp->s_head = next;
-
-       ret = 0;
-       goto bail;
-
-bail_inval:
-       ret = -EINVAL;
-bail:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       return ret;
-}
-
-/**
- * ipath_post_send - post a send on a QP
- * @ibqp: the QP to post the send on
- * @wr: the list of work requests to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-                          struct ib_send_wr **bad_wr)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       int err = 0;
-
-       for (; wr; wr = wr->next) {
-               err = ipath_post_one_send(qp, wr);
-               if (err) {
-                       *bad_wr = wr;
-                       goto bail;
-               }
-       }
-
-       /* Try to do the send work in the caller's context. */
-       ipath_do_send((unsigned long) qp);
-
-bail:
-       return err;
-}
-
-/**
- * ipath_post_receive - post a receive on a QP
- * @ibqp: the QP to post the receive on
- * @wr: the WR to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-                             struct ib_recv_wr **bad_wr)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       struct ipath_rwq *wq = qp->r_rq.wq;
-       unsigned long flags;
-       int ret;
-
-       /* Check that state is OK to post receive. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
-               *bad_wr = wr;
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       for (; wr; wr = wr->next) {
-               struct ipath_rwqe *wqe;
-               u32 next;
-               int i;
-
-               if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
-                       *bad_wr = wr;
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               spin_lock_irqsave(&qp->r_rq.lock, flags);
-               next = wq->head + 1;
-               if (next >= qp->r_rq.size)
-                       next = 0;
-               if (next == wq->tail) {
-                       spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-                       *bad_wr = wr;
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-
-               wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
-               wqe->wr_id = wr->wr_id;
-               wqe->num_sge = wr->num_sge;
-               for (i = 0; i < wr->num_sge; i++)
-                       wqe->sg_list[i] = wr->sg_list[i];
-               /* Make sure queue entry is written before the head index. */
-               smp_wmb();
-               wq->head = next;
-               spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-       }
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_qp_rcv - process an incoming packet on a QP
- * @dev: the device the packet came on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from ipath_ib_rcv() to process an incoming packet
- * for the given QP.
- * Called at interrupt level.
- */
-static void ipath_qp_rcv(struct ipath_ibdev *dev,
-                        struct ipath_ib_header *hdr, int has_grh,
-                        void *data, u32 tlen, struct ipath_qp *qp)
-{
-       /* Check for valid receive state. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-               dev->n_pkt_drops++;
-               return;
-       }
-
-       switch (qp->ibqp.qp_type) {
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               if (ib_ipath_disable_sma)
-                       break;
-               /* FALLTHROUGH */
-       case IB_QPT_UD:
-               ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
-               break;
-
-       case IB_QPT_RC:
-               ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
-               break;
-
-       case IB_QPT_UC:
-               ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
-               break;
-
-       default:
-               break;
-       }
-}
-
-/**
- * ipath_ib_rcv - process an incoming packet
- * @arg: the device pointer
- * @rhdr: the header of the packet
- * @data: the packet data
- * @tlen: the packet length
- *
- * This is called from ipath_kreceive() to process an incoming packet at
- * interrupt level. Tlen is the length of the header + data + CRC in bytes.
- */
-void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
-                 u32 tlen)
-{
-       struct ipath_ib_header *hdr = rhdr;
-       struct ipath_other_headers *ohdr;
-       struct ipath_qp *qp;
-       u32 qp_num;
-       int lnh;
-       u8 opcode;
-       u16 lid;
-
-       if (unlikely(dev == NULL))
-               goto bail;
-
-       if (unlikely(tlen < 24)) {      /* LRH+BTH+CRC */
-               dev->rcv_errors++;
-               goto bail;
-       }
-
-       /* Check for a valid destination LID (see ch. 7.11.1). */
-       lid = be16_to_cpu(hdr->lrh[1]);
-       if (lid < IPATH_MULTICAST_LID_BASE) {
-               lid &= ~((1 << dev->dd->ipath_lmc) - 1);
-               if (unlikely(lid != dev->dd->ipath_lid)) {
-                       dev->rcv_errors++;
-                       goto bail;
-               }
-       }
-
-       /* Check for GRH */
-       lnh = be16_to_cpu(hdr->lrh[0]) & 3;
-       if (lnh == IPATH_LRH_BTH)
-               ohdr = &hdr->u.oth;
-       else if (lnh == IPATH_LRH_GRH)
-               ohdr = &hdr->u.l.oth;
-       else {
-               dev->rcv_errors++;
-               goto bail;
-       }
-
-       opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
-       dev->opstats[opcode].n_bytes += tlen;
-       dev->opstats[opcode].n_packets++;
-
-       /* Get the destination QP number. */
-       qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
-       if (qp_num == IPATH_MULTICAST_QPN) {
-               struct ipath_mcast *mcast;
-               struct ipath_mcast_qp *p;
-
-               if (lnh != IPATH_LRH_GRH) {
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-               mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
-               if (mcast == NULL) {
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-               dev->n_multicast_rcv++;
-               list_for_each_entry_rcu(p, &mcast->qp_list, list)
-                       ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
-               /*
-                * Notify ipath_multicast_detach() if it is waiting for us
-                * to finish.
-                */
-               if (atomic_dec_return(&mcast->refcount) <= 1)
-                       wake_up(&mcast->wait);
-       } else {
-               qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
-               if (qp) {
-                       dev->n_unicast_rcv++;
-                       ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
-                                    tlen, qp);
-                       /*
-                        * Notify ipath_destroy_qp() if it is waiting
-                        * for us to finish.
-                        */
-                       if (atomic_dec_and_test(&qp->refcount))
-                               wake_up(&qp->wait);
-               } else
-                       dev->n_pkt_drops++;
-       }
-
-bail:;
-}
-
-/**
- * ipath_ib_timer - verbs timer
- * @arg: the device pointer
- *
- * This is called from ipath_do_rcv_timer() at interrupt level to check for
- * QPs which need retransmits and to collect performance numbers.
- */
-static void ipath_ib_timer(struct ipath_ibdev *dev)
-{
-       struct ipath_qp *resend = NULL;
-       struct ipath_qp *rnr = NULL;
-       struct list_head *last;
-       struct ipath_qp *qp;
-       unsigned long flags;
-
-       if (dev == NULL)
-               return;
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       /* Start filling the next pending queue. */
-       if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
-               dev->pending_index = 0;
-       /* Save any requests still in the new queue, they have timed out. */
-       last = &dev->pending[dev->pending_index];
-       while (!list_empty(last)) {
-               qp = list_entry(last->next, struct ipath_qp, timerwait);
-               list_del_init(&qp->timerwait);
-               qp->timer_next = resend;
-               resend = qp;
-               atomic_inc(&qp->refcount);
-       }
-       last = &dev->rnrwait;
-       if (!list_empty(last)) {
-               qp = list_entry(last->next, struct ipath_qp, timerwait);
-               if (--qp->s_rnr_timeout == 0) {
-                       do {
-                               list_del_init(&qp->timerwait);
-                               qp->timer_next = rnr;
-                               rnr = qp;
-                               atomic_inc(&qp->refcount);
-                               if (list_empty(last))
-                                       break;
-                               qp = list_entry(last->next, struct ipath_qp,
-                                               timerwait);
-                       } while (qp->s_rnr_timeout == 0);
-               }
-       }
-       /*
-        * We should only be in the started state if pma_sample_start != 0
-        */
-       if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
-           --dev->pma_sample_start == 0) {
-               dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
-               ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
-                                       &dev->ipath_rword,
-                                       &dev->ipath_spkts,
-                                       &dev->ipath_rpkts,
-                                       &dev->ipath_xmit_wait);
-       }
-       if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
-               if (dev->pma_sample_interval == 0) {
-                       u64 ta, tb, tc, td, te;
-
-                       dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
-                       ipath_snapshot_counters(dev->dd, &ta, &tb,
-                                               &tc, &td, &te);
-
-                       dev->ipath_sword = ta - dev->ipath_sword;
-                       dev->ipath_rword = tb - dev->ipath_rword;
-                       dev->ipath_spkts = tc - dev->ipath_spkts;
-                       dev->ipath_rpkts = td - dev->ipath_rpkts;
-                       dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
-               } else {
-                       dev->pma_sample_interval--;
-               }
-       }
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-       /* XXX What if timer fires again while this is running? */
-       while (resend != NULL) {
-               qp = resend;
-               resend = qp->timer_next;
-
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if (qp->s_last != qp->s_tail &&
-                   ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
-                       dev->n_timeouts++;
-                       ipath_restart_rc(qp, qp->s_last_psn + 1);
-               }
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-
-               /* Notify ipath_destroy_qp() if it is waiting. */
-               if (atomic_dec_and_test(&qp->refcount))
-                       wake_up(&qp->wait);
-       }
-       while (rnr != NULL) {
-               qp = rnr;
-               rnr = qp->timer_next;
-
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
-                       ipath_schedule_send(qp);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-
-               /* Notify ipath_destroy_qp() if it is waiting. */
-               if (atomic_dec_and_test(&qp->refcount))
-                       wake_up(&qp->wait);
-       }
-}
-
-static void update_sge(struct ipath_sge_state *ss, u32 length)
-{
-       struct ipath_sge *sge = &ss->sge;
-
-       sge->vaddr += length;
-       sge->length -= length;
-       sge->sge_length -= length;
-       if (sge->sge_length == 0) {
-               if (--ss->num_sge)
-                       *sge = *ss->sg_list++;
-       } else if (sge->length == 0 && sge->mr != NULL) {
-               if (++sge->n >= IPATH_SEGSZ) {
-                       if (++sge->m >= sge->mr->mapsz)
-                               return;
-                       sge->n = 0;
-               }
-               sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
-               sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
-       }
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-       return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-       return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-       data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
-       data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-       return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-       return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-       return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-       data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
-       data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-       return data;
-}
-#endif
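-
-/*
- * A worked example (illustrative values, little-endian build):
- * clear_upper_bytes(0xAABBCCDD, 1, 0) shifts left by 24 and back
- * right by 24, yielding 0x000000DD, i.e. only the lowest source
- * byte is kept; with off = 1 the same byte lands one position
- * higher, 0x0000DD00.  copy_io() below uses these helpers to pack
- * unaligned source bytes into whole dwords for the PIO buffer.
- */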
-
-static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
-                   u32 length, unsigned flush_wc)
-{
-       u32 extra = 0;
-       u32 data = 0;
-       u32 last;
-
-       while (1) {
-               u32 len = ss->sge.length;
-               u32 off;
-
-               if (len > length)
-                       len = length;
-               if (len > ss->sge.sge_length)
-                       len = ss->sge.sge_length;
-               BUG_ON(len == 0);
-               /* If the source address is not aligned, try to align it. */
-               off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
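-               /*
-                * E.g. (hypothetical address): a vaddr ending in ...2
-                * gives off = 2, so the aligned read below starts two
-                * bytes early and get_upper_bits() drops the two bytes
-                * that precede the source pointer.
-                */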
-               if (off) {
-                       u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
-                                           ~(sizeof(u32) - 1));
-                       u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
-                       u32 y;
-
-                       y = sizeof(u32) - off;
-                       if (len > y)
-                               len = y;
-                       if (len + extra >= sizeof(u32)) {
-                               data |= set_upper_bits(v, extra *
-                                                      BITS_PER_BYTE);
-                               len = sizeof(u32) - extra;
-                               if (len == length) {
-                                       last = data;
-                                       break;
-                               }
-                               __raw_writel(data, piobuf);
-                               piobuf++;
-                               extra = 0;
-                               data = 0;
-                       } else {
-                               /* Clear unused upper bytes */
-                               data |= clear_upper_bytes(v, len, extra);
-                               if (len == length) {
-                                       last = data;
-                                       break;
-                               }
-                               extra += len;
-                       }
-               } else if (extra) {
-                       /* Source address is aligned. */
-                       u32 *addr = (u32 *) ss->sge.vaddr;
-                       int shift = extra * BITS_PER_BYTE;
-                       int ushift = 32 - shift;
-                       u32 l = len;
-
-                       while (l >= sizeof(u32)) {
-                               u32 v = *addr;
-
-                               data |= set_upper_bits(v, shift);
-                               __raw_writel(data, piobuf);
-                               data = get_upper_bits(v, ushift);
-                               piobuf++;
-                               addr++;
-                               l -= sizeof(u32);
-                       }
-                       /*
-                        * 'data' now holds 'extra' carry bytes and fewer than
-                        * a dword of source bytes ('l') may still remain.
-                        */
-                       if (l) {
-                               u32 v = *addr;
-
-                               if (l + extra >= sizeof(u32)) {
-                                       data |= set_upper_bits(v, shift);
-                                       len -= l + extra - sizeof(u32);
-                                       if (len == length) {
-                                               last = data;
-                                               break;
-                                       }
-                                       __raw_writel(data, piobuf);
-                                       piobuf++;
-                                       extra = 0;
-                                       data = 0;
-                               } else {
-                                       /* Clear unused upper bytes */
-                                       data |= clear_upper_bytes(v, l,
-                                                                 extra);
-                                       if (len == length) {
-                                               last = data;
-                                               break;
-                                       }
-                                       extra += l;
-                               }
-                       } else if (len == length) {
-                               last = data;
-                               break;
-                       }
-               } else if (len == length) {
-                       u32 w;
-
-                       /*
-                        * Need to round up for the last dword in the
-                        * packet.
-                        */
-                       w = (len + 3) >> 2;
-                       __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
-                       piobuf += w - 1;
-                       last = ((u32 *) ss->sge.vaddr)[w - 1];
-                       break;
-               } else {
-                       u32 w = len >> 2;
-
-                       __iowrite32_copy(piobuf, ss->sge.vaddr, w);
-                       piobuf += w;
-
-                       extra = len & (sizeof(u32) - 1);
-                       if (extra) {
-                               u32 v = ((u32 *) ss->sge.vaddr)[w];
-
-                               /* Clear unused upper bytes */
-                               data = clear_upper_bytes(v, extra, 0);
-                       }
-               }
-               update_sge(ss, len);
-               length -= len;
-       }
-       /* Update address before sending packet. */
-       update_sge(ss, length);
-       if (flush_wc) {
-               /* must flush everything early, before the trigger word */
-               ipath_flush_wc();
-               __raw_writel(last, piobuf);
-               /* be sure trigger word is written */
-               ipath_flush_wc();
-       } else
-               __raw_writel(last, piobuf);
-}
-
-/*
- * Convert IB rate to delay multiplier.
- */
-unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
-{
-       switch (rate) {
-       case IB_RATE_2_5_GBPS: return 8;
-       case IB_RATE_5_GBPS:   return 4;
-       case IB_RATE_10_GBPS:  return 2;
-       case IB_RATE_20_GBPS:  return 1;
-       default:               return 0;
-       }
-}
-
-/*
- * Convert delay multiplier to IB rate
- */
-static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
-{
-       switch (mult) {
-       case 8:  return IB_RATE_2_5_GBPS;
-       case 4:  return IB_RATE_5_GBPS;
-       case 2:  return IB_RATE_10_GBPS;
-       case 1:  return IB_RATE_20_GBPS;
-       default: return IB_RATE_PORT_CURRENT;
-       }
-}
-
-static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
-{
-       struct ipath_verbs_txreq *tx = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       if (!list_empty(&dev->txreq_free)) {
-               struct list_head *l = dev->txreq_free.next;
-
-               list_del(l);
-               tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
-       }
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-       return tx;
-}
-
-static inline void put_txreq(struct ipath_ibdev *dev,
-                            struct ipath_verbs_txreq *tx)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       list_add(&tx->txreq.list, &dev->txreq_free);
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-}
-
-static void sdma_complete(void *cookie, int status)
-{
-       struct ipath_verbs_txreq *tx = cookie;
-       struct ipath_qp *qp = tx->qp;
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       unsigned long flags;
-       enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
-               IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
-
-       if (atomic_dec_and_test(&qp->s_dma_busy)) {
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if (tx->wqe)
-                       ipath_send_complete(qp, tx->wqe, ibs);
-               if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
-                    qp->s_last != qp->s_head) ||
-                   (qp->s_flags & IPATH_S_WAIT_DMA))
-                       ipath_schedule_send(qp);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-               wake_up(&qp->wait_dma);
-       } else if (tx->wqe) {
-               spin_lock_irqsave(&qp->s_lock, flags);
-               ipath_send_complete(qp, tx->wqe, ibs);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-       }
-
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
-               kfree(tx->txreq.map_addr);
-       put_txreq(dev, tx);
-
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-}
-
-static void decrement_dma_busy(struct ipath_qp *qp)
-{
-       unsigned long flags;
-
-       if (atomic_dec_and_test(&qp->s_dma_busy)) {
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
-                    qp->s_last != qp->s_head) ||
-                   (qp->s_flags & IPATH_S_WAIT_DMA))
-                       ipath_schedule_send(qp);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-               wake_up(&qp->wait_dma);
-       }
-}
-
-/*
- * Compute the number of clock cycles of delay before sending the next packet.
- * The multipliers reflect the number of clocks for the fastest rate so
- * one tick at 4xDDR is 8 ticks at 1xSDR.
- * If the destination port will take longer to receive a packet than
- * the outgoing link can send it, we need to delay sending the next packet
- * by the difference in time it takes the receiver to receive and the sender
- * to send this packet.
- * Note that this delay is always correct for UC and RC but not always
- * optimal for UD. For UD, the destination HCA can be different for each
- * packet, in which case, we could send packets to a different destination
- * while "waiting" for the delay. The overhead for doing this without
- * HW support is more than just paying the cost of delaying some packets
- * unnecessarily.
- */
-static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
-{
-       return (rcv_mult > snd_mult) ?
-               (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
-}
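-
-/*
- * For instance (hypothetical numbers): a 10-dword packet sent at
- * 4X DDR (snd_mult = 1, per ipath_ib_rate_to_mult()) to a 1X SDR
- * receiver (rcv_mult = 8) is delayed by (10 * (8 - 1) + 1) >> 1 =
- * 35 ticks; if the receiver is at least as fast as the sender the
- * delay is 0.
- */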
-
-static int ipath_verbs_send_dma(struct ipath_qp *qp,
-                               struct ipath_ib_header *hdr, u32 hdrwords,
-                               struct ipath_sge_state *ss, u32 len,
-                               u32 plen, u32 dwords)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_devdata *dd = dev->dd;
-       struct ipath_verbs_txreq *tx;
-       u32 *piobuf;
-       u32 control;
-       u32 ndesc;
-       int ret;
-
-       tx = qp->s_tx;
-       if (tx) {
-               qp->s_tx = NULL;
-               /* resend previously constructed packet */
-               atomic_inc(&qp->s_dma_busy);
-               ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
-               if (ret) {
-                       qp->s_tx = tx;
-                       decrement_dma_busy(qp);
-               }
-               goto bail;
-       }
-
-       tx = get_txreq(dev);
-       if (!tx) {
-               ret = -EBUSY;
-               goto bail;
-       }
-
-       /*
-        * Get the saved delay count we computed for the previous packet
-        * and save the delay count for this packet to be used next time
-        * we get here.
-        */
-       control = qp->s_pkt_delay;
-       qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
-
-       tx->qp = qp;
-       atomic_inc(&qp->refcount);
-       tx->wqe = qp->s_wqe;
-       tx->txreq.callback = sdma_complete;
-       tx->txreq.callback_cookie = tx;
-       tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
-               IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
-       if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
-               tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
-
-       /* VL15 packets bypass credit check */
-       if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
-               control |= 1ULL << 31;
-               tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
-       }
-
-       if (len) {
-               /*
-                * Don't try to DMA if it takes more descriptors than
-                * the queue holds.
-                */
-               ndesc = ipath_count_sge(ss, len);
-               if (ndesc >= dd->ipath_sdma_descq_cnt)
-                       ndesc = 0;
-       } else
-               ndesc = 1;
-       if (ndesc) {
-               tx->hdr.pbc[0] = cpu_to_le32(plen);
-               tx->hdr.pbc[1] = cpu_to_le32(control);
-               memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
-               tx->txreq.sg_count = ndesc;
-               tx->map_len = (hdrwords + 2) << 2;
-               tx->txreq.map_addr = &tx->hdr;
-               atomic_inc(&qp->s_dma_busy);
-               ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
-               if (ret) {
-                       /* save ss and length in dwords */
-                       tx->ss = ss;
-                       tx->len = dwords;
-                       qp->s_tx = tx;
-                       decrement_dma_busy(qp);
-               }
-               goto bail;
-       }
-
-       /* Allocate a buffer and copy the header and payload to it. */
-       tx->map_len = (plen + 1) << 2;
-       piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
-       if (unlikely(piobuf == NULL)) {
-               ret = -EBUSY;
-               goto err_tx;
-       }
-       tx->txreq.map_addr = piobuf;
-       tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
-       tx->txreq.sg_count = 1;
-
-       *piobuf++ = (__force u32) cpu_to_le32(plen);
-       *piobuf++ = (__force u32) cpu_to_le32(control);
-       memcpy(piobuf, hdr, hdrwords << 2);
-       ipath_copy_from_sge(piobuf + hdrwords, ss, len);
-
-       atomic_inc(&qp->s_dma_busy);
-       ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
-       /*
-        * If we couldn't queue the DMA request, save the info
-        * and try again later rather than destroying the
-        * buffer and undoing the side effects of the copy.
-        */
-       if (ret) {
-               tx->ss = NULL;
-               tx->len = 0;
-               qp->s_tx = tx;
-               decrement_dma_busy(qp);
-       }
-       dev->n_unaligned++;
-       goto bail;
-
-err_tx:
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-       put_txreq(dev, tx);
-bail:
-       return ret;
-}
-
-static int ipath_verbs_send_pio(struct ipath_qp *qp,
-                               struct ipath_ib_header *ibhdr, u32 hdrwords,
-                               struct ipath_sge_state *ss, u32 len,
-                               u32 plen, u32 dwords)
-{
-       struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-       u32 *hdr = (u32 *) ibhdr;
-       u32 __iomem *piobuf;
-       unsigned flush_wc;
-       u32 control;
-       int ret;
-       unsigned long flags;
-
-       piobuf = ipath_getpiobuf(dd, plen, NULL);
-       if (unlikely(piobuf == NULL)) {
-               ret = -EBUSY;
-               goto bail;
-       }
-
-       /*
-        * Get the saved delay count we computed for the previous packet
-        * and save the delay count for this packet to be used next time
-        * we get here.
-        */
-       control = qp->s_pkt_delay;
-       qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
-
-       /* VL15 packets bypass credit check */
-       if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
-               control |= 1ULL << 31;
-
-       /*
-        * Write the length to the control qword plus any needed flags.
-        * We have to flush after the PBC for correctness on some CPUs,
-        * or the WC buffer can be written out of order.
-        */
-       writeq(((u64) control << 32) | plen, piobuf);
-       piobuf += 2;
-
-       flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
-       if (len == 0) {
-               /*
-                * If there is just the header portion, we must flush before
-                * writing the last word of the header for correctness, and
-                * again after the last header word (the trigger word).
-                */
-               if (flush_wc) {
-                       ipath_flush_wc();
-                       __iowrite32_copy(piobuf, hdr, hdrwords - 1);
-                       ipath_flush_wc();
-                       __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
-                       ipath_flush_wc();
-               } else
-                       __iowrite32_copy(piobuf, hdr, hdrwords);
-               goto done;
-       }
-
-       if (flush_wc)
-               ipath_flush_wc();
-       __iowrite32_copy(piobuf, hdr, hdrwords);
-       piobuf += hdrwords;
-
-       /* The common case is aligned and contained in one segment. */
-       if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
-                  !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
-               u32 *addr = (u32 *) ss->sge.vaddr;
-
-               /* Update address before sending packet. */
-               update_sge(ss, len);
-               if (flush_wc) {
-                       __iowrite32_copy(piobuf, addr, dwords - 1);
-                       /* must flush everything early, before the trigger word */
-                       ipath_flush_wc();
-                       __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
-                       /* be sure trigger word is written */
-                       ipath_flush_wc();
-               } else
-                       __iowrite32_copy(piobuf, addr, dwords);
-               goto done;
-       }
-       copy_io(piobuf, ss, len, flush_wc);
-done:
-       if (qp->s_wqe) {
-               spin_lock_irqsave(&qp->s_lock, flags);
-               ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_verbs_send - send a packet
- * @qp: the QP to send on
- * @hdr: the packet header
- * @hdrwords: the number of 32-bit words in the header
- * @ss: the SGE to send
- * @len: the length of the packet in bytes
- */
-int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
-                    u32 hdrwords, struct ipath_sge_state *ss, u32 len)
-{
-       struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-       u32 plen;
-       int ret;
-       u32 dwords = (len + 3) >> 2;
-
-       /*
-        * Calculate the send buffer trigger address.
-        * The +1 counts for the pbc control dword following the pbc length.
-        */
-       plen = hdrwords + dwords + 1;
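-       /*
-        * E.g. (made-up sizes): a 13-byte payload gives
-        * dwords = (13 + 3) >> 2 = 4, so with a 10-dword header
-        * plen = 10 + 4 + 1 = 15 dwords including the PBC control word.
-        */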
-
-       /*
-        * VL15 packets (IB_QPT_SMI) will always use PIO, so we
-        * can defer SDMA restart until link goes ACTIVE without
-        * worrying about just how we got there.
-        */
-       if (qp->ibqp.qp_type == IB_QPT_SMI ||
-           !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
-               ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
-                                          plen, dwords);
-       else
-               ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
-                                          plen, dwords);
-
-       return ret;
-}
-
-int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-                           u64 *rwords, u64 *spkts, u64 *rpkts,
-                           u64 *xmit_wait)
-{
-       int ret;
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               /* no hardware, freeze, etc. */
-               ret = -EINVAL;
-               goto bail;
-       }
-       *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-       *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-       *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-       *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-       *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_get_counters - get various chip counters
- * @dd: the infinipath device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int ipath_get_counters(struct ipath_devdata *dd,
-                      struct ipath_verbs_counters *cntrs)
-{
-       struct ipath_cregs const *crp = dd->ipath_cregs;
-       int ret;
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               /* no hardware, freeze, etc. */
-               ret = -EINVAL;
-               goto bail;
-       }
-       cntrs->symbol_error_counter =
-               ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
-       cntrs->link_error_recovery_counter =
-               ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
-       /*
-        * The link downed counter counts when the other side downs the
-        * connection.  We add in the number of times we downed the link
-        * due to local link integrity errors to compensate.
-        */
-       cntrs->link_downed_counter =
-               ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
-       cntrs->port_rcv_errors =
-               ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
-               ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
-               ipath_snap_cntr(dd, crp->cr_portovflcnt) +
-               ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
-               ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
-               ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
-               ipath_snap_cntr(dd, crp->cr_erricrccnt) +
-               ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
-               ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
-               ipath_snap_cntr(dd, crp->cr_badformatcnt) +
-               dd->ipath_rxfc_unsupvl_errs;
-       if (crp->cr_rxotherlocalphyerrcnt)
-               cntrs->port_rcv_errors +=
-                       ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
-       if (crp->cr_rxvlerrcnt)
-               cntrs->port_rcv_errors +=
-                       ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
-       cntrs->port_rcv_remphys_errors =
-               ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
-       cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
-       cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
-       cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
-       cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
-       cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
-       cntrs->local_link_integrity_errors =
-               crp->cr_locallinkintegrityerrcnt ?
-               ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
-               ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-                dd->ipath_lli_errs : dd->ipath_lli_errors);
-       cntrs->excessive_buffer_overrun_errors =
-               crp->cr_excessbufferovflcnt ?
-               ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
-               dd->ipath_overrun_thresh_errs;
-       cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
-               ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_ib_piobufavail - callback when a PIO buffer is available
- * @dev: the device pointer
- *
- * This is called from ipath_intr() at interrupt level when a PIO buffer is
- * available after ipath_verbs_send() returned an error that no buffers were
- * available.  Return 1 if we consumed all the PIO buffers and still have
- * QPs waiting for buffers; for now, we just restart the send tasklet and
- * return zero.
- */
-int ipath_ib_piobufavail(struct ipath_ibdev *dev)
-{
-       struct list_head *list;
-       struct ipath_qp *qplist;
-       struct ipath_qp *qp;
-       unsigned long flags;
-
-       if (dev == NULL)
-               goto bail;
-
-       list = &dev->piowait;
-       qplist = NULL;
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       while (!list_empty(list)) {
-               qp = list_entry(list->next, struct ipath_qp, piowait);
-               list_del_init(&qp->piowait);
-               qp->pio_next = qplist;
-               qplist = qp;
-               atomic_inc(&qp->refcount);
-       }
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-       while (qplist != NULL) {
-               qp = qplist;
-               qplist = qp->pio_next;
-
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
-                       ipath_schedule_send(qp);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-
-               /* Notify ipath_destroy_qp() if it is waiting. */
-               if (atomic_dec_and_test(&qp->refcount))
-                       wake_up(&qp->wait);
-       }
-
-bail:
-       return 0;
-}
-
-static int ipath_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-                             struct ib_udata *uhw)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-
-       if (uhw->inlen || uhw->outlen)
-               return -EINVAL;
-
-       memset(props, 0, sizeof(*props));
-
-       props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
-               IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
-               IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
-               IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
-       props->page_size_cap = PAGE_SIZE;
-       props->vendor_id =
-               IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
-       props->vendor_part_id = dev->dd->ipath_deviceid;
-       props->hw_ver = dev->dd->ipath_pcirev;
-
-       props->sys_image_guid = dev->sys_image_guid;
-
-       props->max_mr_size = ~0ull;
-       props->max_qp = ib_ipath_max_qps;
-       props->max_qp_wr = ib_ipath_max_qp_wrs;
-       props->max_sge = ib_ipath_max_sges;
-       props->max_sge_rd = ib_ipath_max_sges;
-       props->max_cq = ib_ipath_max_cqs;
-       props->max_ah = ib_ipath_max_ahs;
-       props->max_cqe = ib_ipath_max_cqes;
-       props->max_mr = dev->lk_table.max;
-       props->max_fmr = dev->lk_table.max;
-       props->max_map_per_fmr = 32767;
-       props->max_pd = ib_ipath_max_pds;
-       props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
-       props->max_qp_init_rd_atom = 255;
-       /* props->max_res_rd_atom */
-       props->max_srq = ib_ipath_max_srqs;
-       props->max_srq_wr = ib_ipath_max_srq_wrs;
-       props->max_srq_sge = ib_ipath_max_srq_sges;
-       /* props->local_ca_ack_delay */
-       props->atomic_cap = IB_ATOMIC_GLOB;
-       props->max_pkeys = ipath_get_npkeys(dev->dd);
-       props->max_mcast_grp = ib_ipath_max_mcast_grps;
-       props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
-       props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
-               props->max_mcast_grp;
-
-       return 0;
-}
-
-const u8 ipath_cvt_physportstate[32] = {
-       [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
-       [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
-       [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
-       [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
-       [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
-       [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
-       [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
-               IB_PHYSPORTSTATE_CFG_TRAIN,
-       [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
-               IB_PHYSPORTSTATE_CFG_TRAIN,
-       [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
-               IB_PHYSPORTSTATE_CFG_TRAIN,
-       [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
-               IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-       [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
-               IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-       [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
-               IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-       [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
-{
-       return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
-}
-
-static int ipath_query_port(struct ib_device *ibdev,
-                           u8 port, struct ib_port_attr *props)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_devdata *dd = dev->dd;
-       enum ib_mtu mtu;
-       u16 lid = dd->ipath_lid;
-       u64 ibcstat;
-
-       memset(props, 0, sizeof(*props));
-       props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
-       props->lmc = dd->ipath_lmc;
-       props->sm_lid = dev->sm_lid;
-       props->sm_sl = dev->sm_sl;
-       ibcstat = dd->ipath_lastibcstat;
-       /* map LinkState to IB portinfo values.  */
-       props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
-
-       /* See phys_state_show() */
-       props->phys_state = /* MEA: assumes shift == 0 */
-               ipath_cvt_physportstate[dd->ipath_lastibcstat &
-               dd->ibcs_lts_mask];
-       props->port_cap_flags = dev->port_cap_flags;
-       props->gid_tbl_len = 1;
-       props->max_msg_sz = 0x80000000;
-       props->pkey_tbl_len = ipath_get_npkeys(dd);
-       props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
-               dev->z_pkey_violations;
-       props->qkey_viol_cntr = dev->qkey_violations;
-       props->active_width = dd->ipath_link_width_active;
-       /* See rate_show() */
-       props->active_speed = dd->ipath_link_speed_active;
-       props->max_vl_num = 1;          /* VLCap = VL0 */
-       props->init_type_reply = 0;
-
-       props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
-       switch (dd->ipath_ibmtu) {
-       case 4096:
-               mtu = IB_MTU_4096;
-               break;
-       case 2048:
-               mtu = IB_MTU_2048;
-               break;
-       case 1024:
-               mtu = IB_MTU_1024;
-               break;
-       case 512:
-               mtu = IB_MTU_512;
-               break;
-       case 256:
-               mtu = IB_MTU_256;
-               break;
-       default:
-               mtu = IB_MTU_2048;
-       }
-       props->active_mtu = mtu;
-       props->subnet_timeout = dev->subnet_timeout;
-
-       return 0;
-}
-
-static int ipath_modify_device(struct ib_device *device,
-                              int device_modify_mask,
-                              struct ib_device_modify *device_modify)
-{
-       int ret;
-
-       if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
-                                  IB_DEVICE_MODIFY_NODE_DESC)) {
-               ret = -EOPNOTSUPP;
-               goto bail;
-       }
-
-       if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
-               memcpy(device->node_desc, device_modify->node_desc, 64);
-
-       if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
-               to_idev(device)->sys_image_guid =
-                       cpu_to_be64(device_modify->sys_image_guid);
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static int ipath_modify_port(struct ib_device *ibdev,
-                            u8 port, int port_modify_mask,
-                            struct ib_port_modify *props)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-
-       dev->port_cap_flags |= props->set_port_cap_mask;
-       dev->port_cap_flags &= ~props->clr_port_cap_mask;
-       if (port_modify_mask & IB_PORT_SHUTDOWN)
-               ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
-       if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
-               dev->qkey_violations = 0;
-       return 0;
-}
-
-static int ipath_query_gid(struct ib_device *ibdev, u8 port,
-                          int index, union ib_gid *gid)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       int ret;
-
-       if (index >= 1) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       gid->global.subnet_prefix = dev->gid_prefix;
-       gid->global.interface_id = dev->dd->ipath_guid;
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
-                                   struct ib_ucontext *context,
-                                   struct ib_udata *udata)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_pd *pd;
-       struct ib_pd *ret;
-
-       /*
-        * This limit is actually arbitrary.  Some correctness tests assume
-        * there is a maximum number of PDs that can be allocated.  The
-        * hardware does not impose one, but we enforce the value we report
-        * so that exceeding it fails as those tests expect.
-        */
-
-       pd = kmalloc(sizeof *pd, GFP_KERNEL);
-       if (!pd) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       spin_lock(&dev->n_pds_lock);
-       if (dev->n_pds_allocated == ib_ipath_max_pds) {
-               spin_unlock(&dev->n_pds_lock);
-               kfree(pd);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       dev->n_pds_allocated++;
-       spin_unlock(&dev->n_pds_lock);
-
-       /* ib_alloc_pd() will initialize pd->ibpd. */
-       pd->user = udata != NULL;
-
-       ret = &pd->ibpd;
-
-bail:
-       return ret;
-}
-
-static int ipath_dealloc_pd(struct ib_pd *ibpd)
-{
-       struct ipath_pd *pd = to_ipd(ibpd);
-       struct ipath_ibdev *dev = to_idev(ibpd->device);
-
-       spin_lock(&dev->n_pds_lock);
-       dev->n_pds_allocated--;
-       spin_unlock(&dev->n_pds_lock);
-
-       kfree(pd);
-
-       return 0;
-}
-
-/**
- * ipath_create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- *
- * This may be called from interrupt context.
- */
-static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
-                                    struct ib_ah_attr *ah_attr)
-{
-       struct ipath_ah *ah;
-       struct ib_ah *ret;
-       struct ipath_ibdev *dev = to_idev(pd->device);
-       unsigned long flags;
-
-       /* A multicast address requires a GRH (see ch. 8.4.1). */
-       if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
-           ah_attr->dlid != IPATH_PERMISSIVE_LID &&
-           !(ah_attr->ah_flags & IB_AH_GRH)) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       if (ah_attr->dlid == 0) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       if (ah_attr->port_num < 1 ||
-           ah_attr->port_num > pd->device->phys_port_cnt) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       ah = kmalloc(sizeof *ah, GFP_ATOMIC);
-       if (!ah) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       spin_lock_irqsave(&dev->n_ahs_lock, flags);
-       if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
-               spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-               kfree(ah);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       dev->n_ahs_allocated++;
-       spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
-       /* ib_create_ah() will initialize ah->ibah. */
-       ah->attr = *ah_attr;
-       ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
-
-       ret = &ah->ibah;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_destroy_ah - destroy an address handle
- * @ibah: the AH to destroy
- *
- * This may be called from interrupt context.
- */
-static int ipath_destroy_ah(struct ib_ah *ibah)
-{
-       struct ipath_ibdev *dev = to_idev(ibah->device);
-       struct ipath_ah *ah = to_iah(ibah);
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->n_ahs_lock, flags);
-       dev->n_ahs_allocated--;
-       spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
-       kfree(ah);
-
-       return 0;
-}
-
-static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
-       struct ipath_ah *ah = to_iah(ibah);
-
-       *ah_attr = ah->attr;
-       ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
-
-       return 0;
-}
-
-/**
- * ipath_get_npkeys - return the size of the PKEY table for port 0
- * @dd: the infinipath device
- */
-unsigned ipath_get_npkeys(struct ipath_devdata *dd)
-{
-       return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
-}
-
-/**
- * ipath_get_pkey - return the indexed PKEY from the port PKEY table
- * @dd: the infinipath device
- * @index: the PKEY index
- */
-unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
-{
-       unsigned ret;
-
-       /* always a kernel port, no locking needed */
-       if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
-               ret = 0;
-       else
-               ret = dd->ipath_pd[0]->port_pkeys[index];
-
-       return ret;
-}
-
-static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
-                           u16 *pkey)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       int ret;
-
-       if (index >= ipath_get_npkeys(dev->dd)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       *pkey = ipath_get_pkey(dev->dd, index);
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_alloc_ucontext - allocate a ucontext
- * @ibdev: the infiniband device
- * @udata: not used by the InfiniPath driver
- */
-
-static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
-                                               struct ib_udata *udata)
-{
-       struct ipath_ucontext *context;
-       struct ib_ucontext *ret;
-
-       context = kmalloc(sizeof *context, GFP_KERNEL);
-       if (!context) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       ret = &context->ibucontext;
-
-bail:
-       return ret;
-}
-
-static int ipath_dealloc_ucontext(struct ib_ucontext *context)
-{
-       kfree(to_iucontext(context));
-       return 0;
-}
-
-static int ipath_verbs_register_sysfs(struct ib_device *dev);
-
-static void __verbs_timer(unsigned long arg)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *) arg;
-
-       /* Handle verbs layer timeouts. */
-       ipath_ib_timer(dd->verbs_dev);
-
-       mod_timer(&dd->verbs_timer, jiffies + 1);
-}
-
-static int enable_timer(struct ipath_devdata *dd)
-{
-       /*
-        * Early chips had a design flaw where the chip's and the kernel's
-        * idea of the tail register don't always agree, and therefore we
-        * won't get an interrupt on the next packet received.
-        * If the board supports per-packet receive interrupts, use them.
-        * Otherwise, the timer function periodically checks for packets
-        * to cover this case.
-        * Either way, the timer is needed for verbs layer related
-        * processing.
-        */
-       if (dd->ipath_flags & IPATH_GPIO_INTR) {
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
-                                0x2074076542310ULL);
-               /* Enable GPIO bit 2 interrupt */
-               dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-                                dd->ipath_gpio_mask);
-       }
-
-       setup_timer(&dd->verbs_timer, __verbs_timer, (unsigned long)dd);
-
-       dd->verbs_timer.expires = jiffies + 1;
-       add_timer(&dd->verbs_timer);
-
-       return 0;
-}
-
-static int disable_timer(struct ipath_devdata *dd)
-{
-       /* Disable GPIO bit 2 interrupt */
-       if (dd->ipath_flags & IPATH_GPIO_INTR) {
-               dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-                                dd->ipath_gpio_mask);
-               /*
-                * We might want to undo changes to debugportselect,
-                * but how?
-                */
-       }
-
-       del_timer_sync(&dd->verbs_timer);
-
-       return 0;
-}
-
-static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num,
-                               struct ib_port_immutable *immutable)
-{
-       struct ib_port_attr attr;
-       int err;
-
-       err = ipath_query_port(ibdev, port_num, &attr);
-       if (err)
-               return err;
-
-       immutable->pkey_tbl_len = attr.pkey_tbl_len;
-       immutable->gid_tbl_len = attr.gid_tbl_len;
-       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
-       immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
-       return 0;
-}
-
-/**
- * ipath_register_ib_device - register our device with the infiniband core
- * @dd: the device data structure
- * Return 0 on success or a negative errno on failure; the allocated
- * ipath_ibdev (or NULL on error) is stored in dd->verbs_dev.
- */
-int ipath_register_ib_device(struct ipath_devdata *dd)
-{
-       struct ipath_verbs_counters cntrs;
-       struct ipath_ibdev *idev;
-       struct ib_device *dev;
-       struct ipath_verbs_txreq *tx;
-       unsigned i;
-       int ret;
-
-       idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
-       if (idev == NULL) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       dev = &idev->ibdev;
-
-       if (dd->ipath_sdma_descq_cnt) {
-               tx = kmalloc_array(dd->ipath_sdma_descq_cnt, sizeof *tx,
-                                  GFP_KERNEL);
-               if (tx == NULL) {
-                       ret = -ENOMEM;
-                       goto err_tx;
-               }
-       } else
-               tx = NULL;
-       idev->txreq_bufs = tx;
-
-       /* Only need to initialize non-zero fields. */
-       spin_lock_init(&idev->n_pds_lock);
-       spin_lock_init(&idev->n_ahs_lock);
-       spin_lock_init(&idev->n_cqs_lock);
-       spin_lock_init(&idev->n_qps_lock);
-       spin_lock_init(&idev->n_srqs_lock);
-       spin_lock_init(&idev->n_mcast_grps_lock);
-
-       spin_lock_init(&idev->qp_table.lock);
-       spin_lock_init(&idev->lk_table.lock);
-       idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
-       /* Set the prefix to the default value (see ch. 4.1.1) */
-       idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
-
-       ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
-       if (ret)
-               goto err_qp;
-
-       /*
-        * The top ib_ipath_lkey_table_size bits are used to index the
-        * table.  The lower 8 bits can be owned by the user (copied from
-        * the LKEY).  The remaining bits act as a generation number or tag.
-        */
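-       /*
-        * For example, with ib_ipath_lkey_table_size = 12 (an
-        * illustrative value, not necessarily the configured one), the
-        * table holds 1 << 12 = 4096 entries and a 32-bit LKEY splits
-        * into bits [31:20] for the index, [19:8] for the generation
-        * tag and [7:0] for the user-owned byte.
-        */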
-       idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
-       idev->lk_table.table = kcalloc(idev->lk_table.max,
-                                      sizeof(*idev->lk_table.table),
-                                      GFP_KERNEL);
-       if (idev->lk_table.table == NULL) {
-               ret = -ENOMEM;
-               goto err_lk;
-       }
-       INIT_LIST_HEAD(&idev->pending_mmaps);
-       spin_lock_init(&idev->pending_lock);
-       idev->mmap_offset = PAGE_SIZE;
-       spin_lock_init(&idev->mmap_offset_lock);
-       INIT_LIST_HEAD(&idev->pending[0]);
-       INIT_LIST_HEAD(&idev->pending[1]);
-       INIT_LIST_HEAD(&idev->pending[2]);
-       INIT_LIST_HEAD(&idev->piowait);
-       INIT_LIST_HEAD(&idev->rnrwait);
-       INIT_LIST_HEAD(&idev->txreq_free);
-       idev->pending_index = 0;
-       idev->port_cap_flags =
-               IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
-       if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
-               idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
-       idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
-       idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
-       idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
-       idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
-       idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
-       /* Snapshot current HW counters to "clear" them. */
-       ipath_get_counters(dd, &cntrs);
-       idev->z_symbol_error_counter = cntrs.symbol_error_counter;
-       idev->z_link_error_recovery_counter =
-               cntrs.link_error_recovery_counter;
-       idev->z_link_downed_counter = cntrs.link_downed_counter;
-       idev->z_port_rcv_errors = cntrs.port_rcv_errors;
-       idev->z_port_rcv_remphys_errors =
-               cntrs.port_rcv_remphys_errors;
-       idev->z_port_xmit_discards = cntrs.port_xmit_discards;
-       idev->z_port_xmit_data = cntrs.port_xmit_data;
-       idev->z_port_rcv_data = cntrs.port_rcv_data;
-       idev->z_port_xmit_packets = cntrs.port_xmit_packets;
-       idev->z_port_rcv_packets = cntrs.port_rcv_packets;
-       idev->z_local_link_integrity_errors =
-               cntrs.local_link_integrity_errors;
-       idev->z_excessive_buffer_overrun_errors =
-               cntrs.excessive_buffer_overrun_errors;
-       idev->z_vl15_dropped = cntrs.vl15_dropped;
-
-       for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
-               list_add(&tx->txreq.list, &idev->txreq_free);
-
-       /*
-        * The system image GUID is supposed to be the same for all
-        * IB HCAs in a single system but since there can be other
-        * device types in the system, we can't be sure this is unique.
-        */
-       if (!sys_image_guid)
-               sys_image_guid = dd->ipath_guid;
-       idev->sys_image_guid = sys_image_guid;
-       idev->ib_unit = dd->ipath_unit;
-       idev->dd = dd;
-
-       strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
-       dev->owner = THIS_MODULE;
-       dev->node_guid = dd->ipath_guid;
-       dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
-       dev->uverbs_cmd_mask =
-               (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
-               (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
-               (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
-               (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
-               (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
-               (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
-               (1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
-               (1ull << IB_USER_VERBS_CMD_REG_MR)              |
-               (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
-               (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
-               (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
-               (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
-               (1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
-               (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
-               (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
-               (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
-               (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
-               (1ull << IB_USER_VERBS_CMD_POST_SEND)           |
-               (1ull << IB_USER_VERBS_CMD_POST_RECV)           |
-               (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
-               (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
-               (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
-               (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
-               (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
-               (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
-       dev->node_type = RDMA_NODE_IB_CA;
-       dev->phys_port_cnt = 1;
-       dev->num_comp_vectors = 1;
-       dev->dma_device = &dd->pcidev->dev;
-       dev->query_device = ipath_query_device;
-       dev->modify_device = ipath_modify_device;
-       dev->query_port = ipath_query_port;
-       dev->modify_port = ipath_modify_port;
-       dev->query_pkey = ipath_query_pkey;
-       dev->query_gid = ipath_query_gid;
-       dev->alloc_ucontext = ipath_alloc_ucontext;
-       dev->dealloc_ucontext = ipath_dealloc_ucontext;
-       dev->alloc_pd = ipath_alloc_pd;
-       dev->dealloc_pd = ipath_dealloc_pd;
-       dev->create_ah = ipath_create_ah;
-       dev->destroy_ah = ipath_destroy_ah;
-       dev->query_ah = ipath_query_ah;
-       dev->create_srq = ipath_create_srq;
-       dev->modify_srq = ipath_modify_srq;
-       dev->query_srq = ipath_query_srq;
-       dev->destroy_srq = ipath_destroy_srq;
-       dev->create_qp = ipath_create_qp;
-       dev->modify_qp = ipath_modify_qp;
-       dev->query_qp = ipath_query_qp;
-       dev->destroy_qp = ipath_destroy_qp;
-       dev->post_send = ipath_post_send;
-       dev->post_recv = ipath_post_receive;
-       dev->post_srq_recv = ipath_post_srq_receive;
-       dev->create_cq = ipath_create_cq;
-       dev->destroy_cq = ipath_destroy_cq;
-       dev->resize_cq = ipath_resize_cq;
-       dev->poll_cq = ipath_poll_cq;
-       dev->req_notify_cq = ipath_req_notify_cq;
-       dev->get_dma_mr = ipath_get_dma_mr;
-       dev->reg_user_mr = ipath_reg_user_mr;
-       dev->dereg_mr = ipath_dereg_mr;
-       dev->alloc_fmr = ipath_alloc_fmr;
-       dev->map_phys_fmr = ipath_map_phys_fmr;
-       dev->unmap_fmr = ipath_unmap_fmr;
-       dev->dealloc_fmr = ipath_dealloc_fmr;
-       dev->attach_mcast = ipath_multicast_attach;
-       dev->detach_mcast = ipath_multicast_detach;
-       dev->process_mad = ipath_process_mad;
-       dev->mmap = ipath_mmap;
-       dev->dma_ops = &ipath_dma_mapping_ops;
-       dev->get_port_immutable = ipath_port_immutable;
-
-       snprintf(dev->node_desc, sizeof(dev->node_desc),
-                IPATH_IDSTR " %s", init_utsname()->nodename);
-
-       ret = ib_register_device(dev, NULL);
-       if (ret)
-               goto err_reg;
-
-       ret = ipath_verbs_register_sysfs(dev);
-       if (ret)
-               goto err_class;
-
-       enable_timer(dd);
-
-       goto bail;
-
-err_class:
-       ib_unregister_device(dev);
-err_reg:
-       kfree(idev->lk_table.table);
-err_lk:
-       kfree(idev->qp_table.table);
-err_qp:
-       kfree(idev->txreq_bufs);
-err_tx:
-       ib_dealloc_device(dev);
-       ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
-       idev = NULL;
-
-bail:
-       dd->verbs_dev = idev;
-       return ret;
-}
-
-void ipath_unregister_ib_device(struct ipath_ibdev *dev)
-{
-       struct ib_device *ibdev = &dev->ibdev;
-       u32 qps_inuse;
-
-       ib_unregister_device(ibdev);
-
-       disable_timer(dev->dd);
-
-       if (!list_empty(&dev->pending[0]) ||
-           !list_empty(&dev->pending[1]) ||
-           !list_empty(&dev->pending[2]))
-               ipath_dev_err(dev->dd, "pending list not empty!\n");
-       if (!list_empty(&dev->piowait))
-               ipath_dev_err(dev->dd, "piowait list not empty!\n");
-       if (!list_empty(&dev->rnrwait))
-               ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
-       if (!ipath_mcast_tree_empty())
-               ipath_dev_err(dev->dd, "multicast table memory leak!\n");
-       /*
-        * Note that ipath_unregister_ib_device() can be called before all
-        * the QPs are destroyed!
-        */
-       qps_inuse = ipath_free_all_qps(&dev->qp_table);
-       if (qps_inuse)
-               ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
-                       qps_inuse);
-       kfree(dev->qp_table.table);
-       kfree(dev->lk_table.table);
-       kfree(dev->txreq_bufs);
-       ib_dealloc_device(ibdev);
-}
-
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
-                       char *buf)
-{
-       struct ipath_ibdev *dev =
-               container_of(device, struct ipath_ibdev, ibdev.dev);
-
-       return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
-}
-
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
-                       char *buf)
-{
-       struct ipath_ibdev *dev =
-               container_of(device, struct ipath_ibdev, ibdev.dev);
-       int ret;
-
-       ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
-       if (ret < 0)
-               goto bail;
-       strcat(buf, "\n");
-       ret = strlen(buf);
-
-bail:
-       return ret;
-}
-
-static ssize_t show_stats(struct device *device, struct device_attribute *attr,
-                         char *buf)
-{
-       struct ipath_ibdev *dev =
-               container_of(device, struct ipath_ibdev, ibdev.dev);
-       int i;
-       int len;
-
-       len = sprintf(buf,
-                     "RC resends  %d\n"
-                     "RC no QACK  %d\n"
-                     "RC ACKs     %d\n"
-                     "RC SEQ NAKs %d\n"
-                     "RC RDMA seq %d\n"
-                     "RC RNR NAKs %d\n"
-                     "RC OTH NAKs %d\n"
-                     "RC timeouts %d\n"
-                     "RC RDMA dup %d\n"
-                     "piobuf wait %d\n"
-                     "unaligned   %d\n"
-                     "PKT drops   %d\n"
-                     "WQE errs    %d\n",
-                     dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
-                     dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
-                     dev->n_other_naks, dev->n_timeouts,
-                     dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
-                     dev->n_pkt_drops, dev->n_wqe_errs);
-       for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
-               const struct ipath_opcode_stats *si = &dev->opstats[i];
-
-               if (!si->n_packets && !si->n_bytes)
-                       continue;
-               len += sprintf(buf + len, "%02x %llu/%llu\n", i,
-                              (unsigned long long) si->n_packets,
-                              (unsigned long long) si->n_bytes);
-       }
-       return len;
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
-
-static struct device_attribute *ipath_class_attributes[] = {
-       &dev_attr_hw_rev,
-       &dev_attr_hca_type,
-       &dev_attr_board_id,
-       &dev_attr_stats
-};
-
-static int ipath_verbs_register_sysfs(struct ib_device *dev)
-{
-       int i;
-       int ret;
-
-       for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
-               ret = device_create_file(&dev->dev,
-                                      ipath_class_attributes[i]);
-               if (ret)
-                       goto bail;
-       }
-       return 0;
-bail:
-       for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
-               device_remove_file(&dev->dev, ipath_class_attributes[i]);
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h
deleted file mode 100644 (file)
index 6c70a89..0000000
+++ /dev/null
@@ -1,941 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef IPATH_VERBS_H
-#define IPATH_VERBS_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/kref.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_user_verbs.h>
-
-#include "ipath_kernel.h"
-
-#define IPATH_MAX_RDMA_ATOMIC  4
-
-#define QPN_MAX                 (1 << 24)
-#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define IPATH_UVERBS_ABI_VERSION       2
-
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE     (IB_CQ_NEXT_COMP + 1)
-
-/* AETH NAK opcode values */
-#define IB_RNR_NAK                     0x20
-#define IB_NAK_PSN_ERROR               0x60
-#define IB_NAK_INVALID_REQUEST         0x61
-#define IB_NAK_REMOTE_ACCESS_ERROR     0x62
-#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
-#define IB_NAK_INVALID_RD_REQUEST      0x64
-
-/* Flags for checking QP state (see ib_ipath_state_ops[]) */
-#define IPATH_POST_SEND_OK             0x01
-#define IPATH_POST_RECV_OK             0x02
-#define IPATH_PROCESS_RECV_OK          0x04
-#define IPATH_PROCESS_SEND_OK          0x08
-#define IPATH_PROCESS_NEXT_SEND_OK     0x10
-#define IPATH_FLUSH_SEND               0x20
-#define IPATH_FLUSH_RECV               0x40
-#define IPATH_PROCESS_OR_FLUSH_SEND \
-       (IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
-
-/* IB Performance Manager status values */
-#define IB_PMA_SAMPLE_STATUS_DONE      0x00
-#define IB_PMA_SAMPLE_STATUS_STARTED   0x01
-#define IB_PMA_SAMPLE_STATUS_RUNNING   0x02
-
-/* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA  cpu_to_be16(0x0001)
-#define IB_PMA_PORT_RCV_DATA   cpu_to_be16(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS  cpu_to_be16(0x0003)
-#define IB_PMA_PORT_RCV_PKTS   cpu_to_be16(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT  cpu_to_be16(0x0005)
-
-struct ib_reth {
-       __be64 vaddr;
-       __be32 rkey;
-       __be32 length;
-} __attribute__ ((packed));
-
-struct ib_atomic_eth {
-       __be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
-       __be32 rkey;
-       __be64 swap_data;
-       __be64 compare_data;
-} __attribute__ ((packed));
-
-struct ipath_other_headers {
-       __be32 bth[3];
-       union {
-               struct {
-                       __be32 deth[2];
-                       __be32 imm_data;
-               } ud;
-               struct {
-                       struct ib_reth reth;
-                       __be32 imm_data;
-               } rc;
-               struct {
-                       __be32 aeth;
-                       __be32 atomic_ack_eth[2];
-               } at;
-               __be32 imm_data;
-               __be32 aeth;
-               struct ib_atomic_eth atomic_eth;
-       } u;
-} __attribute__ ((packed));
-
-/*
- * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
- * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
- * will be in the eager header buffer.  The remaining 12 or 16 bytes
- * are in the data buffer.
- */
-struct ipath_ib_header {
-       __be16 lrh[4];
-       union {
-               struct {
-                       struct ib_grh grh;
-                       struct ipath_other_headers oth;
-               } l;
-               struct ipath_other_headers oth;
-       } u;
-} __attribute__ ((packed));
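/*
 * Editorial aside (not part of the deleted file): the size arithmetic in the
 * comment above, spelled out.  The terms are the standard InfiniBand header
 * sizes carried by struct ipath_ib_header:
 *
 *     LRH   8 bytes  (lrh[4], four __be16)
 *   + GRH  40 bytes  (struct ib_grh)
 *   + BTH  12 bytes  (bth[3], three __be32)
 *   + DETH  8 bytes  (deth[2], two __be32)
 *   ------------------
 *          68 bytes, or 72 with the optional 4-byte imm_data.
 *
 * With a 56-byte eager header buffer, 68 - 56 = 12 or 72 - 56 = 16 bytes
 * spill into the data buffer, matching the figures quoted above.
 */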
-
-struct ipath_pio_header {
-       __le32 pbc[2];
-       struct ipath_ib_header hdr;
-} __attribute__ ((packed));
-
-/*
- * There is one struct ipath_mcast for each multicast GID.
- * All attached QPs are then stored as a list of
- * struct ipath_mcast_qp.
- */
-struct ipath_mcast_qp {
-       struct list_head list;
-       struct ipath_qp *qp;
-};
-
-struct ipath_mcast {
-       struct rb_node rb_node;
-       union ib_gid mgid;
-       struct list_head qp_list;
-       wait_queue_head_t wait;
-       atomic_t refcount;
-       int n_attached;
-};
-
-/* Protection domain */
-struct ipath_pd {
-       struct ib_pd ibpd;
-       int user;               /* non-zero if created from user space */
-};
-
-/* Address Handle */
-struct ipath_ah {
-       struct ib_ah ibah;
-       struct ib_ah_attr attr;
-};
-
-/*
- * This structure is used by ipath_mmap() to validate an offset
- * when an mmap() request is made.  The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct ipath_mmap_info {
-       struct list_head pending_mmaps;
-       struct ib_ucontext *context;
-       void *obj;
-       __u64 offset;
-       struct kref ref;
-       unsigned size;
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct ipath_cq_wc {
-       u32 head;               /* index of next entry to fill */
-       u32 tail;               /* index of next ib_poll_cq() entry */
-       union {
-               /* these are actually size ibcq.cqe + 1 */
-               struct ib_uverbs_wc uqueue[0];
-               struct ib_wc kqueue[0];
-       };
-};
-
-/*
- * The completion queue structure.
- */
-struct ipath_cq {
-       struct ib_cq ibcq;
-       struct tasklet_struct comptask;
-       spinlock_t lock;
-       u8 notify;
-       u8 triggered;
-       struct ipath_cq_wc *queue;
-       struct ipath_mmap_info *ip;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct ipath_seg {
-       void *vaddr;
-       size_t length;
-};
-
-/* The number of ipath_segs that fit in a page. */
-#define IPATH_SEGSZ     (PAGE_SIZE / sizeof (struct ipath_seg))
-
-struct ipath_segarray {
-       struct ipath_seg segs[IPATH_SEGSZ];
-};
-
-struct ipath_mregion {
-       struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
-       u64 user_base;          /* User's address for this region */
-       u64 iova;               /* IB start address of this region */
-       size_t length;
-       u32 lkey;
-       u32 offset;             /* offset (bytes) to start of region */
-       int access_flags;
-       u32 max_segs;           /* number of ipath_segs in all the arrays */
-       u32 mapsz;              /* size of the map array */
-       struct ipath_segarray *map[0];  /* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct ipath_sge {
-       struct ipath_mregion *mr;
-       void *vaddr;            /* kernel virtual address of segment */
-       u32 sge_length;         /* length of the SGE */
-       u32 length;             /* remaining length of the segment */
-       u16 m;                  /* current index: mr->map[m] */
-       u16 n;                  /* current index: mr->map[m]->segs[n] */
-};
-
-/* Memory region */
-struct ipath_mr {
-       struct ib_mr ibmr;
-       struct ib_umem *umem;
-       struct ipath_mregion mr;        /* must be last */
-};
-
-/*
- * Send work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->s_max_sge.
- */
-struct ipath_swqe {
-       union {
-               struct ib_send_wr wr;   /* don't use wr.sg_list */
-               struct ib_ud_wr ud_wr;
-               struct ib_rdma_wr rdma_wr;
-               struct ib_atomic_wr atomic_wr;
-       };
-
-       u32 psn;                /* first packet sequence number */
-       u32 lpsn;               /* last packet sequence number */
-       u32 ssn;                /* send sequence number */
-       u32 length;             /* total length of data in sg_list */
-       struct ipath_sge sg_list[0];
-};
-
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct ipath_rwqe {
-       u64 wr_id;
-       u8 num_sge;
-       struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct ipath_rwq {
-       u32 head;               /* new work requests posted to the head */
-       u32 tail;               /* receives pull requests from here. */
-       struct ipath_rwqe wq[0];
-};
-
-struct ipath_rq {
-       struct ipath_rwq *wq;
-       spinlock_t lock;
-       u32 size;               /* size of RWQE array */
-       u8 max_sge;
-};
-
-struct ipath_srq {
-       struct ib_srq ibsrq;
-       struct ipath_rq rq;
-       struct ipath_mmap_info *ip;
-       /* send signal when number of RWQEs < limit */
-       u32 limit;
-};
-
-struct ipath_sge_state {
-       struct ipath_sge *sg_list;      /* next SGE to be used if any */
-       struct ipath_sge sge;   /* progress state for the current SGE */
-       u8 num_sge;
-       u8 static_rate;
-};
-
-/*
- * This structure holds the information that the send tasklet needs
- * to send a RDMA read response or atomic operation.
- */
-struct ipath_ack_entry {
-       u8 opcode;
-       u8 sent;
-       u32 psn;
-       union {
-               struct ipath_sge_state rdma_sge;
-               u64 atomic_data;
-       };
-};
-
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
- *
- * Common variables are protected by both r_rq.lock and s_lock in that order
- * which only happens in modify_qp() or changing the QP 'state'.
- */
-struct ipath_qp {
-       struct ib_qp ibqp;
-       struct ipath_qp *next;          /* link list for QPN hash table */
-       struct ipath_qp *timer_next;    /* link list for ipath_ib_timer() */
-       struct ipath_qp *pio_next;      /* link for ipath_ib_piobufavail() */
-       struct list_head piowait;       /* link for wait PIO buf */
-       struct list_head timerwait;     /* link for waiting for timeouts */
-       struct ib_ah_attr remote_ah_attr;
-       struct ipath_ib_header s_hdr;   /* next packet header to send */
-       atomic_t refcount;
-       wait_queue_head_t wait;
-       wait_queue_head_t wait_dma;
-       struct tasklet_struct s_task;
-       struct ipath_mmap_info *ip;
-       struct ipath_sge_state *s_cur_sge;
-       struct ipath_verbs_txreq *s_tx;
-       struct ipath_sge_state s_sge;   /* current send request data */
-       struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
-       struct ipath_sge_state s_ack_rdma_sge;
-       struct ipath_sge_state s_rdma_read_sge;
-       struct ipath_sge_state r_sge;   /* current receive data */
-       spinlock_t s_lock;
-       atomic_t s_dma_busy;
-       u16 s_pkt_delay;
-       u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
-       u32 s_cur_size;         /* size of send packet in bytes */
-       u32 s_len;              /* total length of s_sge */
-       u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
-       u32 s_next_psn;         /* PSN for next request */
-       u32 s_last_psn;         /* last response PSN processed */
-       u32 s_psn;              /* current packet sequence number */
-       u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
-       u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
-       u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
-       u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
-       u64 r_wr_id;            /* ID for current receive WQE */
-       unsigned long r_aflags;
-       u32 r_len;              /* total length of r_sge */
-       u32 r_rcv_len;          /* receive data len processed */
-       u32 r_psn;              /* expected rcv packet sequence number */
-       u32 r_msn;              /* message sequence number */
-       u8 state;               /* QP state */
-       u8 s_state;             /* opcode of last packet sent */
-       u8 s_ack_state;         /* opcode of packet to ACK */
-       u8 s_nak_state;         /* non-zero if NAK is pending */
-       u8 r_state;             /* opcode of last packet received */
-       u8 r_nak_state;         /* non-zero if NAK is pending */
-       u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
-       u8 r_flags;
-       u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
-       u8 r_head_ack_queue;    /* index into s_ack_queue[] */
-       u8 qp_access_flags;
-       u8 s_max_sge;           /* size of s_wq->sg_list */
-       u8 s_retry_cnt;         /* number of times to retry */
-       u8 s_rnr_retry_cnt;
-       u8 s_retry;             /* requester retry counter */
-       u8 s_rnr_retry;         /* requester RNR retry counter */
-       u8 s_pkey_index;        /* PKEY index to use */
-       u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
-       u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
-       u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
-       u8 s_flags;
-       u8 s_dmult;
-       u8 s_draining;
-       u8 timeout;             /* Timeout for this QP */
-       enum ib_mtu path_mtu;
-       u32 remote_qpn;
-       u32 qkey;               /* QKEY for this QP (for UD or RD) */
-       u32 s_size;             /* send work queue size */
-       u32 s_head;             /* new entries added here */
-       u32 s_tail;             /* next entry to process */
-       u32 s_cur;              /* current work queue entry */
-       u32 s_last;             /* last un-ACK'ed entry */
-       u32 s_ssn;              /* SSN of tail entry */
-       u32 s_lsn;              /* limit sequence number (credit) */
-       struct ipath_swqe *s_wq;        /* send work queue */
-       struct ipath_swqe *s_wqe;
-       struct ipath_sge *r_ud_sg_list;
-       struct ipath_rq r_rq;           /* receive work queue */
-       struct ipath_sge r_sg_list[0];  /* verified SGEs */
-};
-
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define IPATH_R_WRID_VALID     0
-
-/*
- * Bit definitions for r_flags.
- */
-#define IPATH_R_REUSE_SGE      0x01
-#define IPATH_R_RDMAR_SEQ      0x02
-
-/*
- * Bit definitions for s_flags.
- *
- * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
- *                        before processing the next SWQE
- * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
- *                        before processing the next SWQE
- * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
- * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- *                   next send completion entry not via send DMA.
- */
-#define IPATH_S_SIGNAL_REQ_WR  0x01
-#define IPATH_S_FENCE_PENDING  0x02
-#define IPATH_S_RDMAR_PENDING  0x04
-#define IPATH_S_ACK_PENDING    0x08
-#define IPATH_S_BUSY           0x10
-#define IPATH_S_WAITING                0x20
-#define IPATH_S_WAIT_SSN_CREDIT        0x40
-#define IPATH_S_WAIT_DMA       0x80
-
-#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
-       IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
-
-#define IPATH_PSN_CREDIT       512
-
-/*
- * Since struct ipath_swqe is not a fixed size, we can't simply index into
- * struct ipath_qp.s_wq.  This function does the array index computation.
- */
-static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
-                                             unsigned n)
-{
-       return (struct ipath_swqe *)((char *)qp->s_wq +
-                                    (sizeof(struct ipath_swqe) +
-                                     qp->s_max_sge *
-                                     sizeof(struct ipath_sge)) * n);
-}
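/*
 * Editorial aside (not part of the deleted file): a minimal, self-contained
 * sketch of the variable-stride indexing used by get_swqe_ptr() and
 * get_rwqe_ptr(), using a hypothetical element type.  Each slot is a fixed
 * header plus a caller-chosen number of trailing entries, so the array can
 * only be walked with explicit byte arithmetic rather than normal indexing.
 */
struct example_elem {
	int hdr;
	int tail[];		/* flexible array, max_tail entries per slot */
};

static inline struct example_elem *example_nth(char *base, unsigned max_tail,
					       unsigned n)
{
	size_t stride = sizeof(struct example_elem) + max_tail * sizeof(int);

	return (struct example_elem *)(base + stride * n);
}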
-
-/*
- * Since struct ipath_rwqe is not a fixed size, we can't simply index into
- * struct ipath_rwq.wq.  This function does the array index computation.
- */
-static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
-                                             unsigned n)
-{
-       return (struct ipath_rwqe *)
-               ((char *) rq->wq->wq +
-                (sizeof(struct ipath_rwqe) +
-                 rq->max_sge * sizeof(struct ib_sge)) * n);
-}
-
-/*
- * QPN-map pages start out as NULL, they get allocated upon
- * first use and are never deallocated. This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
-       atomic_t n_free;
-       void *page;
-};
-
-struct ipath_qp_table {
-       spinlock_t lock;
-       u32 last;               /* last QP number allocated */
-       u32 max;                /* size of the hash table */
-       u32 nmaps;              /* size of the map table */
-       struct ipath_qp **table;
-       /* bit map of free numbers */
-       struct qpn_map map[QPNMAP_ENTRIES];
-};
-
-struct ipath_lkey_table {
-       spinlock_t lock;
-       u32 next;               /* next unused index (speeds search) */
-       u32 gen;                /* generation count */
-       u32 max;                /* size of the table */
-       struct ipath_mregion **table;
-};
-
-struct ipath_opcode_stats {
-       u64 n_packets;          /* number of packets */
-       u64 n_bytes;            /* total number of bytes */
-};
-
-struct ipath_ibdev {
-       struct ib_device ibdev;
-       struct ipath_devdata *dd;
-       struct list_head pending_mmaps;
-       spinlock_t mmap_offset_lock;
-       u32 mmap_offset;
-       int ib_unit;            /* This is the device number */
-       u16 sm_lid;             /* in host order */
-       u8 sm_sl;
-       u8 mkeyprot;
-       /* non-zero when timer is set */
-       unsigned long mkey_lease_timeout;
-
-       /* The following fields are really per port. */
-       struct ipath_qp_table qp_table;
-       struct ipath_lkey_table lk_table;
-       struct list_head pending[3];    /* FIFO of QPs waiting for ACKs */
-       struct list_head piowait;       /* list for wait PIO buf */
-       struct list_head txreq_free;
-       void *txreq_bufs;
-       /* list of QPs waiting for RNR timer */
-       struct list_head rnrwait;
-       spinlock_t pending_lock;
-       __be64 sys_image_guid;  /* in network order */
-       __be64 gid_prefix;      /* in network order */
-       __be64 mkey;
-
-       u32 n_pds_allocated;    /* number of PDs allocated for device */
-       spinlock_t n_pds_lock;
-       u32 n_ahs_allocated;    /* number of AHs allocated for device */
-       spinlock_t n_ahs_lock;
-       u32 n_cqs_allocated;    /* number of CQs allocated for device */
-       spinlock_t n_cqs_lock;
-       u32 n_qps_allocated;    /* number of QPs allocated for device */
-       spinlock_t n_qps_lock;
-       u32 n_srqs_allocated;   /* number of SRQs allocated for device */
-       spinlock_t n_srqs_lock;
-       u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
-       spinlock_t n_mcast_grps_lock;
-
-       u64 ipath_sword;        /* total dwords sent (sample result) */
-       u64 ipath_rword;        /* total dwords received (sample result) */
-       u64 ipath_spkts;        /* total packets sent (sample result) */
-       u64 ipath_rpkts;        /* total packets received (sample result) */
-       /* # of ticks no data sent (sample result) */
-       u64 ipath_xmit_wait;
-       u64 rcv_errors;         /* # of packets with SW detected rcv errs */
-       u64 n_unicast_xmit;     /* total unicast packets sent */
-       u64 n_unicast_rcv;      /* total unicast packets received */
-       u64 n_multicast_xmit;   /* total multicast packets sent */
-       u64 n_multicast_rcv;    /* total multicast packets received */
-       u64 z_symbol_error_counter;             /* starting count for PMA */
-       u64 z_link_error_recovery_counter;      /* starting count for PMA */
-       u64 z_link_downed_counter;              /* starting count for PMA */
-       u64 z_port_rcv_errors;                  /* starting count for PMA */
-       u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
-       u64 z_port_xmit_discards;               /* starting count for PMA */
-       u64 z_port_xmit_data;                   /* starting count for PMA */
-       u64 z_port_rcv_data;                    /* starting count for PMA */
-       u64 z_port_xmit_packets;                /* starting count for PMA */
-       u64 z_port_rcv_packets;                 /* starting count for PMA */
-       u32 z_pkey_violations;                  /* starting count for PMA */
-       u32 z_local_link_integrity_errors;      /* starting count for PMA */
-       u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
-       u32 z_vl15_dropped;                     /* starting count for PMA */
-       u32 n_rc_resends;
-       u32 n_rc_acks;
-       u32 n_rc_qacks;
-       u32 n_seq_naks;
-       u32 n_rdma_seq;
-       u32 n_rnr_naks;
-       u32 n_other_naks;
-       u32 n_timeouts;
-       u32 n_pkt_drops;
-       u32 n_vl15_dropped;
-       u32 n_wqe_errs;
-       u32 n_rdma_dup_busy;
-       u32 n_piowait;
-       u32 n_unaligned;
-       u32 port_cap_flags;
-       u32 pma_sample_start;
-       u32 pma_sample_interval;
-       __be16 pma_counter_select[5];
-       u16 pma_tag;
-       u16 qkey_violations;
-       u16 mkey_violations;
-       u16 mkey_lease_period;
-       u16 pending_index;      /* which pending queue is active */
-       u8 pma_sample_status;
-       u8 subnet_timeout;
-       u8 vl_high_limit;
-       struct ipath_opcode_stats opstats[128];
-};
-
-struct ipath_verbs_counters {
-       u64 symbol_error_counter;
-       u64 link_error_recovery_counter;
-       u64 link_downed_counter;
-       u64 port_rcv_errors;
-       u64 port_rcv_remphys_errors;
-       u64 port_xmit_discards;
-       u64 port_xmit_data;
-       u64 port_rcv_data;
-       u64 port_xmit_packets;
-       u64 port_rcv_packets;
-       u32 local_link_integrity_errors;
-       u32 excessive_buffer_overrun_errors;
-       u32 vl15_dropped;
-};
-
-struct ipath_verbs_txreq {
-       struct ipath_qp         *qp;
-       struct ipath_swqe       *wqe;
-       u32                      map_len;
-       u32                      len;
-       struct ipath_sge_state  *ss;
-       struct ipath_pio_header  hdr;
-       struct ipath_sdma_txreq  txreq;
-};
-
-static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
-{
-       return container_of(ibmr, struct ipath_mr, ibmr);
-}
-
-static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
-{
-       return container_of(ibpd, struct ipath_pd, ibpd);
-}
-
-static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
-{
-       return container_of(ibah, struct ipath_ah, ibah);
-}
-
-static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
-{
-       return container_of(ibcq, struct ipath_cq, ibcq);
-}
-
-static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
-{
-       return container_of(ibsrq, struct ipath_srq, ibsrq);
-}
-
-static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
-{
-       return container_of(ibqp, struct ipath_qp, ibqp);
-}
-
-static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
-{
-       return container_of(ibdev, struct ipath_ibdev, ibdev);
-}
-
-/*
- * This must be called with s_lock held.
- */
-static inline void ipath_schedule_send(struct ipath_qp *qp)
-{
-       if (qp->s_flags & IPATH_S_ANY_WAIT)
-               qp->s_flags &= ~IPATH_S_ANY_WAIT;
-       if (!(qp->s_flags & IPATH_S_BUSY))
-               tasklet_hi_schedule(&qp->s_task);
-}
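/*
 * Editorial aside (not part of the deleted file): a hypothetical caller
 * honouring the locking rule stated above, taking s_lock before kicking
 * the send tasklet.
 */
static void example_kick_sender(struct ipath_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	ipath_schedule_send(qp);	/* s_lock held, as required */
	spin_unlock_irqrestore(&qp->s_lock, flags);
}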
-
-int ipath_process_mad(struct ib_device *ibdev,
-                     int mad_flags,
-                     u8 port_num,
-                     const struct ib_wc *in_wc,
-                     const struct ib_grh *in_grh,
-                     const struct ib_mad_hdr *in, size_t in_mad_size,
-                     struct ib_mad_hdr *out, size_t *out_mad_size,
-                     u16 *out_mad_pkey_index);
-
-/*
- * Compare the lower 24 bits of the two values.
- * Returns an integer <, ==, or > than zero.
- */
-static inline int ipath_cmp24(u32 a, u32 b)
-{
-       return (((int) a) - ((int) b)) << 8;
-}
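/*
 * Editorial aside (not part of the deleted file): worked examples of the
 * 24-bit comparison above, assuming the two's-complement arithmetic the
 * kernel relies on.  Shifting the 32-bit difference left by 8 discards bits
 * 24..31, so only the circular distance between the 24-bit PSNs decides the
 * sign of the result:
 *
 *     ipath_cmp24(0x000001, 0xfffffe) > 0    distance +3 across the wrap
 *     ipath_cmp24(0x7fffff, 0x000000) > 0    maximum positive distance
 *     ipath_cmp24(0x800001, 0x000000) < 0    treated as "behind" instead
 */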
-
-struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
-
-int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-                           u64 *rwords, u64 *spkts, u64 *rpkts,
-                           u64 *xmit_wait);
-
-int ipath_get_counters(struct ipath_devdata *dd,
-                      struct ipath_verbs_counters *cntrs);
-
-int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int ipath_mcast_tree_empty(void);
-
-__be32 ipath_compute_aeth(struct ipath_qp *qp);
-
-struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
-
-struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
-                             struct ib_qp_init_attr *init_attr,
-                             struct ib_udata *udata);
-
-int ipath_destroy_qp(struct ib_qp *ibqp);
-
-int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
-
-int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                   int attr_mask, struct ib_udata *udata);
-
-int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                  int attr_mask, struct ib_qp_init_attr *init_attr);
-
-unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);
-
-int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
-
-void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
-
-unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
-
-int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
-                    u32 hdrwords, struct ipath_sge_state *ss, u32 len);
-
-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
-
-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
-
-void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
-
-void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
-
-int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
-
-void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
-                    struct ipath_mregion *mr);
-
-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);
-
-int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
-                 struct ib_sge *sge, int acc);
-
-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
-                 u32 len, u64 vaddr, u32 rkey, int acc);
-
-int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-                          struct ib_recv_wr **bad_wr);
-
-struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
-                               struct ib_srq_init_attr *srq_init_attr,
-                               struct ib_udata *udata);
-
-int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-                    enum ib_srq_attr_mask attr_mask,
-                    struct ib_udata *udata);
-
-int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-
-int ipath_destroy_srq(struct ib_srq *ibsrq);
-
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
-
-int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
-                             const struct ib_cq_init_attr *attr,
-                             struct ib_ucontext *context,
-                             struct ib_udata *udata);
-
-int ipath_destroy_cq(struct ib_cq *ibcq);
-
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-
-int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
-struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);
-
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                               u64 virt_addr, int mr_access_flags,
-                               struct ib_udata *udata);
-
-int ipath_dereg_mr(struct ib_mr *ibmr);
-
-struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-                              struct ib_fmr_attr *fmr_attr);
-
-int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
-                      int list_len, u64 iova);
-
-int ipath_unmap_fmr(struct list_head *fmr_list);
-
-int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
-
-void ipath_release_mmap_info(struct kref *ref);
-
-struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
-                                              u32 size,
-                                              struct ib_ucontext *context,
-                                              void *obj);
-
-void ipath_update_mmap_info(struct ipath_ibdev *dev,
-                           struct ipath_mmap_info *ip,
-                           u32 size, void *obj);
-
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-void ipath_insert_rnr_queue(struct ipath_qp *qp);
-
-int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
-                  u32 *lengthp, struct ipath_sge_state *ss);
-
-int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
-
-u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
-                  struct ib_global_route *grh, u32 hwords, u32 nwords);
-
-void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
-                          struct ipath_other_headers *ohdr,
-                          u32 bth0, u32 bth2);
-
-void ipath_do_send(unsigned long data);
-
-void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
-                        enum ib_wc_status status);
-
-int ipath_make_rc_req(struct ipath_qp *qp);
-
-int ipath_make_uc_req(struct ipath_qp *qp);
-
-int ipath_make_ud_req(struct ipath_qp *qp);
-
-int ipath_register_ib_device(struct ipath_devdata *);
-
-void ipath_unregister_ib_device(struct ipath_ibdev *);
-
-void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
-
-int ipath_ib_piobufavail(struct ipath_ibdev *);
-
-unsigned ipath_get_npkeys(struct ipath_devdata *);
-
-u32 ipath_get_cr_errpkey(struct ipath_devdata *);
-
-unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
-
-extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
-
-/*
- * Below converts HCA-specific LinkTrainingState to IB PhysPortState
- * values.
- */
-extern const u8 ipath_cvt_physportstate[];
-#define IB_PHYSPORTSTATE_SLEEP 1
-#define IB_PHYSPORTSTATE_POLL 2
-#define IB_PHYSPORTSTATE_DISABLED 3
-#define IB_PHYSPORTSTATE_CFG_TRAIN 4
-#define IB_PHYSPORTSTATE_LINKUP 5
-#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
-
-extern const int ib_ipath_state_ops[];
-
-extern unsigned int ib_ipath_lkey_table_size;
-
-extern unsigned int ib_ipath_max_cqes;
-
-extern unsigned int ib_ipath_max_cqs;
-
-extern unsigned int ib_ipath_max_qp_wrs;
-
-extern unsigned int ib_ipath_max_qps;
-
-extern unsigned int ib_ipath_max_sges;
-
-extern unsigned int ib_ipath_max_mcast_grps;
-
-extern unsigned int ib_ipath_max_mcast_qp_attached;
-
-extern unsigned int ib_ipath_max_srqs;
-
-extern unsigned int ib_ipath_max_srq_sges;
-
-extern unsigned int ib_ipath_max_srq_wrs;
-
-extern const u32 ib_ipath_rnr_table[];
-
-extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
-
-#endif                         /* IPATH_VERBS_H */
diff --git a/drivers/staging/rdma/ipath/ipath_verbs_mcast.c b/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
deleted file mode 100644 (file)
index 72d476f..0000000
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/rculist.h>
-#include <linux/slab.h>
-
-#include "ipath_verbs.h"
-
-/*
- * Global table of GID to attached QPs.
- * The table is global to all ipath devices since a send from one QP/device
- * needs to be locally routed to any locally attached QPs on the same
- * or different device.
- */
-static struct rb_root mcast_tree;
-static DEFINE_SPINLOCK(mcast_lock);
-
-/**
- * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
- * @qp: the QP to link
- */
-static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
-{
-       struct ipath_mcast_qp *mqp;
-
-       mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
-       if (!mqp)
-               goto bail;
-
-       mqp->qp = qp;
-       atomic_inc(&qp->refcount);
-
-bail:
-       return mqp;
-}
-
-static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
-{
-       struct ipath_qp *qp = mqp->qp;
-
-       /* Notify ipath_destroy_qp() if it is waiting. */
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-
-       kfree(mqp);
-}
-
-/**
- * ipath_mcast_alloc - allocate the multicast GID structure
- * @mgid: the multicast GID
- *
- * A list of QPs will be attached to this structure.
- */
-static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
-{
-       struct ipath_mcast *mcast;
-
-       mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
-       if (!mcast)
-               goto bail;
-
-       mcast->mgid = *mgid;
-       INIT_LIST_HEAD(&mcast->qp_list);
-       init_waitqueue_head(&mcast->wait);
-       atomic_set(&mcast->refcount, 0);
-       mcast->n_attached = 0;
-
-bail:
-       return mcast;
-}
-
-static void ipath_mcast_free(struct ipath_mcast *mcast)
-{
-       struct ipath_mcast_qp *p, *tmp;
-
-       list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
-               ipath_mcast_qp_free(p);
-
-       kfree(mcast);
-}
-
-/**
- * ipath_mcast_find - search the global table for the given multicast GID
- * @mgid: the multicast GID to search for
- *
- * Returns NULL if not found.
- *
- * The caller is responsible for decrementing the reference count if found.
- */
-struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
-{
-       struct rb_node *n;
-       unsigned long flags;
-       struct ipath_mcast *mcast;
-
-       spin_lock_irqsave(&mcast_lock, flags);
-       n = mcast_tree.rb_node;
-       while (n) {
-               int ret;
-
-               mcast = rb_entry(n, struct ipath_mcast, rb_node);
-
-               ret = memcmp(mgid->raw, mcast->mgid.raw,
-                            sizeof(union ib_gid));
-               if (ret < 0)
-                       n = n->rb_left;
-               else if (ret > 0)
-                       n = n->rb_right;
-               else {
-                       atomic_inc(&mcast->refcount);
-                       spin_unlock_irqrestore(&mcast_lock, flags);
-                       goto bail;
-               }
-       }
-       spin_unlock_irqrestore(&mcast_lock, flags);
-
-       mcast = NULL;
-
-bail:
-       return mcast;
-}
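/*
 * Editorial aside (not part of the deleted file): a hypothetical caller
 * showing the reference-count contract documented above.  The
 * decrement-and-wake pattern here is an assumption modelled on what
 * ipath_multicast_detach() waits for; the real receive path is not
 * reproduced.
 */
static void example_mcast_use(union ib_gid *mgid)
{
	struct ipath_mcast *mcast = ipath_mcast_find(mgid);

	if (!mcast)
		return;			/* GID not in the tree */

	/* ... deliver to every QP on mcast->qp_list under RCU ... */

	/* Drop the reference taken by ipath_mcast_find(). */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);	/* detach waits for refcount <= 1 */
}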
-
-/**
- * ipath_mcast_add - insert mcast GID into table and attach QP struct
- * @mcast: the mcast GID table
- * @mqp: the QP to attach
- *
- * Return zero if both were added.  Return EEXIST if the GID was already in
- * the table but the QP was added.  Return ESRCH if the QP was already
- * attached and neither structure was added.
- */
-static int ipath_mcast_add(struct ipath_ibdev *dev,
-                          struct ipath_mcast *mcast,
-                          struct ipath_mcast_qp *mqp)
-{
-       struct rb_node **n = &mcast_tree.rb_node;
-       struct rb_node *pn = NULL;
-       int ret;
-
-       spin_lock_irq(&mcast_lock);
-
-       while (*n) {
-               struct ipath_mcast *tmcast;
-               struct ipath_mcast_qp *p;
-
-               pn = *n;
-               tmcast = rb_entry(pn, struct ipath_mcast, rb_node);
-
-               ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
-                            sizeof(union ib_gid));
-               if (ret < 0) {
-                       n = &pn->rb_left;
-                       continue;
-               }
-               if (ret > 0) {
-                       n = &pn->rb_right;
-                       continue;
-               }
-
-               /* Search the QP list to see if this is already there. */
-               list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
-                       if (p->qp == mqp->qp) {
-                               ret = ESRCH;
-                               goto bail;
-                       }
-               }
-               if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
-                       ret = ENOMEM;
-                       goto bail;
-               }
-
-               tmcast->n_attached++;
-
-               list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
-               ret = EEXIST;
-               goto bail;
-       }
-
-       spin_lock(&dev->n_mcast_grps_lock);
-       if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
-               spin_unlock(&dev->n_mcast_grps_lock);
-               ret = ENOMEM;
-               goto bail;
-       }
-
-       dev->n_mcast_grps_allocated++;
-       spin_unlock(&dev->n_mcast_grps_lock);
-
-       mcast->n_attached++;
-
-       list_add_tail_rcu(&mqp->list, &mcast->qp_list);
-
-       atomic_inc(&mcast->refcount);
-       rb_link_node(&mcast->rb_node, pn, n);
-       rb_insert_color(&mcast->rb_node, &mcast_tree);
-
-       ret = 0;
-
-bail:
-       spin_unlock_irq(&mcast_lock);
-
-       return ret;
-}
-
-int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       struct ipath_ibdev *dev = to_idev(ibqp->device);
-       struct ipath_mcast *mcast;
-       struct ipath_mcast_qp *mqp;
-       int ret;
-
-       /*
-        * Allocate data structures since it's better to do this outside of
-        * spin locks and it will most likely be needed.
-        */
-       mcast = ipath_mcast_alloc(gid);
-       if (mcast == NULL) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-       mqp = ipath_mcast_qp_alloc(qp);
-       if (mqp == NULL) {
-               ipath_mcast_free(mcast);
-               ret = -ENOMEM;
-               goto bail;
-       }
-       switch (ipath_mcast_add(dev, mcast, mqp)) {
-       case ESRCH:
-               /* Neither was used: can't attach the same QP twice. */
-               ipath_mcast_qp_free(mqp);
-               ipath_mcast_free(mcast);
-               ret = -EINVAL;
-               goto bail;
-       case EEXIST:            /* The mcast wasn't used */
-               ipath_mcast_free(mcast);
-               break;
-       case ENOMEM:
-               /* Exceeded the maximum number of mcast groups. */
-               ipath_mcast_qp_free(mqp);
-               ipath_mcast_free(mcast);
-               ret = -ENOMEM;
-               goto bail;
-       default:
-               break;
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       struct ipath_ibdev *dev = to_idev(ibqp->device);
-       struct ipath_mcast *mcast = NULL;
-       struct ipath_mcast_qp *p, *tmp;
-       struct rb_node *n;
-       int last = 0;
-       int ret;
-
-       spin_lock_irq(&mcast_lock);
-
-       /* Find the GID in the mcast table. */
-       n = mcast_tree.rb_node;
-       while (1) {
-               if (n == NULL) {
-                       spin_unlock_irq(&mcast_lock);
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               mcast = rb_entry(n, struct ipath_mcast, rb_node);
-               ret = memcmp(gid->raw, mcast->mgid.raw,
-                            sizeof(union ib_gid));
-               if (ret < 0)
-                       n = n->rb_left;
-               else if (ret > 0)
-                       n = n->rb_right;
-               else
-                       break;
-       }
-
-       /* Search the QP list. */
-       list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
-               if (p->qp != qp)
-                       continue;
-               /*
-                * We found it, so remove it, but don't poison the forward
-                * link until we are sure there are no list walkers.
-                */
-               list_del_rcu(&p->list);
-               mcast->n_attached--;
-
-               /* If this was the last attached QP, remove the GID too. */
-               if (list_empty(&mcast->qp_list)) {
-                       rb_erase(&mcast->rb_node, &mcast_tree);
-                       last = 1;
-               }
-               break;
-       }
-
-       spin_unlock_irq(&mcast_lock);
-
-       if (p) {
-               /*
-                * Wait for any list walkers to finish before freeing the
-                * list element.
-                */
-               wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
-               ipath_mcast_qp_free(p);
-       }
-       if (last) {
-               atomic_dec(&mcast->refcount);
-               wait_event(mcast->wait, !atomic_read(&mcast->refcount));
-               ipath_mcast_free(mcast);
-               spin_lock_irq(&dev->n_mcast_grps_lock);
-               dev->n_mcast_grps_allocated--;
-               spin_unlock_irq(&dev->n_mcast_grps_lock);
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-int ipath_mcast_tree_empty(void)
-{
-       return mcast_tree.rb_node == NULL;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_wc_ppc64.c b/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
deleted file mode 100644 (file)
index 1a7e20a..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on PowerPC only.  Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include "ipath_kernel.h"
-
-/**
- * ipath_enable_wc - enable write combining for MMIO writes to the device
- * @dd: infinipath device
- *
- * Nothing to do on PowerPC, so just return without error.
- */
-int ipath_enable_wc(struct ipath_devdata *dd)
-{
-       return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_wc_x86_64.c b/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
deleted file mode 100644 (file)
index 7b6e4c8..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on x86_64 only.  Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include <linux/pci.h>
-#include <asm/processor.h>
-
-#include "ipath_kernel.h"
-
-/**
- * ipath_enable_wc - enable write combining for MMIO writes to the device
- * @dd: infinipath device
- *
- * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
- * write combining.
- */
-int ipath_enable_wc(struct ipath_devdata *dd)
-{
-       int ret = 0;
-       u64 pioaddr, piolen;
-       unsigned bits;
-       const unsigned long addr = pci_resource_start(dd->pcidev, 0);
-       const size_t len = pci_resource_len(dd->pcidev, 0);
-
-       /*
-        * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
-        * chip.  Linux (possibly the hardware) requires it to be on a power
-        * of 2 address matching the length (which has to be a power of 2).
-        * For rev1, that means the base address, for rev2, it will be just
-        * the PIO buffers themselves.
-        * For chips with two sets of buffers, the calculations are
-        * somewhat more complicated; we need to sum, and the piobufbase
-        * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
-        * The buffers are still packed, so a single range covers both.
-        */
-       if (dd->ipath_piobcnt2k && dd->ipath_piobcnt4k) { /* 2 sizes */
-               unsigned long pio2kbase, pio4kbase;
-               pio2kbase = dd->ipath_piobufbase & 0xffffffffUL;
-               pio4kbase = (dd->ipath_piobufbase >> 32) & 0xffffffffUL;
-               if (pio2kbase < pio4kbase) { /* all, for now */
-                       pioaddr = addr + pio2kbase;
-                       piolen = pio4kbase - pio2kbase +
-                               dd->ipath_piobcnt4k * dd->ipath_4kalign;
-               } else {
-                       pioaddr = addr + pio4kbase;
-                       piolen = pio2kbase - pio4kbase +
-                               dd->ipath_piobcnt2k * dd->ipath_palign;
-               }
-       } else {  /* single buffer size (2K, currently) */
-               pioaddr = addr + dd->ipath_piobufbase;
-               piolen = dd->ipath_piobcnt2k * dd->ipath_palign +
-                       dd->ipath_piobcnt4k * dd->ipath_4kalign;
-       }
-
-       for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-               /* do nothing */ ;
-
-       if (piolen != (1ULL << bits)) {
-               piolen >>= bits;
-               while (piolen >>= 1)
-                       bits++;
-               piolen = 1ULL << (bits + 1);
-       }
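	/*
	 * Editorial aside (not part of the deleted file): a worked example of
	 * the rounding above.  piolen = 0x30000 has its lowest set bit at
	 * position 16 and is not a power of two, so it is shifted down to 3,
	 * 'bits' advances to 17, and piolen becomes 1ULL << 18 = 0x40000,
	 * the next power of two that covers the original range.
	 */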
-       if (pioaddr & (piolen - 1)) {
-               u64 atmp;
-               ipath_dbg("pioaddr %llx not on right boundary for size "
-                         "%llx, fixing\n",
-                         (unsigned long long) pioaddr,
-                         (unsigned long long) piolen);
-               atmp = pioaddr & ~(piolen - 1);
-               if (atmp < addr || (atmp + piolen) > (addr + len)) {
-                       ipath_dev_err(dd, "No way to align address/size "
-                                     "(%llx/%llx), no WC mtrr\n",
-                                     (unsigned long long) atmp,
-                                     (unsigned long long) piolen << 1);
-                       ret = -ENODEV;
-               } else {
-                       ipath_dbg("changing WC base from %llx to %llx, "
-                                 "len from %llx to %llx\n",
-                                 (unsigned long long) pioaddr,
-                                 (unsigned long long) atmp,
-                                 (unsigned long long) piolen,
-                                 (unsigned long long) piolen << 1);
-                       pioaddr = atmp;
-                       piolen <<= 1;
-               }
-       }
-
-       if (!ret) {
-               dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
-               if (dd->wc_cookie < 0) {
-                       ipath_dev_err(dd, "Setting mtrr failed on PIO buffers\n");
-                       ret = -ENODEV;
-               } else if (dd->wc_cookie == 0)
-                       ipath_cdbg(VERBOSE, "Set mtrr for chip to WC not needed\n");
-               else
-                       ipath_cdbg(VERBOSE, "Set mtrr for chip to WC\n");
-       }
-
-       return ret;
-}
-
-/**
- * ipath_disable_wc - disable write combining for MMIO writes to the device
- * @dd: infinipath device
- */
-void ipath_disable_wc(struct ipath_devdata *dd)
-{
-       arch_phys_wc_del(dd->wc_cookie);
-}
index efd6f4560d3e55ceeab571fdc1d0ebc242a38df2..7e8037e230b8e024ae2bb9d8797bba1d9eab7141 100644 (file)
@@ -1,7 +1,7 @@
 menu "Speakup console speech"
 
 config SPEAKUP
-       depends on VT
+       depends on VT && !MN10300
        tristate "Speakup core"
        ---help---
                This is the Speakup screen reader.  Think of it as a
index 63c59bc89b040fe68fcf01d364d5b636e8ac6707..30cf973f326dc467fba09f1a7c85a05acf0f9101 100644 (file)
@@ -264,8 +264,9 @@ static struct notifier_block vt_notifier_block = {
        .notifier_call = vt_notifier_call,
 };
 
-static unsigned char get_attributes(u16 *pos)
+static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
 {
+       pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
        return (u_char) (scr_readw(pos) >> 8);
 }
 
@@ -275,7 +276,7 @@ static void speakup_date(struct vc_data *vc)
        spk_y = spk_cy = vc->vc_y;
        spk_pos = spk_cp = vc->vc_pos;
        spk_old_attr = spk_attr;
-       spk_attr = get_attributes((u_short *) spk_pos);
+       spk_attr = get_attributes(vc, (u_short *)spk_pos);
 }
 
 static void bleep(u_short val)
@@ -469,8 +470,12 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
        u16 ch = ' ';
 
        if (vc && pos) {
-               u16 w = scr_readw(pos);
-               u16 c = w & 0xff;
+               u16 w;
+               u16 c;
+
+               pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
+               w = scr_readw(pos);
+               c = w & 0xff;
 
                if (w & vc->vc_hi_font_mask)
                        c |= 0x100;
@@ -746,7 +751,7 @@ static int get_line(struct vc_data *vc)
        u_char tmp2;
 
        spk_old_attr = spk_attr;
-       spk_attr = get_attributes((u_short *) spk_pos);
+       spk_attr = get_attributes(vc, (u_short *)spk_pos);
        for (i = 0; i < vc->vc_cols; i++) {
                buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2);
                tmp += 2;
@@ -811,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
        u_short saved_punc_mask = spk_punc_mask;
 
        spk_old_attr = spk_attr;
-       spk_attr = get_attributes((u_short *) from);
+       spk_attr = get_attributes(vc, (u_short *)from);
        while (from < to) {
                buf[i++] = (char)get_char(vc, (u_short *) from, &tmp);
                from += 2;
@@ -886,7 +891,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
        sentmarks[bn][0] = &sentbuf[bn][0];
        i = 0;
        spk_old_attr = spk_attr;
-       spk_attr = get_attributes((u_short *) start);
+       spk_attr = get_attributes(vc, (u_short *)start);
 
        while (start < end) {
                sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp);
@@ -1585,7 +1590,7 @@ static int count_highlight_color(struct vc_data *vc)
                u16 *ptr;
 
                for (ptr = start; ptr < end; ptr++) {
-                       ch = get_attributes(ptr);
+                       ch = get_attributes(vc, ptr);
                        bg = (ch & 0x70) >> 4;
                        speakup_console[vc_num]->ht.bgcount[bg]++;
                }
index aa5ab6c80ed4ffefc12aaca9c1337e403371d478..41ef099b7aa69f345d33998c46e2bce11ad691d9 100644 (file)
@@ -142,7 +142,9 @@ static void __speakup_paste_selection(struct work_struct *work)
        struct tty_ldisc *ld;
        DECLARE_WAITQUEUE(wait, current);
 
-       ld = tty_ldisc_ref_wait(tty);
+       ld = tty_ldisc_ref(tty);
+       if (!ld)
+               goto tty_unref;
        tty_buffer_lock_exclusive(&vc->port);
 
        add_wait_queue(&vc->paste_wait, &wait);
@@ -162,6 +164,7 @@ static void __speakup_paste_selection(struct work_struct *work)
 
        tty_buffer_unlock_exclusive(&vc->port);
        tty_ldisc_deref(ld);
+tty_unref:
        tty_kref_put(tty);
 }
 
index 3b5835b2812849f0c32055f065ef0be6a0c4d0d0..a5bbb338f275495c18bf6051adf60ee8a3bbcc1d 100644 (file)
@@ -6,6 +6,11 @@
 #include "spk_priv.h"
 #include "serialio.h"
 
+#include <linux/serial_core.h>
+/* WARNING:  Do not change this to <linux/serial.h> without testing that
+ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
+#include <asm/serial.h>
+
 #ifndef SERIAL_PORT_DFNS
 #define SERIAL_PORT_DFNS
 #endif
@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
        int baud = 9600, quot = 0;
        unsigned int cval = 0;
        int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
-       const struct old_serial_port *ser = rs_table + index;
+       const struct old_serial_port *ser;
        int err;
 
+       if (index >= ARRAY_SIZE(rs_table)) {
+               pr_info("no port info for ttyS%d\n", index);
+               return NULL;
+       }
+       ser = rs_table + index;
+
        /*      Divisor, bytesize and parity */
        quot = ser->baud_base / baud;
        cval = cflag & (CSIZE | CSTOPB);
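Aside, not part of the commit: the check added above rejects out-of-range ttyS indexes before dereferencing rs_table. A minimal user-space sketch of the same ARRAY_SIZE bounds-check pattern; the table contents and the reduced struct are made up for illustration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* reduced stand-in for the kernel's struct old_serial_port */
struct old_serial_port {
        unsigned int baud_base;
};

/* hypothetical table; the kernel builds rs_table from SERIAL_PORT_DFNS */
static const struct old_serial_port rs_table[] = {
        { 115200 }, { 115200 }, { 115200 }, { 115200 },
};

static const struct old_serial_port *lookup_port(int index)
{
        /* same idea as the added check: never index past the static table */
        if (index < 0 || index >= (int)ARRAY_SIZE(rs_table)) {
                fprintf(stderr, "no port info for ttyS%d\n", index);
                return NULL;
        }
        return rs_table + index;
}

int main(void)
{
        printf("ttyS2: %s, ttyS8: %s\n",
               lookup_port(2) ? "found" : "missing",
               lookup_port(8) ? "found" : "missing");
        return 0;
}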
index 8cc4ac64a91c36347b9307addb88ae99d545d2b7..7c92c09be21386c3d60ff41b76a6694234c93432 100644 (file)
@@ -195,7 +195,7 @@ config IMX_THERMAL
          passive trip is crossed.
 
 config SPEAR_THERMAL
-       bool "SPEAr thermal sensor driver"
+       tristate "SPEAr thermal sensor driver"
        depends on PLAT_SPEAR || COMPILE_TEST
        depends on OF
        help
@@ -237,8 +237,8 @@ config DOVE_THERMAL
          framework.
 
 config DB8500_THERMAL
-       bool "DB8500 thermal management"
-       depends on ARCH_U8500
+       tristate "DB8500 thermal management"
+       depends on MFD_DB8500_PRCMU
        default y
        help
          Adds DB8500 thermal management implementation according to the thermal
index be4eedcb839ac22158fe180c2abc37df712d9511..9043f8f918529bd600eda1bbffe3bdf91f036ef4 100644 (file)
@@ -475,14 +475,10 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
 
        sensor_np = of_node_get(dev->of_node);
 
-       for_each_child_of_node(np, child) {
+       for_each_available_child_of_node(np, child) {
                struct of_phandle_args sensor_specs;
                int ret, id;
 
-               /* Check whether child is enabled or not */
-               if (!of_device_is_available(child))
-                       continue;
-
                /* For now, thermal framework supports only 1 sensor per zone */
                ret = of_parse_phandle_with_args(child, "thermal-sensors",
                                                 "#thermal-sensor-cells",
@@ -881,16 +877,12 @@ int __init of_parse_thermal_zones(void)
                return 0; /* Run successfully on systems without thermal DT */
        }
 
-       for_each_child_of_node(np, child) {
+       for_each_available_child_of_node(np, child) {
                struct thermal_zone_device *zone;
                struct thermal_zone_params *tzp;
                int i, mask = 0;
                u32 prop;
 
-               /* Check whether child is enabled or not */
-               if (!of_device_is_available(child))
-                       continue;
-
                tz = thermal_of_build_thermal_zone(child);
                if (IS_ERR(tz)) {
                        pr_err("failed to build thermal zone %s: %ld\n",
@@ -968,13 +960,9 @@ void of_thermal_destroy_zones(void)
                return;
        }
 
-       for_each_child_of_node(np, child) {
+       for_each_available_child_of_node(np, child) {
                struct thermal_zone_device *zone;
 
-               /* Check whether child is enabled or not */
-               if (!of_device_is_available(child))
-                       continue;
-
                zone = thermal_zone_get_zone_by_name(child->name);
                if (IS_ERR(zone))
                        continue;
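Aside, not part of the commit: for_each_available_child_of_node() folds the of_device_is_available() test into the iterator, which is why the explicit status checks above disappear. A small plain-C sketch of the same idiom on a made-up list type:

#include <stdio.h>

struct node {
        const char *name;
        int available;
        struct node *next;
};

/* iterate children, skipping disabled ones; the availability test lives in
 * the iterator instead of being repeated in every loop body */
#define for_each_available_child(pos, head) \
        for ((pos) = (head); (pos); (pos) = (pos)->next) \
                if (!(pos)->available) {} else

int main(void)
{
        struct node c = { "thermal-zone-c", 1, NULL };
        struct node b = { "thermal-zone-b", 0, &c };
        struct node a = { "thermal-zone-a", 1, &b };
        struct node *pos;

        for_each_available_child(pos, &a)
                printf("%s\n", pos->name);   /* prints a and c, skips b */
        return 0;
}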
index 44b9c485157d8c6e624548ee7c7cfccacea241d9..0e735acea33afc7b8e747857b57046647d50e7d9 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reboot.h>
@@ -75,8 +76,10 @@ struct rcar_thermal_priv {
 #define rcar_has_irq_support(priv)     ((priv)->common->base)
 #define rcar_id_to_shift(priv)         ((priv)->id * 8)
 
+#define USE_OF_THERMAL 1
 static const struct of_device_id rcar_thermal_dt_ids[] = {
        { .compatible = "renesas,rcar-thermal", },
+       { .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL },
        {},
 };
 MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
@@ -200,9 +203,9 @@ err_out_unlock:
        return ret;
 }
 
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
+static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
+                                        int *temp)
 {
-       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
        int tmp;
        int ret;
 
@@ -226,6 +229,20 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
        return 0;
 }
 
+static int rcar_thermal_of_get_temp(void *data, int *temp)
+{
+       struct rcar_thermal_priv *priv = data;
+
+       return rcar_thermal_get_current_temp(priv, temp);
+}
+
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
+{
+       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+
+       return rcar_thermal_get_current_temp(priv, temp);
+}
+
 static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
                                      int trip, enum thermal_trip_type *type)
 {
@@ -282,6 +299,10 @@ static int rcar_thermal_notify(struct thermal_zone_device *zone,
        return 0;
 }
 
+static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
+       .get_temp       = rcar_thermal_of_get_temp,
+};
+
 static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
        .get_temp       = rcar_thermal_get_temp,
        .get_trip_type  = rcar_thermal_get_trip_type,
@@ -318,14 +339,20 @@ static void rcar_thermal_work(struct work_struct *work)
 
        priv = container_of(work, struct rcar_thermal_priv, work.work);
 
-       rcar_thermal_get_temp(priv->zone, &cctemp);
+       ret = rcar_thermal_get_current_temp(priv, &cctemp);
+       if (ret < 0)
+               return;
+
        ret = rcar_thermal_update_temp(priv);
        if (ret < 0)
                return;
 
        rcar_thermal_irq_enable(priv);
 
-       rcar_thermal_get_temp(priv->zone, &nctemp);
+       ret = rcar_thermal_get_current_temp(priv, &nctemp);
+       if (ret < 0)
+               return;
+
        if (nctemp != cctemp)
                thermal_zone_device_update(priv->zone);
 }
@@ -403,6 +430,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        struct rcar_thermal_priv *priv;
        struct device *dev = &pdev->dev;
        struct resource *res, *irq;
+       const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev);
+       unsigned long of_data = (unsigned long)of_id->data;
        int mres = 0;
        int i;
        int ret = -ENODEV;
@@ -463,7 +492,13 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                if (ret < 0)
                        goto error_unregister;
 
-               priv->zone = thermal_zone_device_register("rcar_thermal",
+               if (of_data == USE_OF_THERMAL)
+                       priv->zone = thermal_zone_of_sensor_register(
+                                               dev, i, priv,
+                                               &rcar_thermal_zone_of_ops);
+               else
+                       priv->zone = thermal_zone_device_register(
+                                               "rcar_thermal",
                                                1, 0, priv,
                                                &rcar_thermal_zone_ops, NULL, 0,
                                                idle);
index 534dd913666283fa13eecfaeb8823ea3d55ac0c8..81b35aace9de0439c4dd76c4c4c0d91597d3b3f3 100644 (file)
@@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = {
        .get_temp = thermal_get_temp,
 };
 
-#ifdef CONFIG_PM
-static int spear_thermal_suspend(struct device *dev)
+static int __maybe_unused spear_thermal_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev)
        return 0;
 }
 
-static int spear_thermal_resume(struct device *dev)
+static int __maybe_unused spear_thermal_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
                spear_thermal_resume);
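Aside, not part of the commit: dropping the #ifdef CONFIG_PM works because SIMPLE_DEV_PM_OPS only references the handlers when PM is enabled, and __maybe_unused keeps the now-unreferenced functions from triggering warnings. A user-space sketch of the idea, with DEMO_PM standing in for CONFIG_PM:

#include <stdio.h>

/* stand-in for the kernel's __maybe_unused annotation */
#define demo_maybe_unused __attribute__((unused))

/* always compiled; the attribute silences "defined but not used" in builds
 * where nothing below references it */
static int demo_maybe_unused demo_suspend(void)
{
        return 0;
}

#ifdef DEMO_PM
static int (*suspend_cb)(void) = demo_suspend;
#else
static int (*suspend_cb)(void);   /* handler compiled but unreferenced */
#endif

int main(void)
{
        printf("suspend callback is %s\n", suspend_cb ? "wired up" : "compiled out");
        return 0;
}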
index d9a5fc28fef4bbfe56850e258b726ca3464fd82b..b280abaad91b717639011f1407157eab5f3aaad1 100644 (file)
@@ -269,16 +269,13 @@ static void n_tty_check_throttle(struct tty_struct *tty)
 
 static void n_tty_check_unthrottle(struct tty_struct *tty)
 {
-       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
-           tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
+       if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
                if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
                        return;
                if (!tty->count)
                        return;
                n_tty_kick_worker(tty);
-               n_tty_write_wakeup(tty->link);
-               if (waitqueue_active(&tty->link->write_wait))
-                       wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
+               tty_wakeup(tty->link);
                return;
        }
 
index b3110040164ae64fa29e66fae2d7f5bd4d7d139f..2348fa6137070e19c19d97a5dc436b1a0b762441 100644 (file)
@@ -681,7 +681,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 /* this is called once with whichever end is closed last */
 static void pty_unix98_shutdown(struct tty_struct *tty)
 {
-       devpts_kill_index(tty->driver_data, tty->index);
+       struct inode *ptmx_inode;
+
+       if (tty->driver->subtype == PTY_TYPE_MASTER)
+               ptmx_inode = tty->driver_data;
+       else
+               ptmx_inode = tty->link->driver_data;
+       devpts_kill_index(ptmx_inode, tty->index);
+       devpts_del_ref(ptmx_inode);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -773,6 +780,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
        tty->driver_data = inode;
 
+       /*
+        * In the case where all references to ptmx inode are dropped and we
+        * still have /dev/tty opened pointing to the master/slave pair (ptmx
+        * is closed/released before /dev/tty), we must make sure that the inode
+        * is still valid when we call the final pty_unix98_shutdown, thus we
+        * hold an additional reference to the ptmx inode. For the same
+        * last-close-of-/dev/tty case, we also need to make sure the
+        * super_block isn't destroyed (devpts instance unmounted) before
+        * /dev/tty is closed and devpts_kill_index is called on its release.
+        */
+       devpts_add_ref(inode);
+
        tty_add_file(tty, filp);
 
        slave_inode = devpts_pty_new(inode,
index 4097f3f65b3bb1bfc3e3d4d6a40f05fb46c70618..7cd6f9a9054212d905e808bcfba2d76d676aaae2 100644 (file)
@@ -1379,6 +1379,9 @@ ce4100_serial_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_INTEL_BSW_UART1  0x228a
 #define PCI_DEVICE_ID_INTEL_BSW_UART2  0x228c
 
+#define PCI_DEVICE_ID_INTEL_BDW_UART1  0x9ce3
+#define PCI_DEVICE_ID_INTEL_BDW_UART2  0x9ce4
+
 #define BYT_PRV_CLK                    0x800
 #define BYT_PRV_CLK_EN                 (1 << 0)
 #define BYT_PRV_CLK_M_VAL_SHIFT                1
@@ -1461,11 +1464,13 @@ byt_serial_setup(struct serial_private *priv,
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_BYT_UART1:
        case PCI_DEVICE_ID_INTEL_BSW_UART1:
+       case PCI_DEVICE_ID_INTEL_BDW_UART1:
                rx_param->src_id = 3;
                tx_param->dst_id = 2;
                break;
        case PCI_DEVICE_ID_INTEL_BYT_UART2:
        case PCI_DEVICE_ID_INTEL_BSW_UART2:
+       case PCI_DEVICE_ID_INTEL_BDW_UART2:
                rx_param->src_id = 5;
                tx_param->dst_id = 4;
                break;
@@ -1936,6 +1941,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
 #define PCIE_VENDOR_ID_WCH             0x1c00
 #define PCIE_DEVICE_ID_WCH_CH382_2S1P  0x3250
 #define PCIE_DEVICE_ID_WCH_CH384_4S    0x3470
+#define PCIE_DEVICE_ID_WCH_CH382_2S    0x3253
 
 #define PCI_VENDOR_ID_PERICOM                  0x12D8
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7951       0x7951
@@ -2062,6 +2068,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = byt_serial_setup,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BDW_UART1,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = byt_serial_setup,
+       },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BDW_UART2,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = byt_serial_setup,
+       },
        /*
         * ITE
         */
@@ -2618,6 +2638,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_wch_ch353_setup,
        },
+       /* WCH CH382 2S card (16850 clone) */
+       {
+               .vendor         = PCIE_VENDOR_ID_WCH,
+               .device         = PCIE_DEVICE_ID_WCH_CH382_2S,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_wch_ch38x_setup,
+       },
        /* WCH CH382 2S1P card (16850 clone) */
        {
                .vendor         = PCIE_VENDOR_ID_WCH,
@@ -2936,6 +2964,7 @@ enum pci_board_num_t {
        pbn_fintek_4,
        pbn_fintek_8,
        pbn_fintek_12,
+       pbn_wch382_2,
        pbn_wch384_4,
        pbn_pericom_PI7C9X7951,
        pbn_pericom_PI7C9X7952,
@@ -3756,6 +3785,13 @@ static struct pciserial_board pci_boards[] = {
                .base_baud      = 115200,
                .first_offset   = 0x40,
        },
+       [pbn_wch382_2] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 2,
+               .base_baud      = 115200,
+               .uart_offset    = 8,
+               .first_offset   = 0xC0,
+       },
        [pbn_wch384_4] = {
                .flags          = FL_BASE0,
                .num_ports      = 4,
@@ -5506,6 +5542,16 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
                pbn_byt },
 
+       /* Intel Broadwell */
+       {       PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
+               PCI_ANY_ID,  PCI_ANY_ID,
+               PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+               pbn_byt },
+       {       PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
+               PCI_ANY_ID,  PCI_ANY_ID,
+               PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+               pbn_byt },
+
        /*
         * Intel Quark x1000
         */
@@ -5545,6 +5591,10 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_b0_bt_2_115200 },
 
+       {       PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0, 0, pbn_wch382_2 },
+
        {       PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_wch384_4 },
index b645f9228ed77b90ac6d4791e1b0ab27056a066b..fa49eb1e2fa2429f1c162d1464d06fb727a91d1c 100644 (file)
@@ -1165,7 +1165,7 @@ serial_omap_type(struct uart_port *port)
 
 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
 
-static void wait_for_xmitr(struct uart_omap_port *up)
+static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
 {
        unsigned int status, tmout = 10000;
 
@@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
 
 /* Enable or disable the rs485 support */
 static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
 {
        struct uart_omap_port *up = to_uart_omap_port(port);
        unsigned int mode;
@@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
        up->ier = 0;
        serial_out(up, UART_IER, 0);
 
+       /* Clamp the delays to [0, 100ms] */
+       rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+       rs485->delay_rts_after_send  = min(rs485->delay_rts_after_send, 100U);
+
        /* store new config */
-       port->rs485 = *rs485conf;
+       port->rs485 = *rs485;
 
        /*
         * Just as a precaution, only allow rs485
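Aside, not part of the commit: the clamp added above bounds the RTS delays that user space passes in through the standard TIOCSRS485 ioctl. A minimal sketch of such a caller; the device node path is only an example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
        struct serial_rs485 rs485;
        int fd = open("/dev/ttyO0", O_RDWR | O_NOCTTY);  /* example OMAP UART node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&rs485, 0, sizeof(rs485));
        rs485.flags = SER_RS485_ENABLED;
        rs485.delay_rts_before_send = 250;  /* the driver now clamps this to 100 ms */
        rs485.delay_rts_after_send  = 50;

        if (ioctl(fd, TIOCSRS485, &rs485) < 0)
                perror("TIOCSRS485");

        close(fd);
        return 0;
}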
index 892c923547450518cb5d7a4714f570a6803253bd..a7eacef1bd2216ef697c12d046cb8975f9fefb20 100644 (file)
@@ -1463,13 +1463,13 @@ static int tty_reopen(struct tty_struct *tty)
 {
        struct tty_driver *driver = tty->driver;
 
-       if (!tty->count)
-               return -EIO;
-
        if (driver->type == TTY_DRIVER_TYPE_PTY &&
            driver->subtype == PTY_TYPE_MASTER)
                return -EIO;
 
+       if (!tty->count)
+               return -EAGAIN;
+
        if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
                return -EBUSY;
 
@@ -2065,9 +2065,13 @@ retry_open:
 
                if (tty) {
                        mutex_unlock(&tty_mutex);
-                       tty_lock(tty);
-                       /* safe to drop the kref from tty_driver_lookup_tty() */
-                       tty_kref_put(tty);
+                       retval = tty_lock_interruptible(tty);
+                       tty_kref_put(tty);  /* drop kref from tty_driver_lookup_tty() */
+                       if (retval) {
+                               if (retval == -EINTR)
+                                       retval = -ERESTARTSYS;
+                               goto err_unref;
+                       }
                        retval = tty_reopen(tty);
                        if (retval < 0) {
                                tty_unlock(tty);
@@ -2083,7 +2087,11 @@ retry_open:
 
        if (IS_ERR(tty)) {
                retval = PTR_ERR(tty);
-               goto err_file;
+               if (retval != -EAGAIN || signal_pending(current))
+                       goto err_file;
+               tty_free_file(filp);
+               schedule();
+               goto retry_open;
        }
 
        tty_add_file(tty, filp);
@@ -2152,6 +2160,7 @@ retry_open:
        return 0;
 err_unlock:
        mutex_unlock(&tty_mutex);
+err_unref:
        /* after locks to avoid deadlock */
        if (!IS_ERR_OR_NULL(driver))
                tty_driver_kref_put(driver);
@@ -2648,6 +2657,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
        return ret;
 }
 
+/**
+ *     tiocgetd        -       get line discipline
+ *     @tty: tty device
+ *     @p: pointer to user data
+ *
+ *     Retrieves the line discipline id directly from the ldisc.
+ *
+ *     Locking: waits for ldisc reference (in case the line discipline
+ *             is changing or the tty is being hungup)
+ */
+
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+       struct tty_ldisc *ld;
+       int ret;
+
+       ld = tty_ldisc_ref_wait(tty);
+       ret = put_user(ld->ops->num, p);
+       tty_ldisc_deref(ld);
+       return ret;
+}
+
 /**
  *     send_break      -       performed time break
  *     @tty: device to break on
@@ -2874,7 +2905,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case TIOCGSID:
                return tiocgsid(tty, real_tty, p);
        case TIOCGETD:
-               return put_user(tty->ldisc->ops->num, (int __user *)p);
+               return tiocgetd(tty, p);
        case TIOCSETD:
                return tiocsetd(tty, p);
        case TIOCVHANGUP:
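Aside, not part of the commit: TIOCGETD is the user-visible path into the new tiocgetd() helper; the only behavioural change is that the ldisc number is now read under an ldisc reference. A minimal caller:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/tty", O_RDONLY | O_NOCTTY);
        int ldisc;

        if (fd < 0) {
                perror("open /dev/tty");
                return 1;
        }

        /* reaches tty_ioctl() -> tiocgetd(), which takes an ldisc reference */
        if (ioctl(fd, TIOCGETD, &ldisc) < 0) {
                perror("TIOCGETD");
                close(fd);
                return 1;
        }

        printf("current line discipline: %d\n", ldisc);
        close(fd);
        return 0;
}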
index 77703a3912075a7b740eb4626a5159d55ea6f57a..dfa9ec03fa8e06dfe03a6e0ae04c62a2ac774103 100644 (file)
@@ -19,6 +19,19 @@ void __lockfunc tty_lock(struct tty_struct *tty)
 }
 EXPORT_SYMBOL(tty_lock);
 
+int tty_lock_interruptible(struct tty_struct *tty)
+{
+       int ret;
+
+       if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
+               return -EIO;
+       tty_kref_get(tty);
+       ret = mutex_lock_interruptible(&tty->legacy_mutex);
+       if (ret)
+               tty_kref_put(tty);
+       return ret;
+}
+
 void __lockfunc tty_unlock(struct tty_struct *tty)
 {
        if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty))
index e7cbc44eef57529a23c479413538f1c2f924b55c..bd51bdd0a7bf2536617b1a27e3c846105c31a821 100644 (file)
@@ -4250,6 +4250,7 @@ unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
 {
        return screenpos(vc, 2 * w_offset, viewed);
 }
+EXPORT_SYMBOL_GPL(screen_pos);
 
 void getconsxy(struct vc_data *vc, unsigned char *p)
 {
index 26ca4f910cb020aae539f29b260a98fe1eee92b0..fa4e23930614a47ac14ece43e863286fadb91402 100644 (file)
@@ -428,7 +428,8 @@ static void acm_read_bulk_callback(struct urb *urb)
                set_bit(rb->index, &acm->read_urbs_free);
                dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
                                                        __func__, status);
-               return;
+               if ((status != -ENOENT) || (urb->actual_length == 0))
+                       return;
        }
 
        usb_mark_last_busy(acm->dev);
@@ -1404,6 +1405,8 @@ made_compressed_probe:
                                usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
                                NULL, acm->writesize, acm_write_bulk, snd);
                snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+               if (quirks & SEND_ZERO_PACKET)
+                       snd->urb->transfer_flags |= URB_ZERO_PACKET;
                snd->instance = acm;
        }
 
@@ -1838,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
        },
 #endif
 
+       /* Samsung phone in firmware update mode */
+       { USB_DEVICE(0x04e8, 0x685d),
+       .driver_info = IGNORE_DEVICE,
+       },
+
        /* Exclude Infineon Flash Loader utility */
        { USB_DEVICE(0x058b, 0x0041),
        .driver_info = IGNORE_DEVICE,
@@ -1861,6 +1869,10 @@ static const struct usb_device_id acm_ids[] = {
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
                USB_CDC_ACM_PROTO_AT_CDMA) },
 
+       { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
+       .driver_info = SEND_ZERO_PACKET,
+       },
+
        { }
 };
 
index dd9af38e7cda612106e09c89a298c4397ccf7494..ccfaba9ab4e49cd4a009132397d9467bc949bfd0 100644 (file)
@@ -134,3 +134,4 @@ struct acm {
 #define IGNORE_DEVICE                  BIT(5)
 #define QUIRK_CONTROL_LINE_STATE       BIT(6)
 #define CLEAR_HALT_CONDITIONS          BIT(7)
+#define SEND_ZERO_PACKET               BIT(8)
index 9eb1cff28bd4b2499e4dfd9ed8b91e53901df184..c3640f8a8fb34440a8cb96b0320c26835131cbef 100644 (file)
@@ -28,7 +28,6 @@
 #ifdef CONFIG_PPC_PMAC
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
 #include <asm/prom.h>
 #endif
 
index 51b436918f7892b1249c7ec1b19dad515a9a5755..350dcd9af5d86eb2181e6cc8f77f25a33f712cce 100644 (file)
@@ -5401,7 +5401,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
        }
 
        bos = udev->bos;
-       udev->bos = NULL;
 
        for (i = 0; i < SET_CONFIG_TRIES; ++i) {
 
@@ -5494,8 +5493,11 @@ done:
        usb_set_usb2_hardware_lpm(udev, 1);
        usb_unlocked_enable_lpm(udev);
        usb_enable_ltm(udev);
-       usb_release_bos_descriptor(udev);
-       udev->bos = bos;
+       /* release the new BOS descriptor allocated by hub_port_init() */
+       if (udev->bos != bos) {
+               usb_release_bos_descriptor(udev);
+               udev->bos = bos;
+       }
        return 0;
 
 re_enumerate:
index 39a0fa8a4c0aea17b9ecf330c127b110b4b54ba4..e991d55914db649804a6357821f42e78355e6e5b 100644 (file)
@@ -572,12 +572,6 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
        set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
        clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
 
-       /*
-        * If the force mode bit is already set, don't set it.
-        */
-       if ((gusbcfg & set) && !(gusbcfg & clear))
-               return false;
-
        gusbcfg &= ~clear;
        gusbcfg |= set;
        dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
@@ -3278,9 +3272,6 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
 /**
  * During device initialization, read various hardware configuration
  * registers and interpret the contents.
- *
- * This should be called during driver probe. It will perform a core
- * soft reset in order to get the reset values of the parameters.
  */
 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
 {
@@ -3288,7 +3279,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
        unsigned width;
        u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
        u32 grxfsiz;
-       int retval;
 
        /*
         * Attempt to ensure this device is really a DWC_otg Controller.
@@ -3308,10 +3298,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
                hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
                hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
 
-       retval = dwc2_core_reset(hsotg);
-       if (retval)
-               return retval;
-
        hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
        hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
        hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
index 510f787434b3d574e40cfe4a5a41511c92f1ad50..690b9fd98b55165a8b1d6a7b8bce30f730ac6805 100644 (file)
@@ -530,7 +530,13 @@ static int dwc2_driver_probe(struct platform_device *dev)
        if (retval)
                return retval;
 
-       /* Reset the controller and detect hardware config values */
+       /*
+        * Reset before dwc2_get_hwparams() so that it reads the power-on
+        * reset values from the registers.
+        */
+       dwc2_core_reset_and_force_dr_mode(hsotg);
+
+       /* Detect config values from hardware */
        retval = dwc2_get_hwparams(hsotg);
        if (retval)
                goto error;
index af023a81a0b02160f85e4800594b49aa318190c2..7d1dd82a95ac7325c3da0b45d0d855a31f7f9965 100644 (file)
@@ -2789,6 +2789,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
        dwc->gadget.speed               = USB_SPEED_UNKNOWN;
        dwc->gadget.sg_supported        = true;
        dwc->gadget.name                = "dwc3-gadget";
+       dwc->gadget.is_otg              = dwc->dr_mode == USB_DR_MODE_OTG;
 
        /*
         * FIXME We might be setting max_speed to <SUPER, however versions
index 04ce6b156b350e5dd0f9f9a321c79eb4d173b656..e0244fb3903dc85287f509bbac97600734c9b5af 100644 (file)
@@ -112,12 +112,16 @@ static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id)
        offset = start;
        if (!start || start == XHCI_HCC_PARAMS_OFFSET) {
                val = readl(base + XHCI_HCC_PARAMS_OFFSET);
+               if (val == ~0)
+                       return 0;
                offset = XHCI_HCC_EXT_CAPS(val) << 2;
                if (!offset)
                        return 0;
        };
        do {
                val = readl(base + offset);
+               if (val == ~0)
+                       return 0;
                if (XHCI_EXT_CAPS_ID(val) == id && offset != start)
                        return offset;
 
index c30de7c39f44088e302b257a480a6c7b159ff167..73f763c4f5f591a2c19053083dedf7b6aa252f31 100644 (file)
@@ -275,8 +275,9 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
                return false;
 
        /*
-        * for LS & FS periodic endpoints which its device don't attach
-        * to TT are also ignored, root-hub will schedule them directly
+        * LS & FS periodic endpoints whose device is not behind a TT are
+        * also ignored; the root hub schedules them directly, but the
+        * @bpkts field of the endpoint context still needs to be set to 1.
         */
        if (is_fs_or_ls(speed) && !has_tt)
                return false;
@@ -339,8 +340,17 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
                GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
                usb_endpoint_dir_in(&ep->desc), ep);
 
-       if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
+       if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
+               /*
+                * set @bpkts to 1 if it is an LS or FS periodic endpoint whose
+                * device is not connected through an external HS hub
+                */
+               if (usb_endpoint_xfer_int(&ep->desc)
+                       || usb_endpoint_xfer_isoc(&ep->desc))
+                       ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
+
                return 0;
+       }
 
        bw_index = get_bw_index(xhci, udev, ep);
        sch_bw = &sch_array[bw_index];
index c9ab6a44c34aef05968c0b7e053e63cafd392d8f..9532f5aef71bfe310b499db66a266e9a2c7b32f4 100644 (file)
@@ -696,9 +696,24 @@ static int xhci_mtk_remove(struct platform_device *dev)
 }
 
 #ifdef CONFIG_PM_SLEEP
+/*
+ * If IP sleep fails and all clocks are then disabled, any register access will
+ * hang the AHB bus, so stop polling the roothubs to avoid register accesses
+ * during bus suspend. There is no need to check whether IP sleep failed: if it
+ * did, SPM wakes the system immediately after system suspend completes, which
+ * is exactly the behaviour we want.
+ */
 static int xhci_mtk_suspend(struct device *dev)
 {
        struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+       struct usb_hcd *hcd = mtk->hcd;
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+       xhci_dbg(xhci, "%s: stop port polling\n", __func__);
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       del_timer_sync(&hcd->rh_timer);
+       clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+       del_timer_sync(&xhci->shared_hcd->rh_timer);
 
        xhci_mtk_host_disable(mtk);
        xhci_mtk_phy_power_off(mtk);
@@ -710,11 +725,19 @@ static int xhci_mtk_suspend(struct device *dev)
 static int xhci_mtk_resume(struct device *dev)
 {
        struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+       struct usb_hcd *hcd = mtk->hcd;
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
        usb_wakeup_disable(mtk);
        xhci_mtk_clks_enable(mtk);
        xhci_mtk_phy_power_on(mtk);
        xhci_mtk_host_enable(mtk);
+
+       xhci_dbg(xhci, "%s: restart port polling\n", __func__);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
+       set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+       usb_hcd_poll_rh_status(xhci->shared_hcd);
        return 0;
 }
 
index 58c43ed7ff3b6c5291bab17bb646c2bbefcf67b6..f0640b7a1c42e2b82c4b8c6071fa0c43561ab7e7 100644 (file)
@@ -28,7 +28,9 @@
 #include "xhci.h"
 #include "xhci-trace.h"
 
-#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define SSIC_PORT_NUM          2
+#define SSIC_PORT_CFG2         0x880c
+#define SSIC_PORT_CFG2_OFFSET  0x30
 #define PROG_DONE              (1 << 30)
 #define SSIC_PORT_UNUSED       (1 << 31)
 
@@ -45,6 +47,7 @@
 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI            0x22b5
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI                0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI       0x9d2f
+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI             0x0aa8
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -151,9 +154,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
                 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
-                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
+                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
                xhci->quirks |= XHCI_PME_STUCK_QUIRK;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+               xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+       }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_EJ168) {
                xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -312,22 +320,20 @@ static void xhci_pci_remove(struct pci_dev *dev)
  * SSIC PORT need to be marked as "unused" before putting xHCI
  * into D3. After D3 exit, the SSIC port need to be marked as "used".
  * Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
  */
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
        u32 val;
        void __iomem *reg;
+       int i;
 
-       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
-
-               reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+       for (i = 0; i < SSIC_PORT_NUM; i++) {
+               reg = (void __iomem *) xhci->cap_regs +
+                               SSIC_PORT_CFG2 +
+                               i * SSIC_PORT_CFG2_OFFSET;
 
-               /* Notify SSIC that SSIC profile programming is not done */
+               /* Notify SSIC that SSIC profile programming is not done. */
                val = readl(reg) & ~PROG_DONE;
                writel(val, reg);
 
@@ -344,6 +350,17 @@ static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
                writel(val, reg);
                readl(reg);
        }
+}
+
+/*
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       void __iomem *reg;
+       u32 val;
 
        reg = (void __iomem *) xhci->cap_regs + 0x80a4;
        val = readl(reg);
@@ -355,6 +372,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
+       int                     ret;
 
        /*
         * Systems with the TI redriver that loses port status change events
@@ -364,9 +382,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
                pdev->no_d3cold = true;
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-               xhci_pme_quirk(hcd, true);
+               xhci_pme_quirk(hcd);
+
+       if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+               xhci_ssic_port_unused_quirk(hcd, true);
 
-       return xhci_suspend(xhci, do_wakeup);
+       ret = xhci_suspend(xhci, do_wakeup);
+       if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
+               xhci_ssic_port_unused_quirk(hcd, false);
+
+       return ret;
 }
 
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
@@ -396,8 +421,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL)
                usb_enable_intel_xhci_ports(pdev);
 
+       if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+               xhci_ssic_port_unused_quirk(hcd, false);
+
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-               xhci_pme_quirk(hcd, false);
+               xhci_pme_quirk(hcd);
 
        retval = xhci_resume(xhci, hibernated);
        return retval;
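Aside, not part of the commit: the rework above replaces the single PORT2 register define with a loop over SSIC_PORT_NUM ports, each config register sitting SSIC_PORT_CFG2_OFFSET apart. A small sketch of the address arithmetic only; the MMIO base is made up:

#include <stdio.h>
#include <stdint.h>

#define SSIC_PORT_NUM          2
#define SSIC_PORT_CFG2         0x880c
#define SSIC_PORT_CFG2_OFFSET  0x30

int main(void)
{
        uintptr_t cap_regs = 0xfed00000u;  /* hypothetical xHCI capability base */
        int i;

        for (i = 0; i < SSIC_PORT_NUM; i++)
                printf("SSIC port %d CFG2 register at %#lx\n", i,
                       (unsigned long)(cap_regs + SSIC_PORT_CFG2 +
                                       i * SSIC_PORT_CFG2_OFFSET));
        return 0;
}

For i = 1 this evaluates to base + 0x883c, the old PORT2_SSIC_CONFIG_REG2 offset, so the loop covers the previously handled port plus port 0.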
index 770b6b08879790ec2754e26fbb424fcd03c1205c..d39d6bf1d090dad06687892ed723ea3a5e993083 100644 (file)
@@ -184,7 +184,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
                struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
 
                /* Just copy data for now */
-               *priv = *priv_match;
+               if (priv_match)
+                       *priv = *priv_match;
        }
 
        if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) {
index f1c21c40b4a674e6a8fad98ac34c0b9385df76be..3915657e6078b66211fd699eab9bf227752f1ffc 100644 (file)
@@ -2193,10 +2193,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                }
        /* Fast path - was this the last TRB in the TD for this URB? */
        } else if (event_trb == td->last_trb) {
-               if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
-                       return finish_td(xhci, td, event_trb, event, ep,
-                                        status, false);
-
                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
@@ -2248,12 +2244,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                        td->urb->actual_length +=
                                TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
-               if (trb_comp_code == COMP_SHORT_TX) {
-                       xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
-                       td->urb_length_set = true;
-                       return 0;
-               }
        }
 
        return finish_td(xhci, td, event_trb, event, ep, status, false);
index 26a44c0e969e621be5d2a6e2d947cf9292505716..0c8087d3c3138f72e6eeaedc928c09453ab32285 100644 (file)
@@ -1554,7 +1554,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "HW died, freeing TD.");
                urb_priv = urb->hcpriv;
-               for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+               for (i = urb_priv->td_cnt;
+                    i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+                    i++) {
                        td = urb_priv->td[i];
                        if (!list_empty(&td->td_list))
                                list_del_init(&td->td_list);
index 9be7348872baab804691c7fd10f31115a0c7e3fe..cc651383ce5a85d713c66b032a0aa30bd4331dc8 100644 (file)
@@ -1631,6 +1631,7 @@ struct xhci_hcd {
 #define XHCI_BROKEN_STREAMS    (1 << 19)
 #define XHCI_PME_STUCK_QUIRK   (1 << 20)
 #define XHCI_MTK_HOST          (1 << 21)
+#define XHCI_SSIC_PORT_UNUSED  (1 << 22)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index b2685e75a6835fe2b4615b3f86f7aef378fedcae..3eaa4ba6867d408a516abec7bb4180d5c9f05d40 100644 (file)
@@ -348,7 +348,9 @@ static int ux500_suspend(struct device *dev)
        struct ux500_glue       *glue = dev_get_drvdata(dev);
        struct musb             *musb = glue_to_musb(glue);
 
-       usb_phy_set_suspend(musb->xceiv, 1);
+       if (musb)
+               usb_phy_set_suspend(musb->xceiv, 1);
+
        clk_disable_unprepare(glue->clk);
 
        return 0;
@@ -366,7 +368,8 @@ static int ux500_resume(struct device *dev)
                return ret;
        }
 
-       usb_phy_set_suspend(musb->xceiv, 0);
+       if (musb)
+               usb_phy_set_suspend(musb->xceiv, 0);
 
        return 0;
 }
index 0d19a6d61a71f7ccb7a55e1b90a7d5d5f27db535..970a30e155cb51bfd1d15126d8cdc65f9adf579c 100644 (file)
@@ -1599,6 +1599,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
                                                &motg->id.nb);
                if (ret < 0) {
                        dev_err(&pdev->dev, "register ID notifier failed\n");
+                       extcon_unregister_notifier(motg->vbus.extcon,
+                                                  EXTCON_USB, &motg->vbus.nb);
                        return ret;
                }
 
@@ -1660,15 +1662,6 @@ static int msm_otg_probe(struct platform_device *pdev)
        if (!motg)
                return -ENOMEM;
 
-       pdata = dev_get_platdata(&pdev->dev);
-       if (!pdata) {
-               if (!np)
-                       return -ENXIO;
-               ret = msm_otg_read_dt(pdev, motg);
-               if (ret)
-                       return ret;
-       }
-
        motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
                                     GFP_KERNEL);
        if (!motg->phy.otg)
@@ -1710,6 +1703,15 @@ static int msm_otg_probe(struct platform_device *pdev)
        if (!motg->regs)
                return -ENOMEM;
 
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata) {
+               if (!np)
+                       return -ENXIO;
+               ret = msm_otg_read_dt(pdev, motg);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * NOTE: The PHYs can be multiplexed between the chipidea controller
         * and the dwc3 controller, using a single bit. It is important that
@@ -1717,8 +1719,10 @@ static int msm_otg_probe(struct platform_device *pdev)
         */
        if (motg->phy_number) {
                phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
-               if (!phy_select)
-                       return -ENOMEM;
+               if (!phy_select) {
+                       ret = -ENOMEM;
+                       goto unregister_extcon;
+               }
                /* Enable second PHY with the OTG port */
                writel(0x1, phy_select);
        }
@@ -1728,7 +1732,8 @@ static int msm_otg_probe(struct platform_device *pdev)
        motg->irq = platform_get_irq(pdev, 0);
        if (motg->irq < 0) {
                dev_err(&pdev->dev, "platform_get_irq failed\n");
-               return motg->irq;
+               ret = motg->irq;
+               goto unregister_extcon;
        }
 
        regs[0].supply = "vddcx";
@@ -1737,7 +1742,7 @@ static int msm_otg_probe(struct platform_device *pdev)
 
        ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
        if (ret)
-               return ret;
+               goto unregister_extcon;
 
        motg->vddcx = regs[0].consumer;
        motg->v3p3  = regs[1].consumer;
@@ -1834,6 +1839,12 @@ disable_clks:
        clk_disable_unprepare(motg->clk);
        if (!IS_ERR(motg->core_clk))
                clk_disable_unprepare(motg->core_clk);
+unregister_extcon:
+       extcon_unregister_notifier(motg->id.extcon,
+                                  EXTCON_USB_HOST, &motg->id.nb);
+       extcon_unregister_notifier(motg->vbus.extcon,
+                                  EXTCON_USB, &motg->vbus.nb);
+
        return ret;
 }
 
index c2936dc48ca7b45b0e5c186859d2879569719323..00bfea01be6501d9b53ccc7975b069c24bc57d7b 100644 (file)
@@ -220,7 +220,7 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
 /* Return true if the vbus is there */
 static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
 {
-       unsigned int vbus_value;
+       unsigned int vbus_value = 0;
 
        if (!mxs_phy->regmap_anatop)
                return false;
index 9b90ad747d8766797bd1737ea8f0be9347a5560d..987813b8a7f91e27b6a582a44ca88254dbd49c13 100644 (file)
@@ -99,6 +99,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
        { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
        { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+       { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
        { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
        { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
        { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
index a5a0376bbd48c2596b908aa0409befb1136bf128..8c660ae401d8267f951516e33fe8188f0f23e060 100644 (file)
@@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
+       { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
        { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
 
        /* Papouch devices based on FTDI chip */
index 67c6d446973092ddf98dd703152b7cd0d8dec24c..a84df2513994a57377cc41105f3078e36d2351a2 100644 (file)
  */
 #define RATOC_VENDOR_ID                0x0584
 #define RATOC_PRODUCT_ID_USB60F        0xb020
+#define RATOC_PRODUCT_ID_SCU18 0xb03a
 
 /*
  * Infineon Technologies
index e3c3f57c2d828d42ecb35e12d8a1a048974d9ee6..619607323bfdfb0a678e470ed31757ba6028676d 100644 (file)
@@ -368,6 +368,16 @@ static int mxu1_port_probe(struct usb_serial_port *port)
        return 0;
 }
 
+static int mxu1_port_remove(struct usb_serial_port *port)
+{
+       struct mxu1_port *mxport;
+
+       mxport = usb_get_serial_port_data(port);
+       kfree(mxport);
+
+       return 0;
+}
+
 static int mxu1_startup(struct usb_serial *serial)
 {
        struct mxu1_device *mxdev;
@@ -427,6 +437,14 @@ err_free_mxdev:
        return err;
 }
 
+static void mxu1_release(struct usb_serial *serial)
+{
+       struct mxu1_device *mxdev;
+
+       mxdev = usb_get_serial_data(serial);
+       kfree(mxdev);
+}
+
 static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
                           u8 mask, u8 byte)
 {
@@ -957,7 +975,9 @@ static struct usb_serial_driver mxu11x0_device = {
        .id_table               = mxu1_idtable,
        .num_ports              = 1,
        .port_probe             = mxu1_port_probe,
+       .port_remove            = mxu1_port_remove,
        .attach                 = mxu1_startup,
+       .release                = mxu1_release,
        .open                   = mxu1_open,
        .close                  = mxu1_close,
        .ioctl                  = mxu1_ioctl,
index f2280606b73c0c897d83d7738747b3d6f97a2c71..db86e512e0fcb3af0548eb9907b04ce68db3a8dc 100644 (file)
@@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_CC864_SINGLE             0x1006
 #define TELIT_PRODUCT_DE910_DUAL               0x1010
 #define TELIT_PRODUCT_UE910_V2                 0x1012
+#define TELIT_PRODUCT_LE922_USBCFG0            0x1042
+#define TELIT_PRODUCT_LE922_USBCFG3            0x1043
 #define TELIT_PRODUCT_LE920                    0x1200
 #define TELIT_PRODUCT_LE910                    0x1201
 
@@ -615,6 +617,16 @@ static const struct option_blacklist_info telit_le920_blacklist = {
        .reserved = BIT(1) | BIT(5),
 };
 
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+       .sendsetup = BIT(2),
+       .reserved = BIT(0) | BIT(1) | BIT(3),
+};
+
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+       .sendsetup = BIT(0),
+       .reserved = BIT(1) | BIT(2) | BIT(3),
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1160,6 +1172,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
                .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1679,7 +1695,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
                .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
-       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
                .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
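Aside, not part of the commit: in option.c the sendsetup/reserved fields are per-interface bitmaps, so the LE922 entries above mark which USB interfaces receive setup URBs and which are skipped entirely. A plain-C sketch of reading such a bitmap, mirroring the usbcfg3 entry above:

#include <stdio.h>

#define BIT(n) (1UL << (n))

struct blacklist_info {
        unsigned long sendsetup;
        unsigned long reserved;
};

/* interface 0 gets the setup URBs, interfaces 1-3 are not bound at all */
static const struct blacklist_info usbcfg3 = {
        .sendsetup = BIT(0),
        .reserved  = BIT(1) | BIT(2) | BIT(3),
};

int main(void)
{
        int ifnum;

        for (ifnum = 0; ifnum < 5; ifnum++)
                printf("interface %d: %s\n", ifnum,
                       (usbcfg3.reserved & BIT(ifnum)) ? "reserved" :
                       (usbcfg3.sendsetup & BIT(ifnum)) ? "sendsetup" : "plain");
        return 0;
}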
index 60afb39eb73c0b95d261ec367890a306621fd2de..337a0be89fcf0767f67731a9b0a50700e1df9318 100644 (file)
@@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
                (serial->num_interrupt_in == 0))
                return 0;
 
+       if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
        /*
        * It appears that Treos and Kyoceras want to use the
        * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
@@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
         */
 
        /* some sanity check */
-       if (serial->num_ports < 2)
-               return -1;
+       if (serial->num_bulk_out < 2) {
+               dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
+               return -ENODEV;
+       }
 
        /* port 0 now uses the modified endpoint Address */
        port = serial->port[0];
index 82f25cc1c460dee73c37b2b19895fb7d89bfb0d3..ecca316386f57fa64d04746d4b8326fa4c241ebd 100644 (file)
@@ -123,8 +123,8 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
        /*
         * With noiommu enabled, an IOMMU group will be created for a device
         * that doesn't already have one and doesn't have an iommu_ops on their
-        * bus.  We use iommu_present() again in the main code to detect these
-        * fake groups.
+        * bus.  We set iommudata simply to be able to identify these groups
+        * as special use and for reclamation later.
         */
        if (group || !noiommu || iommu_present(dev->bus))
                return group;
@@ -134,6 +134,7 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
                return NULL;
 
        iommu_group_set_name(group, "vfio-noiommu");
+       iommu_group_set_iommudata(group, &noiommu, NULL);
        ret = iommu_group_add_device(group, dev);
        iommu_group_put(group);
        if (ret)
@@ -158,7 +159,7 @@ EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
 void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
 {
 #ifdef CONFIG_VFIO_NOIOMMU
-       if (!iommu_present(dev->bus))
+       if (iommu_group_get_iommudata(group) == &noiommu)
                iommu_group_remove_device(dev);
 #endif
 
@@ -190,16 +191,10 @@ static long vfio_noiommu_ioctl(void *iommu_data,
        return -ENOTTY;
 }
 
-static int vfio_iommu_present(struct device *dev, void *unused)
-{
-       return iommu_present(dev->bus) ? 1 : 0;
-}
-
 static int vfio_noiommu_attach_group(void *iommu_data,
                                     struct iommu_group *iommu_group)
 {
-       return iommu_group_for_each_dev(iommu_group, NULL,
-                                       vfio_iommu_present) ? -EINVAL : 0;
+       return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
 }
 
 static void vfio_noiommu_detach_group(void *iommu_data,
@@ -323,8 +318,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
 /**
  * Group objects - create, release, get, put, search
  */
-static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
-                                           bool iommu_present)
+static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 {
        struct vfio_group *group, *tmp;
        struct device *dev;
@@ -342,7 +336,9 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
        atomic_set(&group->container_users, 0);
        atomic_set(&group->opened, 0);
        group->iommu_group = iommu_group;
-       group->noiommu = !iommu_present;
+#ifdef CONFIG_VFIO_NOIOMMU
+       group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
+#endif
 
        group->nb.notifier_call = vfio_iommu_group_notifier;
 
@@ -767,7 +763,7 @@ int vfio_add_group_dev(struct device *dev,
 
        group = vfio_group_get_from_iommu(iommu_group);
        if (!group) {
-               group = vfio_create_group(iommu_group, iommu_present(dev->bus));
+               group = vfio_create_group(iommu_group);
                if (IS_ERR(group)) {
                        iommu_group_put(iommu_group);
                        return PTR_ERR(group);
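Aside, not part of the commit: the vfio change tags fake no-IOMMU groups by storing the address of the driver-local noiommu variable as the group's iommudata, then compares against that sentinel instead of calling iommu_present(). A minimal sketch of the sentinel-pointer idiom:

#include <stdio.h>

/* the address of this object is the tag; its value is never used */
static int noiommu;

struct group {
        void *iommudata;
};

static int group_is_noiommu(const struct group *g)
{
        return g->iommudata == &noiommu;
}

int main(void)
{
        struct group fake = { .iommudata = &noiommu };
        struct group real = { .iommudata = NULL };

        printf("fake group: %d, real group: %d\n",
               group_is_noiommu(&fake), group_is_noiommu(&real));
        return 0;
}

Because the sentinel address is private to the module, no other subsystem can accidentally produce a matching iommudata value.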
index c42ce2fdfd441de6a88f7e3ecbc46fa2514028d7..0a4626886b00c1402bc2bbc234fe691bb549ff8a 100644 (file)
@@ -68,7 +68,6 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #include "../macmodes.h"
 #endif
 
index ce0b1d05a388cefc309e177de53b6b936226ae74..218339a4edaac46e5b223d07f12f335b8966c66f 100644 (file)
@@ -76,7 +76,6 @@
 
 #ifdef CONFIG_PPC
 
-#include <asm/pci-bridge.h>
 #include "../macmodes.h"
 
 #ifdef CONFIG_BOOTX_TEXT
index 9b167f7ef6c698854a8fe4d05fdf4c3d2e9fffc7..4363c64d74e8c1481842735aa2fc0d1831344b05 100644 (file)
@@ -33,7 +33,6 @@
 #if defined(CONFIG_PPC)
 #include <linux/nvram.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #include "macmodes.h"
 #endif
 
index 09b02cd1eb0e270f7dc654116167101202b00a27..7a90ea2c4613c772d4b14584a483fd2bbaed92bc 100644 (file)
@@ -47,7 +47,6 @@
 
 #if defined(CONFIG_PPC_PMAC)
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #include "../macmodes.h"
 #endif
 
index 43a0a52fc52703c18244dc2b25e54d6dd22c1f61..fb60a8f0cc94c8103d8dc141b25c5eecec5db39c 100644 (file)
 #include <linux/pci.h>
 #include <asm/io.h>
 
-#ifdef CONFIG_PPC64
-#include <asm/pci-bridge.h>
-#endif
-
 #ifdef CONFIG_PPC32
 #include <asm/bootx.h>
 #endif
index 36205c27c4d0f19ab61a4aa4175de3dd8785de66..f6bed86c17f96f16cfa75f35a15b790287e7ab1f 100644 (file)
@@ -545,6 +545,7 @@ err_enable_device:
 static void virtio_pci_remove(struct pci_dev *pci_dev)
 {
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+       struct device *dev = get_device(&vp_dev->vdev.dev);
 
        unregister_virtio_device(&vp_dev->vdev);
 
@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
                virtio_pci_modern_remove(vp_dev);
 
        pci_disable_device(pci_dev);
+       put_device(dev);
 }
 
 static struct pci_driver virtio_pci_driver = {
index 4f0e7be0da346c90d8559d9653bd41927f722b22..80825a7e8e48e1ebd06af14a1bcf208acb733daf 100644 (file)
@@ -145,7 +145,8 @@ config MENF21BMC_WATCHDOG
 config TANGOX_WATCHDOG
        tristate "Sigma Designs SMP86xx/SMP87xx watchdog"
        select WATCHDOG_CORE
-       depends on ARCH_TANGOX || COMPILE_TEST
+       depends on ARCH_TANGO || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          Support for the watchdog in Sigma Designs SMP86xx (tango3)
          and SMP87xx (tango4) family chips.
@@ -618,6 +619,7 @@ config DIGICOLOR_WATCHDOG
 config LPC18XX_WATCHDOG
        tristate "LPC18xx/43xx Watchdog"
        depends on ARCH_LPC18XX || COMPILE_TEST
+       depends on HAS_IOMEM
        select WATCHDOG_CORE
        help
          Say Y here to include support for the watchdog timer
@@ -1374,6 +1376,7 @@ config BCM_KONA_WDT_DEBUG
 config BCM7038_WDT
        tristate "BCM7038 Watchdog"
        select WATCHDOG_CORE
+       depends on HAS_IOMEM
        help
         Watchdog driver for the built-in hardware in Broadcom 7038 SoCs.
 
@@ -1383,6 +1386,7 @@ config IMGPDC_WDT
        tristate "Imagination Technologies PDC Watchdog Timer"
        depends on HAS_IOMEM
        depends on METAG || MIPS || COMPILE_TEST
+       select WATCHDOG_CORE
        help
          Driver for Imagination Technologies PowerDown Controller
          Watchdog Timer.
@@ -1565,6 +1569,17 @@ config WATCHDOG_RIO
          machines.  The watchdog timeout period is normally one minute but
          can be changed with a boot-time parameter.
 
+config WATCHDOG_SUN4V
+       tristate "Sun4v Watchdog support"
+       select WATCHDOG_CORE
+       depends on SPARC64
+       help
+         Say Y here to support the hypervisor watchdog capability embedded
+         in the SPARC sun4v architecture.
+
+         To compile this driver as a module, choose M here. The module will
+         be called sun4v_wdt.
+
 # XTENSA Architecture
 
 # Xen Architecture
index f566753256abbbb74535337d3f9a9cc0442fd78e..f6a6a387c6c71f7a5e9e2cda4cc91ae3f39edf2c 100644 (file)
@@ -179,6 +179,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
 
 obj-$(CONFIG_WATCHDOG_RIO)             += riowd.o
 obj-$(CONFIG_WATCHDOG_CP1XXX)          += cpwd.o
+obj-$(CONFIG_WATCHDOG_SUN4V)           += sun4v_wdt.o
 
 # XTENSA Architecture
 
index f36ca4be07207a9fd200d92577e9242a10e774df..ac5840d9689aed0f79fafa82e6a56e4515bb1b27 100644 (file)
@@ -292,4 +292,4 @@ MODULE_PARM_DESC(nodelay,
                 "Force selection of a timeout setting without initial delay "
                 "(max6373/74 only, default=0)");
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
index 1a11aedc4fe850738bfe17d469232da64959b84d..68952d9ccf8394412654d76fc2c14edef3f74124 100644 (file)
@@ -608,7 +608,7 @@ static int usb_pcwd_probe(struct usb_interface *interface,
        struct usb_host_interface *iface_desc;
        struct usb_endpoint_descriptor *endpoint;
        struct usb_pcwd_private *usb_pcwd = NULL;
-       int pipe, maxp;
+       int pipe;
        int retval = -ENOMEM;
        int got_fw_rev;
        unsigned char fw_rev_major, fw_rev_minor;
@@ -641,7 +641,6 @@ static int usb_pcwd_probe(struct usb_interface *interface,
 
        /* get a handle to the interrupt data pipe */
        pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
 
        /* allocate memory for our device and initialize it */
        usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
index 01d816251302c2491c24a70e8f7c542b61c2f15a..e7a715e820217eb82a90cc8e5eed54d6d826e22c 100644 (file)
@@ -139,12 +139,11 @@ static int wdt_config(struct watchdog_device *wdd, bool ping)
 
        writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
        writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+       writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
 
-       if (!ping) {
-               writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+       if (!ping)
                writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
                                WDTCONTROL);
-       }
 
        writel_relaxed(LOCK, wdt->base + WDTLOCK);
 
diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c
new file mode 100644 (file)
index 0000000..1467fe5
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ *     sun4v watchdog timer
+ *     (c) Copyright 2016 Oracle Corporation
+ *
+ *     Implement a simple watchdog driver using the built-in sun4v hypervisor
+ *     watchdog support. If time expires, the hypervisor stops or bounces
+ *     the guest domain.
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/watchdog.h>
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#define WDT_TIMEOUT                    60
+#define WDT_MAX_TIMEOUT                        31536000
+#define WDT_MIN_TIMEOUT                        1
+#define WDT_DEFAULT_RESOLUTION_MS      1000    /* 1 second */
+
+static unsigned int timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+       __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+       __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int sun4v_wdt_stop(struct watchdog_device *wdd)
+{
+       sun4v_mach_set_watchdog(0, NULL);
+
+       return 0;
+}
+
+static int sun4v_wdt_ping(struct watchdog_device *wdd)
+{
+       int hverr;
+
+       /*
+        * HV watchdog timer will round up the timeout
+        * passed in to the nearest multiple of the
+        * watchdog resolution in milliseconds.
+        */
+       hverr = sun4v_mach_set_watchdog(wdd->timeout * 1000, NULL);
+       if (hverr == HV_EINVAL)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int sun4v_wdt_set_timeout(struct watchdog_device *wdd,
+                                unsigned int timeout)
+{
+       wdd->timeout = timeout;
+
+       return 0;
+}
+
+static const struct watchdog_info sun4v_wdt_ident = {
+       .options =      WDIOF_SETTIMEOUT |
+                       WDIOF_MAGICCLOSE |
+                       WDIOF_KEEPALIVEPING,
+       .identity =     "sun4v hypervisor watchdog",
+       .firmware_version = 0,
+};
+
+static struct watchdog_ops sun4v_wdt_ops = {
+       .owner =        THIS_MODULE,
+       .start =        sun4v_wdt_ping,
+       .stop =         sun4v_wdt_stop,
+       .ping =         sun4v_wdt_ping,
+       .set_timeout =  sun4v_wdt_set_timeout,
+};
+
+static struct watchdog_device wdd = {
+       .info = &sun4v_wdt_ident,
+       .ops = &sun4v_wdt_ops,
+       .min_timeout = WDT_MIN_TIMEOUT,
+       .max_timeout = WDT_MAX_TIMEOUT,
+       .timeout = WDT_TIMEOUT,
+};
+
+static int __init sun4v_wdt_init(void)
+{
+       struct mdesc_handle *handle;
+       u64 node;
+       const u64 *value;
+       int err = 0;
+       unsigned long major = 1, minor = 1;
+
+       /*
+        * There are 2 properties that can be set from the control
+        * domain for the watchdog.
+        * watchdog-resolution
+        * watchdog-max-timeout
+        *
+        * We can expect a handle to be returned otherwise something
+        * serious is wrong. Correct to return -ENODEV here.
+        */
+
+       handle = mdesc_grab();
+       if (!handle)
+               return -ENODEV;
+
+       node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
+       err = -ENODEV;
+       if (node == MDESC_NODE_NULL)
+               goto out_release;
+
+       /*
+        * This is a safe way to validate if we are on the right
+        * platform.
+        */
+       if (sun4v_hvapi_register(HV_GRP_CORE, major, &minor))
+               goto out_hv_unreg;
+
+       /* Allow value of watchdog-resolution up to 1s (default) */
+       value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
+       err = -EINVAL;
+       if (value) {
+               if (*value == 0 ||
+                   *value > WDT_DEFAULT_RESOLUTION_MS)
+                       goto out_hv_unreg;
+       }
+
+       value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
+       if (value) {
+               /*
+                * If the property value (in ms) is smaller than
+                * min_timeout, return -EINVAL.
+                */
+               if (*value < wdd.min_timeout * 1000)
+                       goto out_hv_unreg;
+
+               /*
+                * If the property value is smaller than
+                * default max_timeout  then set watchdog max_timeout to
+                * the value of the property in seconds.
+                */
+               if (*value < wdd.max_timeout * 1000)
+                       wdd.max_timeout = *value  / 1000;
+       }
+
+       watchdog_init_timeout(&wdd, timeout, NULL);
+
+       watchdog_set_nowayout(&wdd, nowayout);
+
+       err = watchdog_register_device(&wdd);
+       if (err)
+               goto out_hv_unreg;
+
+       pr_info("initialized (timeout=%ds, nowayout=%d)\n",
+                wdd.timeout, nowayout);
+
+       mdesc_release(handle);
+
+       return 0;
+
+out_hv_unreg:
+       sun4v_hvapi_unregister(HV_GRP_CORE);
+
+out_release:
+       mdesc_release(handle);
+       return err;
+}
+
+static void __exit sun4v_wdt_exit(void)
+{
+       sun4v_hvapi_unregister(HV_GRP_CORE);
+       watchdog_unregister_device(&wdd);
+}
+
+module_init(sun4v_wdt_init);
+module_exit(sun4v_wdt_exit);
+
+MODULE_AUTHOR("Wim Coekaerts <wim.coekaerts@oracle.com>");
+MODULE_DESCRIPTION("sun4v watchdog driver");
+MODULE_LICENSE("GPL");
index 945fc43272017cfe05bab171eb8b97e62057244c..4ac2ca8a76561952798f7c49bdd292719d0439a8 100644 (file)
@@ -242,7 +242,7 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static struct cleancache_ops tmem_cleancache_ops = {
+static const struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
        .invalidate_page = tmem_cleancache_flush_page,
index 36b210f9b6b29621fa9e410db063d3df53d7fb75..9282dbf5abdba6b01e059a45e7fc9895002dbd2d 100644 (file)
@@ -65,8 +65,7 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t off, size_t count)
 {
-       struct zorro_dev *z = to_zorro_dev(container_of(kobj, struct device,
-                                          kobj));
+       struct zorro_dev *z = to_zorro_dev(kobj_to_dev(kobj));
        struct ConfigDev cd;
 
        /* Construct a ConfigDev */
index 9adee0d7536e11343c9050b84174be6d2b90bdfc..922893f8ab4a5f6f2f9241e713c6fd4a6de103a0 100644 (file)
@@ -207,6 +207,7 @@ menuconfig MISC_FILESYSTEMS
 
 if MISC_FILESYSTEMS
 
+source "fs/orangefs/Kconfig"
 source "fs/adfs/Kconfig"
 source "fs/affs/Kconfig"
 source "fs/ecryptfs/Kconfig"
index 79f522575cba3e79e6909ca4c1c055d2cb54ce9a..f0f2f00951f505db759590419b1b04a091ee8874 100644 (file)
@@ -105,6 +105,7 @@ obj-$(CONFIG_AUTOFS4_FS)    += autofs4/
 obj-$(CONFIG_ADFS_FS)          += adfs/
 obj-$(CONFIG_FUSE_FS)          += fuse/
 obj-$(CONFIG_OVERLAY_FS)       += overlayfs/
+obj-$(CONFIG_ORANGEFS_FS)       += orangefs/
 obj-$(CONFIG_UDF_FS)           += udf/
 obj-$(CONFIG_SUN_OPENPROMFS)   += openpromfs/
 obj-$(CONFIG_OMFS_FS)          += omfs/
index 7b9cd49622b132f5f71e6557f05d0a2591a37c16..39b3a174a4253974b4635bee951e2283cf71b9cf 100644 (file)
@@ -1730,43 +1730,25 @@ static int blkdev_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return __dax_fault(vma, vmf, blkdev_get_block, NULL);
 }
 
-static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, unsigned int flags)
-{
-       return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
-}
-
-static void blkdev_vm_open(struct vm_area_struct *vma)
+static int blkdev_dax_pfn_mkwrite(struct vm_area_struct *vma,
+               struct vm_fault *vmf)
 {
-       struct inode *bd_inode = bdev_file_inode(vma->vm_file);
-       struct block_device *bdev = I_BDEV(bd_inode);
-
-       inode_lock(bd_inode);
-       bdev->bd_map_count++;
-       inode_unlock(bd_inode);
+       return dax_pfn_mkwrite(vma, vmf);
 }
 
-static void blkdev_vm_close(struct vm_area_struct *vma)
+static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+               pmd_t *pmd, unsigned int flags)
 {
-       struct inode *bd_inode = bdev_file_inode(vma->vm_file);
-       struct block_device *bdev = I_BDEV(bd_inode);
-
-       inode_lock(bd_inode);
-       bdev->bd_map_count--;
-       inode_unlock(bd_inode);
+       return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
 }
 
 static const struct vm_operations_struct blkdev_dax_vm_ops = {
-       .open           = blkdev_vm_open,
-       .close          = blkdev_vm_close,
        .fault          = blkdev_dax_fault,
        .pmd_fault      = blkdev_dax_pmd_fault,
-       .pfn_mkwrite    = blkdev_dax_fault,
+       .pfn_mkwrite    = blkdev_dax_pfn_mkwrite,
 };
 
 static const struct vm_operations_struct blkdev_default_vm_ops = {
-       .open           = blkdev_vm_open,
-       .close          = blkdev_vm_close,
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
 };
@@ -1774,18 +1756,14 @@ static const struct vm_operations_struct blkdev_default_vm_ops = {
 static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct inode *bd_inode = bdev_file_inode(file);
-       struct block_device *bdev = I_BDEV(bd_inode);
 
        file_accessed(file);
-       inode_lock(bd_inode);
-       bdev->bd_map_count++;
        if (IS_DAX(bd_inode)) {
                vma->vm_ops = &blkdev_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &blkdev_default_vm_ops;
        }
-       inode_unlock(bd_inode);
 
        return 0;
 }
index 88d9af3d4581f7c45aa0065fa467d097625be5c1..5fb60ea7eee2b2c28e067d11b54c2fad7127c7d8 100644 (file)
@@ -328,8 +328,8 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
-       queue_work(wq->normal_wq, &work->normal_work);
        trace_btrfs_work_queued(work);
+       queue_work(wq->normal_wq, &work->normal_work);
 }
 
 void btrfs_queue_work(struct btrfs_workqueue *wq,
index 769e0ff1b4cebe998cc29f567d6fae73be4b8fb0..f5ef7d171bb888f48017080e91244a638f515255 100644 (file)
@@ -311,7 +311,7 @@ struct tree_mod_root {
 
 struct tree_mod_elem {
        struct rb_node node;
-       u64 index;              /* shifted logical */
+       u64 logical;
        u64 seq;
        enum mod_log_op op;
 
@@ -435,11 +435,11 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 
 /*
  * key order of the log:
- *       index -> sequence
+ *       node/leaf start address -> sequence
  *
- * the index is the shifted logical of the *new* root node for root replace
- * operations, or the shifted logical of the affected block for all other
- * operations.
+ * The 'start address' is the logical address of the *new* root node
+ * for root replace operations, or the logical address of the affected
+ * block for all other operations.
  *
  * Note: must be called with write lock (tree_mod_log_write_lock).
  */
@@ -460,9 +460,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
        while (*new) {
                cur = container_of(*new, struct tree_mod_elem, node);
                parent = *new;
-               if (cur->index < tm->index)
+               if (cur->logical < tm->logical)
                        new = &((*new)->rb_left);
-               else if (cur->index > tm->index)
+               else if (cur->logical > tm->logical)
                        new = &((*new)->rb_right);
                else if (cur->seq < tm->seq)
                        new = &((*new)->rb_left);
@@ -523,7 +523,7 @@ alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
        if (!tm)
                return NULL;
 
-       tm->index = eb->start >> PAGE_CACHE_SHIFT;
+       tm->logical = eb->start;
        if (op != MOD_LOG_KEY_ADD) {
                btrfs_node_key(eb, &tm->key, slot);
                tm->blockptr = btrfs_node_blockptr(eb, slot);
@@ -588,7 +588,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
                goto free_tms;
        }
 
-       tm->index = eb->start >> PAGE_CACHE_SHIFT;
+       tm->logical = eb->start;
        tm->slot = src_slot;
        tm->move.dst_slot = dst_slot;
        tm->move.nr_items = nr_items;
@@ -699,7 +699,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
                goto free_tms;
        }
 
-       tm->index = new_root->start >> PAGE_CACHE_SHIFT;
+       tm->logical = new_root->start;
        tm->old_root.logical = old_root->start;
        tm->old_root.level = btrfs_header_level(old_root);
        tm->generation = btrfs_header_generation(old_root);
@@ -739,16 +739,15 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
        struct rb_node *node;
        struct tree_mod_elem *cur = NULL;
        struct tree_mod_elem *found = NULL;
-       u64 index = start >> PAGE_CACHE_SHIFT;
 
        tree_mod_log_read_lock(fs_info);
        tm_root = &fs_info->tree_mod_log;
        node = tm_root->rb_node;
        while (node) {
                cur = container_of(node, struct tree_mod_elem, node);
-               if (cur->index < index) {
+               if (cur->logical < start) {
                        node = node->rb_left;
-               } else if (cur->index > index) {
+               } else if (cur->logical > start) {
                        node = node->rb_right;
                } else if (cur->seq < min_seq) {
                        node = node->rb_left;
@@ -1230,9 +1229,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
                return NULL;
 
        /*
-        * the very last operation that's logged for a root is the replacement
-        * operation (if it is replaced at all). this has the index of the *new*
-        * root, making it the very first operation that's logged for this root.
+        * the very last operation that's logged for a root is the
+        * replacement operation (if it is replaced at all). this has
+        * the logical address of the *new* root, making it the very
+        * first operation that's logged for this root.
         */
        while (1) {
                tm = tree_mod_log_search_oldest(fs_info, root_logical,
@@ -1336,7 +1336,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
                if (!next)
                        break;
                tm = container_of(next, struct tree_mod_elem, node);
-               if (tm->index != first_tm->index)
+               if (tm->logical != first_tm->logical)
                        break;
        }
        tree_mod_log_read_unlock(fs_info);
index bfe4a337fb4d13a058446265b7baf4a1437aa602..5f5c4fbd7a3c9880d56c86a5ddd16df03cb35f5a 100644 (file)
@@ -2353,6 +2353,9 @@ struct btrfs_map_token {
        unsigned long offset;
 };
 
+#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
+                               ((bytes) >> (fs_info)->sb->s_blocksize_bits)
+
 static inline void btrfs_init_map_token (struct btrfs_map_token *token)
 {
        token->kaddr = NULL;
@@ -4027,7 +4030,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
                        struct inode *dir, u64 objectid,
                        const char *name, int name_len);
-int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
                        int front);
 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
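
The BTRFS_BYTES_TO_BLKS() macro introduced above converts a byte count into filesystem blocks by shifting with the superblock's s_blocksize_bits; a right shift by log2(blocksize) is simply integer division by the block size. A tiny self-contained check of that equivalence, assuming a 4096-byte block size purely for the example:

#include <assert.h>

int main(void)
{
	unsigned int s_blocksize_bits = 12;	/* log2(4096), an assumed example value */
	unsigned long long bytes = 10000;

	/* Same computation as BTRFS_BYTES_TO_BLKS(fs_info, bytes). */
	unsigned long long blks = bytes >> s_blocksize_bits;

	assert(blks == bytes / 4096);		/* 10000 / 4096 == 2 */
	return 0;
}
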
index dd08e29f51177d27859c12f0d173224a24a1f4a9..4545e2e2ad45a556113eecf30b1a7d49ee7ca260 100644 (file)
@@ -182,6 +182,7 @@ static struct btrfs_lockdep_keyset {
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
+       { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
        { .id = 0,                              .name_stem = "tree"     },
 };
 
@@ -1787,7 +1788,6 @@ static int cleaner_kthread(void *arg)
        int again;
        struct btrfs_trans_handle *trans;
 
-       set_freezable();
        do {
                again = 0;
 
index 2e7c97a3f3444aec33a688a4d1f705b0486026cf..1b2073389dc2cf71ef5051071acc98588f393a8d 100644 (file)
@@ -3186,7 +3186,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
        while (1) {
                lock_extent(tree, start, end);
-               ordered = btrfs_lookup_ordered_extent(inode, start);
+               ordered = btrfs_lookup_ordered_range(inode, start,
+                                               PAGE_CACHE_SIZE);
                if (!ordered)
                        break;
                unlock_extent(tree, start, end);
index a67e1c828d0f735c3974e9ff0dca098cd759e9d3..1c50a7b09b4e4bd87e88eef65b9b3b32401083e8 100644 (file)
@@ -172,6 +172,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
        u64 item_start_offset = 0;
        u64 item_last_offset = 0;
        u64 disk_bytenr;
+       u64 page_bytes_left;
        u32 diff;
        int nblocks;
        int bio_index = 0;
@@ -220,6 +221,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
        disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;
+
+       page_bytes_left = bvec->bv_len;
        while (bio_index < bio->bi_vcnt) {
                if (!dio)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;
@@ -243,7 +246,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                                if (BTRFS_I(inode)->root->root_key.objectid ==
                                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                                        set_extent_bits(io_tree, offset,
-                                               offset + bvec->bv_len - 1,
+                                               offset + root->sectorsize - 1,
                                                EXTENT_NODATASUM, GFP_NOFS);
                                } else {
                                        btrfs_info(BTRFS_I(inode)->root->fs_info,
@@ -281,11 +284,17 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 found:
                csum += count * csum_size;
                nblocks -= count;
-               bio_index += count;
+
                while (count--) {
-                       disk_bytenr += bvec->bv_len;
-                       offset += bvec->bv_len;
-                       bvec++;
+                       disk_bytenr += root->sectorsize;
+                       offset += root->sectorsize;
+                       page_bytes_left -= root->sectorsize;
+                       if (!page_bytes_left) {
+                               bio_index++;
+                               bvec++;
+                               page_bytes_left = bvec->bv_len;
+                       }
+
                }
        }
        btrfs_free_path(path);
@@ -432,6 +441,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        int index;
+       int nr_sectors;
+       int i;
        unsigned long total_bytes = 0;
        unsigned long this_sum_bytes = 0;
        u64 offset;
@@ -459,41 +470,56 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                if (!contig)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 
-               if (offset >= ordered->file_offset + ordered->len ||
-                   offset < ordered->file_offset) {
-                       unsigned long bytes_left;
-                       sums->len = this_sum_bytes;
-                       this_sum_bytes = 0;
-                       btrfs_add_ordered_sum(inode, ordered, sums);
-                       btrfs_put_ordered_extent(ordered);
+               data = kmap_atomic(bvec->bv_page);
 
-                       bytes_left = bio->bi_iter.bi_size - total_bytes;
+               nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+                                               bvec->bv_len + root->sectorsize
+                                               - 1);
+
+               for (i = 0; i < nr_sectors; i++) {
+                       if (offset >= ordered->file_offset + ordered->len ||
+                               offset < ordered->file_offset) {
+                               unsigned long bytes_left;
+
+                               kunmap_atomic(data);
+                               sums->len = this_sum_bytes;
+                               this_sum_bytes = 0;
+                               btrfs_add_ordered_sum(inode, ordered, sums);
+                               btrfs_put_ordered_extent(ordered);
+
+                               bytes_left = bio->bi_iter.bi_size - total_bytes;
+
+                               sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
+                                       GFP_NOFS);
+                               BUG_ON(!sums); /* -ENOMEM */
+                               sums->len = bytes_left;
+                               ordered = btrfs_lookup_ordered_extent(inode,
+                                                               offset);
+                               ASSERT(ordered); /* Logic error */
+                               sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
+                                       + total_bytes;
+                               index = 0;
+
+                               data = kmap_atomic(bvec->bv_page);
+                       }
 
-                       sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
-                                      GFP_NOFS);
-                       BUG_ON(!sums); /* -ENOMEM */
-                       sums->len = bytes_left;
-                       ordered = btrfs_lookup_ordered_extent(inode, offset);
-                       BUG_ON(!ordered); /* Logic error */
-                       sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
-                                      total_bytes;
-                       index = 0;
+                       sums->sums[index] = ~(u32)0;
+                       sums->sums[index]
+                               = btrfs_csum_data(data + bvec->bv_offset
+                                               + (i * root->sectorsize),
+                                               sums->sums[index],
+                                               root->sectorsize);
+                       btrfs_csum_final(sums->sums[index],
+                                       (char *)(sums->sums + index));
+                       index++;
+                       offset += root->sectorsize;
+                       this_sum_bytes += root->sectorsize;
+                       total_bytes += root->sectorsize;
                }
 
-               data = kmap_atomic(bvec->bv_page);
-               sums->sums[index] = ~(u32)0;
-               sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
-                                                   sums->sums[index],
-                                                   bvec->bv_len);
                kunmap_atomic(data);
-               btrfs_csum_final(sums->sums[index],
-                                (char *)(sums->sums + index));
 
                bio_index++;
-               index++;
-               total_bytes += bvec->bv_len;
-               this_sum_bytes += bvec->bv_len;
-               offset += bvec->bv_len;
                bvec++;
        }
        this_sum_bytes = 0;
index 098bb8f690c992e1ebd01270f49ff2d37e6658bd..5a58e292bdadc7d586086102f42c540e5f52fb98 100644 (file)
@@ -498,7 +498,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
        loff_t isize = i_size_read(inode);
 
        start_pos = pos & ~((u64)root->sectorsize - 1);
-       num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
+       num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
 
        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1379,16 +1379,19 @@ fail:
 static noinline int
 lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
                                size_t num_pages, loff_t pos,
+                               size_t write_bytes,
                                u64 *lockstart, u64 *lockend,
                                struct extent_state **cached_state)
 {
+       struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 start_pos;
        u64 last_pos;
        int i;
        int ret = 0;
 
-       start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
-       last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
+       start_pos = round_down(pos, root->sectorsize);
+       last_pos = start_pos
+               + round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
 
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
@@ -1503,6 +1506,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
        while (iov_iter_count(i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+               size_t sector_offset;
                size_t write_bytes = min(iov_iter_count(i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
@@ -1511,6 +1515,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                size_t reserve_bytes;
                size_t dirty_pages;
                size_t copied;
+               size_t dirty_sectors;
+               size_t num_sectors;
 
                WARN_ON(num_pages > nrptrs);
 
@@ -1523,7 +1529,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                        break;
                }
 
-               reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+               sector_offset = pos & (root->sectorsize - 1);
+               reserve_bytes = round_up(write_bytes + sector_offset,
+                               root->sectorsize);
 
                if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                             BTRFS_INODE_PREALLOC)) {
@@ -1542,7 +1550,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                 */
                                num_pages = DIV_ROUND_UP(write_bytes + offset,
                                                         PAGE_CACHE_SIZE);
-                               reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+                               reserve_bytes = round_up(write_bytes
+                                                       + sector_offset,
+                                                       root->sectorsize);
                                goto reserve_metadata;
                        }
                }
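
A quick worked example of the new reservation arithmetic, assuming the sub-page configuration this change appears to target (64 KiB pages with a 4 KiB sectorsize): for a 100-byte write at pos = 5000, sector_offset = 5000 & 4095 = 904, so reserve_bytes = round_up(100 + 904, 4096) = 4096, i.e. a single block. The old formula reserved num_pages << PAGE_CACHE_SHIFT, which for the same write would have claimed a full 65536-byte page.
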
@@ -1576,8 +1586,8 @@ again:
                        break;
 
                ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
-                                                     pos, &lockstart, &lockend,
-                                                     &cached_state);
+                                               pos, write_bytes, &lockstart,
+                                               &lockend, &cached_state);
                if (ret < 0) {
                        if (ret == -EAGAIN)
                                goto again;
@@ -1612,9 +1622,16 @@ again:
                 * we still have an outstanding extent for the chunk we actually
                 * managed to copy.
                 */
-               if (num_pages > dirty_pages) {
-                       release_bytes = (num_pages - dirty_pages) <<
-                               PAGE_CACHE_SHIFT;
+               num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+                                               reserve_bytes);
+               dirty_sectors = round_up(copied + sector_offset,
+                                       root->sectorsize);
+               dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+                                               dirty_sectors);
+
+               if (num_sectors > dirty_sectors) {
+                       release_bytes = (write_bytes - copied)
+                               & ~((u64)root->sectorsize - 1);
                        if (copied > 0) {
                                spin_lock(&BTRFS_I(inode)->lock);
                                BTRFS_I(inode)->outstanding_extents++;
@@ -1633,7 +1650,8 @@ again:
                        }
                }
 
-               release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
+               release_bytes = round_up(copied + sector_offset,
+                                       root->sectorsize);
 
                if (copied > 0)
                        ret = btrfs_dirty_pages(root, inode, pages,
@@ -1654,8 +1672,7 @@ again:
 
                if (only_release_metadata && copied > 0) {
                        lockstart = round_down(pos, root->sectorsize);
-                       lockend = lockstart +
-                               (dirty_pages << PAGE_CACHE_SHIFT) - 1;
+                       lockend = round_up(pos + copied, root->sectorsize) - 1;
 
                        set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
                                       lockend, EXTENT_NORESERVE, NULL,
@@ -1761,6 +1778,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        ssize_t err;
        loff_t pos;
        size_t count;
+       loff_t oldsize;
+       int clean_page = 0;
 
        inode_lock(inode);
        err = generic_write_checks(iocb, from);
@@ -1799,14 +1818,17 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        pos = iocb->ki_pos;
        count = iov_iter_count(from);
        start_pos = round_down(pos, root->sectorsize);
-       if (start_pos > i_size_read(inode)) {
+       oldsize = i_size_read(inode);
+       if (start_pos > oldsize) {
                /* Expand hole size to cover write data, preventing empty gap */
                end_pos = round_up(pos + count, root->sectorsize);
-               err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
+               err = btrfs_cont_expand(inode, oldsize, end_pos);
                if (err) {
                        inode_unlock(inode);
                        goto out;
                }
+               if (start_pos > round_up(oldsize, root->sectorsize))
+                       clean_page = 1;
        }
 
        if (sync)
@@ -1818,6 +1840,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
                num_written = __btrfs_buffered_write(file, from, pos);
                if (num_written > 0)
                        iocb->ki_pos = pos + num_written;
+               if (clean_page)
+                       pagecache_isize_extended(inode, oldsize,
+                                               i_size_read(inode));
        }
 
        inode_unlock(inode);
@@ -2293,10 +2318,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        int ret = 0;
        int err = 0;
        unsigned int rsv_count;
-       bool same_page;
+       bool same_block;
        bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
        u64 ino_size;
-       bool truncated_page = false;
+       bool truncated_block = false;
        bool updated_inode = false;
 
        ret = btrfs_wait_ordered_range(inode, offset, len);
@@ -2304,7 +2329,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                return ret;
 
        inode_lock(inode);
-       ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
+       ino_size = round_up(inode->i_size, root->sectorsize);
        ret = find_first_non_hole(inode, &offset, &len);
        if (ret < 0)
                goto out_only_mutex;
@@ -2317,31 +2342,30 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
        lockend = round_down(offset + len,
                             BTRFS_I(inode)->root->sectorsize) - 1;
-       same_page = ((offset >> PAGE_CACHE_SHIFT) ==
-                   ((offset + len - 1) >> PAGE_CACHE_SHIFT));
-
+       same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
+               == (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
        /*
-        * We needn't truncate any page which is beyond the end of the file
+        * We needn't truncate any block which is beyond the end of the file
         * because we are sure there is no data there.
         */
        /*
-        * Only do this if we are in the same page and we aren't doing the
-        * entire page.
+        * Only do this if we are in the same block and we aren't doing the
+        * entire block.
         */
-       if (same_page && len < PAGE_CACHE_SIZE) {
+       if (same_block && len < root->sectorsize) {
                if (offset < ino_size) {
-                       truncated_page = true;
-                       ret = btrfs_truncate_page(inode, offset, len, 0);
+                       truncated_block = true;
+                       ret = btrfs_truncate_block(inode, offset, len, 0);
                } else {
                        ret = 0;
                }
                goto out_only_mutex;
        }
 
-       /* zero back part of the first page */
+       /* zero back part of the first block */
        if (offset < ino_size) {
-               truncated_page = true;
-               ret = btrfs_truncate_page(inode, offset, 0, 0);
+               truncated_block = true;
+               ret = btrfs_truncate_block(inode, offset, 0, 0);
                if (ret) {
                        inode_unlock(inode);
                        return ret;
@@ -2376,9 +2400,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                if (!ret) {
                        /* zero the front end of the last page */
                        if (tail_start + tail_len < ino_size) {
-                               truncated_page = true;
-                               ret = btrfs_truncate_page(inode,
-                                               tail_start + tail_len, 0, 1);
+                               truncated_block = true;
+                               ret = btrfs_truncate_block(inode,
+                                                       tail_start + tail_len,
+                                                       0, 1);
                                if (ret)
                                        goto out_only_mutex;
                        }
@@ -2558,7 +2583,7 @@ out:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                             &cached_state, GFP_NOFS);
 out_only_mutex:
-       if (!updated_inode && truncated_page && !ret && !err) {
+       if (!updated_inode && truncated_block && !ret && !err) {
                /*
                 * If we only end up zeroing part of a page, we still need to
                 * update the inode item, so that all the time fields are
@@ -2678,10 +2703,10 @@ static long btrfs_fallocate(struct file *file, int mode,
        } else if (offset + len > inode->i_size) {
                /*
                 * If we are fallocating from the end of the file onward we
-                * need to zero out the end of the page if i_size lands in the
-                * middle of a page.
+                * need to zero out the end of the block if i_size lands in the
+                * middle of a block.
                 */
-               ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
+               ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
                if (ret)
                        goto out;
        }
index 393e36bd5845d996d216e14ef2c6ac6866fce746..53dbeaf6ce941cc7a6a8bf06c6a81df354e9e364 100644 (file)
@@ -153,6 +153,20 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
 
 static unsigned long *alloc_bitmap(u32 bitmap_size)
 {
+       void *mem;
+
+       /*
+        * The allocation size varies, observed numbers were < 4K up to 16K.
+        * Using vmalloc unconditionally would be too heavy, we'll try
+        * contiguous allocations first.
+        */
+       if  (bitmap_size <= PAGE_SIZE)
+               return kzalloc(bitmap_size, GFP_NOFS);
+
+       mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN);
+       if (mem)
+               return mem;
+
        return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
                         PAGE_KERNEL);
 }
@@ -289,7 +303,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 
        ret = 0;
 out:
-       vfree(bitmap);
+       kvfree(bitmap);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        return ret;
@@ -438,7 +452,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 
        ret = 0;
 out:
-       vfree(bitmap);
+       kvfree(bitmap);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        return ret;
index e28f3d4691afee8bf6dcd68d0617a27e07439e82..3e0d4151151723446ef4d4cee6dfd7959ddd53d7 100644 (file)
@@ -263,7 +263,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
                data_len = compressed_size;
 
        if (start > 0 ||
-           actual_end > PAGE_CACHE_SIZE ||
+           actual_end > root->sectorsize ||
            data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
@@ -2002,7 +2002,8 @@ again:
        if (PagePrivate2(page))
                goto out;
 
-       ordered = btrfs_lookup_ordered_extent(inode, page_start);
+       ordered = btrfs_lookup_ordered_range(inode, page_start,
+                                       PAGE_CACHE_SIZE);
        if (ordered) {
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
                                     page_end, &cached_state, GFP_NOFS);
@@ -4248,7 +4249,8 @@ static int truncate_inline_extent(struct inode *inode,
                 * read the extent item from disk (data not in the page cache).
                 */
                btrfs_release_path(path);
-               return btrfs_truncate_page(inode, offset, page_end - offset, 0);
+               return btrfs_truncate_block(inode, offset, page_end - offset,
+                                       0);
        }
 
        btrfs_set_file_extent_ram_bytes(leaf, fi, size);
@@ -4601,17 +4603,17 @@ error:
 }
 
 /*
- * btrfs_truncate_page - read, zero a chunk and write a page
+ * btrfs_truncate_block - read, zero a chunk and write a block
  * @inode - inode that we're zeroing
  * @from - the offset to start zeroing
  * @len - the length to zero, 0 to zero the entire range respective to the
  *     offset
  * @front - zero up to the offset instead of from the offset on
  *
- * This will find the page for the "from" offset and cow the page and zero the
+ * This will find the block for the "from" offset and cow the block and zero the
  * part we want to zero.  This is used with truncate and hole punching.
  */
-int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
                        int front)
 {
        struct address_space *mapping = inode->i_mapping;
@@ -4622,18 +4624,19 @@ int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
        char *kaddr;
        u32 blocksize = root->sectorsize;
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned offset = from & (blocksize - 1);
        struct page *page;
        gfp_t mask = btrfs_alloc_write_mask(mapping);
        int ret = 0;
-       u64 page_start;
-       u64 page_end;
+       u64 block_start;
+       u64 block_end;
 
        if ((offset & (blocksize - 1)) == 0 &&
            (!len || ((len & (blocksize - 1)) == 0)))
                goto out;
+
        ret = btrfs_delalloc_reserve_space(inode,
-                       round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
+                       round_down(from, blocksize), blocksize);
        if (ret)
                goto out;
 
@@ -4641,14 +4644,14 @@ again:
        page = find_or_create_page(mapping, index, mask);
        if (!page) {
                btrfs_delalloc_release_space(inode,
-                               round_down(from, PAGE_CACHE_SIZE),
-                               PAGE_CACHE_SIZE);
+                               round_down(from, blocksize),
+                               blocksize);
                ret = -ENOMEM;
                goto out;
        }
 
-       page_start = page_offset(page);
-       page_end = page_start + PAGE_CACHE_SIZE - 1;
+       block_start = round_down(from, blocksize);
+       block_end = block_start + blocksize - 1;
 
        if (!PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
@@ -4665,12 +4668,12 @@ again:
        }
        wait_on_page_writeback(page);
 
-       lock_extent_bits(io_tree, page_start, page_end, &cached_state);
+       lock_extent_bits(io_tree, block_start, block_end, &cached_state);
        set_page_extent_mapped(page);
 
-       ordered = btrfs_lookup_ordered_extent(inode, page_start);
+       ordered = btrfs_lookup_ordered_extent(inode, block_start);
        if (ordered) {
-               unlock_extent_cached(io_tree, page_start, page_end,
+               unlock_extent_cached(io_tree, block_start, block_end,
                                     &cached_state, GFP_NOFS);
                unlock_page(page);
                page_cache_release(page);
@@ -4679,39 +4682,41 @@ again:
                goto again;
        }
 
-       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
                          EXTENT_DIRTY | EXTENT_DELALLOC |
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+       ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
                                        &cached_state);
        if (ret) {
-               unlock_extent_cached(io_tree, page_start, page_end,
+               unlock_extent_cached(io_tree, block_start, block_end,
                                     &cached_state, GFP_NOFS);
                goto out_unlock;
        }
 
-       if (offset != PAGE_CACHE_SIZE) {
+       if (offset != blocksize) {
                if (!len)
-                       len = PAGE_CACHE_SIZE - offset;
+                       len = blocksize - offset;
                kaddr = kmap(page);
                if (front)
-                       memset(kaddr, 0, offset);
+                       memset(kaddr + (block_start - page_offset(page)),
+                               0, offset);
                else
-                       memset(kaddr + offset, 0, len);
+                       memset(kaddr + (block_start - page_offset(page)) +  offset,
+                               0, len);
                flush_dcache_page(page);
                kunmap(page);
        }
        ClearPageChecked(page);
        set_page_dirty(page);
-       unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
+       unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
                             GFP_NOFS);
 
 out_unlock:
        if (ret)
-               btrfs_delalloc_release_space(inode, page_start,
-                                            PAGE_CACHE_SIZE);
+               btrfs_delalloc_release_space(inode, block_start,
+                                            blocksize);
        unlock_page(page);
        page_cache_release(page);
 out:
@@ -4782,11 +4787,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
        int err = 0;
 
        /*
-        * If our size started in the middle of a page we need to zero out the
-        * rest of the page before we expand the i_size, otherwise we could
+        * If our size started in the middle of a block we need to zero out the
+        * rest of the block before we expand the i_size, otherwise we could
         * expose stale data.
         */
-       err = btrfs_truncate_page(inode, oldsize, 0, 0);
+       err = btrfs_truncate_block(inode, oldsize, 0, 0);
        if (err)
                return err;
 
@@ -4895,7 +4900,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
        }
 
        if (newsize > oldsize) {
-               truncate_pagecache(inode, newsize);
                /*
                 * Don't do an expanding truncate while snapshoting is ongoing.
                 * This is to ensure the snapshot captures a fully consistent
@@ -4918,6 +4922,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 
                i_size_write(inode, newsize);
                btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+               pagecache_isize_extended(inode, oldsize, newsize);
                ret = btrfs_update_inode(trans, root, inode);
                btrfs_end_write_no_snapshoting(root);
                btrfs_end_transaction(trans, root);
@@ -7116,21 +7121,41 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        if (ret)
                return ERR_PTR(ret);
 
-       em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
-                             ins.offset, ins.offset, ins.offset, 0);
-       if (IS_ERR(em)) {
-               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
-               return em;
-       }
-
+       /*
+        * Create the ordered extent before the extent map. This is to avoid
+        * races with the fast fsync path that would lead to it logging file
+        * extent items that point to disk extents that were not yet written to.
+        * The fast fsync path collects ordered extents into a local list and
+        * then collects all the new extent maps, so we must create the ordered
+        * extent first and make sure the fast fsync path collects any new
+        * ordered extents after collecting new extent maps as well.
+        * The fsync path simply can not rely on inode_dio_wait() because it
+        * causes deadlock with AIO.
+        */
        ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
                                           ins.offset, ins.offset, 0);
        if (ret) {
                btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
-               free_extent_map(em);
                return ERR_PTR(ret);
        }
 
+       em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
+                             ins.offset, ins.offset, ins.offset, 0);
+       if (IS_ERR(em)) {
+               struct btrfs_ordered_extent *oe;
+
+               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+               oe = btrfs_lookup_ordered_extent(inode, start);
+               ASSERT(oe);
+               if (WARN_ON(!oe))
+                       return em;
+               set_bit(BTRFS_ORDERED_IOERR, &oe->flags);
+               set_bit(BTRFS_ORDERED_IO_DONE, &oe->flags);
+               btrfs_remove_ordered_extent(inode, oe);
+               /* Once for our lookup and once for the ordered extents tree. */
+               btrfs_put_ordered_extent(oe);
+               btrfs_put_ordered_extent(oe);
+       }
        return em;
 }
 
@@ -7732,9 +7757,9 @@ static int btrfs_check_dio_repairable(struct inode *inode,
 }
 
 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
-                         struct page *page, u64 start, u64 end,
-                         int failed_mirror, bio_end_io_t *repair_endio,
-                         void *repair_arg)
+                       struct page *page, unsigned int pgoff,
+                       u64 start, u64 end, int failed_mirror,
+                       bio_end_io_t *repair_endio, void *repair_arg)
 {
        struct io_failure_record *failrec;
        struct bio *bio;
@@ -7755,7 +7780,9 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
                return -EIO;
        }
 
-       if (failed_bio->bi_vcnt > 1)
+       if ((failed_bio->bi_vcnt > 1)
+               || (failed_bio->bi_io_vec->bv_len
+                       > BTRFS_I(inode)->root->sectorsize))
                read_mode = READ_SYNC | REQ_FAILFAST_DEV;
        else
                read_mode = READ_SYNC;
@@ -7763,7 +7790,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
        isector = start - btrfs_io_bio(failed_bio)->logical;
        isector >>= inode->i_sb->s_blocksize_bits;
        bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
-                                     0, isector, repair_endio, repair_arg);
+                               pgoff, isector, repair_endio, repair_arg);
        if (!bio) {
                free_io_failure(inode, failrec);
                return -EIO;
@@ -7793,12 +7820,17 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
+       struct inode *inode;
        struct bio_vec *bvec;
        int i;
 
        if (bio->bi_error)
                goto end;
 
+       ASSERT(bio->bi_vcnt == 1);
+       inode = bio->bi_io_vec->bv_page->mapping->host;
+       ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+
        done->uptodate = 1;
        bio_for_each_segment_all(bvec, bio, i)
                clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
@@ -7810,25 +7842,35 @@ end:
 static int __btrfs_correct_data_nocsum(struct inode *inode,
                                       struct btrfs_io_bio *io_bio)
 {
+       struct btrfs_fs_info *fs_info;
        struct bio_vec *bvec;
        struct btrfs_retry_complete done;
        u64 start;
+       unsigned int pgoff;
+       u32 sectorsize;
+       int nr_sectors;
        int i;
        int ret;
 
+       fs_info = BTRFS_I(inode)->root->fs_info;
+       sectorsize = BTRFS_I(inode)->root->sectorsize;
+
        start = io_bio->logical;
        done.inode = inode;
 
        bio_for_each_segment_all(bvec, &io_bio->bio, i) {
-try_again:
+               nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+               pgoff = bvec->bv_offset;
+
+next_block_or_try_again:
                done.uptodate = 0;
                done.start = start;
                init_completion(&done.done);
 
-               ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
-                                    start + bvec->bv_len - 1,
-                                    io_bio->mirror_num,
-                                    btrfs_retry_endio_nocsum, &done);
+               ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+                               pgoff, start, start + sectorsize - 1,
+                               io_bio->mirror_num,
+                               btrfs_retry_endio_nocsum, &done);
                if (ret)
                        return ret;
 
@@ -7836,10 +7878,15 @@ try_again:
 
                if (!done.uptodate) {
                        /* We might have another mirror, so try again */
-                       goto try_again;
+                       goto next_block_or_try_again;
                }
 
-               start += bvec->bv_len;
+               start += sectorsize;
+
+               if (--nr_sectors) {
+                       pgoff += sectorsize;
+                       goto next_block_or_try_again;
+               }
        }
 
        return 0;
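
The hunks above convert the direct I/O read-repair path from retrying a whole bio_vec (one page) at a time to retrying one sectorsize block at a time, walking the page with pgoff. A minimal userspace sketch of that walk; retry_block(), MAX_MIRRORS and correct_range() are illustrative stand-ins, not btrfs functions:

#include <stdbool.h>
#include <stdio.h>

#define MAX_MIRRORS 2   /* illustrative: how many copies may be tried */

/* Stand-in for dio_read_error()/btrfs_retry_endio_nocsum(); pretend the
 * last mirror always reads back clean. */
static bool retry_block(unsigned long long start, unsigned int pgoff, int mirror)
{
        printf("retry start=%llu pgoff=%u mirror=%d\n", start, pgoff, mirror);
        return mirror == MAX_MIRRORS;
}

/* Walk one bio_vec in sector-sized steps, the way the reworked
 * __btrfs_correct_data_nocsum() does. */
static int correct_range(unsigned long long start, unsigned int bv_offset,
                         unsigned int bv_len, unsigned int sectorsize)
{
        int nr_sectors = bv_len / sectorsize;   /* BTRFS_BYTES_TO_BLKS() analogue */
        unsigned int pgoff = bv_offset;

        while (nr_sectors--) {
                int mirror = 1;

                while (!retry_block(start, pgoff, mirror)) {
                        if (++mirror > MAX_MIRRORS)
                                return -1;      /* every mirror is bad */
                }
                start += sectorsize;
                pgoff += sectorsize;
        }
        return 0;
}

int main(void)
{
        /* A 16K bio_vec on a 4K-sector filesystem: four independent retries. */
        return correct_range(0, 0, 16384, 4096);
}
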
@@ -7849,7 +7896,9 @@ static void btrfs_retry_endio(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+       struct inode *inode;
        struct bio_vec *bvec;
+       u64 start;
        int uptodate;
        int ret;
        int i;
@@ -7858,13 +7907,20 @@ static void btrfs_retry_endio(struct bio *bio)
                goto end;
 
        uptodate = 1;
+
+       start = done->start;
+
+       ASSERT(bio->bi_vcnt == 1);
+       inode = bio->bi_io_vec->bv_page->mapping->host;
+       ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+
        bio_for_each_segment_all(bvec, bio, i) {
                ret = __readpage_endio_check(done->inode, io_bio, i,
-                                            bvec->bv_page, 0,
-                                            done->start, bvec->bv_len);
+                                       bvec->bv_page, bvec->bv_offset,
+                                       done->start, bvec->bv_len);
                if (!ret)
                        clean_io_failure(done->inode, done->start,
-                                        bvec->bv_page, 0);
+                                       bvec->bv_page, bvec->bv_offset);
                else
                        uptodate = 0;
        }
@@ -7878,20 +7934,34 @@ end:
 static int __btrfs_subio_endio_read(struct inode *inode,
                                    struct btrfs_io_bio *io_bio, int err)
 {
+       struct btrfs_fs_info *fs_info;
        struct bio_vec *bvec;
        struct btrfs_retry_complete done;
        u64 start;
        u64 offset = 0;
+       u32 sectorsize;
+       int nr_sectors;
+       unsigned int pgoff;
+       int csum_pos;
        int i;
        int ret;
 
+       fs_info = BTRFS_I(inode)->root->fs_info;
+       sectorsize = BTRFS_I(inode)->root->sectorsize;
+
        err = 0;
        start = io_bio->logical;
        done.inode = inode;
 
        bio_for_each_segment_all(bvec, &io_bio->bio, i) {
-               ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
-                                            0, start, bvec->bv_len);
+               nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+
+               pgoff = bvec->bv_offset;
+next_block:
+               csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
+               ret = __readpage_endio_check(inode, io_bio, csum_pos,
+                                       bvec->bv_page, pgoff, start,
+                                       sectorsize);
                if (likely(!ret))
                        goto next;
 try_again:
@@ -7899,10 +7969,10 @@ try_again:
                done.start = start;
                init_completion(&done.done);
 
-               ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
-                                    start + bvec->bv_len - 1,
-                                    io_bio->mirror_num,
-                                    btrfs_retry_endio, &done);
+               ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+                               pgoff, start, start + sectorsize - 1,
+                               io_bio->mirror_num,
+                               btrfs_retry_endio, &done);
                if (ret) {
                        err = ret;
                        goto next;
@@ -7915,8 +7985,15 @@ try_again:
                        goto try_again;
                }
 next:
-               offset += bvec->bv_len;
-               start += bvec->bv_len;
+               offset += sectorsize;
+               start += sectorsize;
+
+               ASSERT(nr_sectors);
+
+               if (--nr_sectors) {
+                       pgoff += sectorsize;
+                       goto next_block;
+               }
        }
 
        return err;
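
In the checksum path above, each sectorsize block of a bio_vec is now verified separately, and csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset) turns the running byte offset within the bio into an index into the per-block checksum array. A small sketch of that indexing, assuming one 32-bit checksum per block (the csums layout here is illustrative, not the btrfs_io_bio definition):

#include <stdint.h>
#include <stdio.h>

/* Verify the block that starts at `offset` bytes into the bio. */
static int check_block(const uint32_t *csums, unsigned int sectorsize,
                       uint64_t offset, uint32_t actual_crc)
{
        uint64_t csum_pos = offset / sectorsize;   /* BTRFS_BYTES_TO_BLKS() analogue */

        return csums[csum_pos] == actual_crc ? 0 : -1;
}

int main(void)
{
        uint32_t csums[2] = { 0x1234, 0x5678 };

        /* Second 4K block of the bio, i.e. offset 4096, uses csum slot 1. */
        printf("%d\n", check_block(csums, 4096, 4096, 0x5678));
        return 0;
}
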
@@ -8168,9 +8245,11 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        u64 file_offset = dip->logical_offset;
        u64 submit_len = 0;
        u64 map_length;
-       int nr_pages = 0;
-       int ret;
+       u32 blocksize = root->sectorsize;
        int async_submit = 0;
+       int nr_sectors;
+       int ret;
+       int i;
 
        map_length = orig_bio->bi_iter.bi_size;
        ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
@@ -8200,9 +8279,12 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        atomic_inc(&dip->pending_bios);
 
        while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
-               if (map_length < submit_len + bvec->bv_len ||
-                   bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-                                bvec->bv_offset) < bvec->bv_len) {
+               nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
+               i = 0;
+next_block:
+               if (unlikely(map_length < submit_len + blocksize ||
+                   bio_add_page(bio, bvec->bv_page, blocksize,
+                           bvec->bv_offset + (i * blocksize)) < blocksize)) {
                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
@@ -8223,7 +8305,6 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        file_offset += submit_len;
 
                        submit_len = 0;
-                       nr_pages = 0;
 
                        bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
                                                  start_sector, GFP_NOFS);
@@ -8241,9 +8322,14 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                                bio_put(bio);
                                goto out_err;
                        }
+
+                       goto next_block;
                } else {
-                       submit_len += bvec->bv_len;
-                       nr_pages++;
+                       submit_len += blocksize;
+                       if (--nr_sectors) {
+                               i++;
+                               goto next_block;
+                       }
                        bvec++;
                }
        }
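
btrfs_submit_direct_hook() above now packs the outgoing bio one blocksize chunk at a time rather than a bio_vec at a time, so a stripe boundary (map_length) or a full bio can split a single page across two submissions. A toy model of that packing loop; BIO_CAPACITY stands in for map_length and the prints stand in for bio submission:

#include <stdio.h>

#define BLOCKSIZE    4096u
#define BIO_CAPACITY (3 * BLOCKSIZE)    /* illustrative per-bio limit */

int main(void)
{
        unsigned int bv_len = 8 * BLOCKSIZE;    /* one large bio_vec, e.g. a 32K page */
        unsigned int bv_offset = 0;
        unsigned int nr_sectors = bv_len / BLOCKSIZE;
        unsigned int submit_len = 0, bio_nr = 0, i = 0;

        while (i < nr_sectors) {
                if (submit_len + BLOCKSIZE > BIO_CAPACITY) {
                        /* Current bio is full: submit it and start a new one,
                         * then retry the same block, as the goto next_block does. */
                        printf("submit bio %u (%u bytes)\n", bio_nr++, submit_len);
                        submit_len = 0;
                        continue;
                }
                printf("add block at page offset %u\n", bv_offset + i * BLOCKSIZE);
                submit_len += BLOCKSIZE;
                i++;
        }
        if (submit_len)
                printf("submit bio %u (%u bytes)\n", bio_nr, submit_len);
        return 0;
}
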
@@ -8608,6 +8694,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
        struct extent_state *cached_state = NULL;
        u64 page_start = page_offset(page);
        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+       u64 start;
+       u64 end;
        int inode_evicting = inode->i_state & I_FREEING;
 
        /*
@@ -8627,14 +8715,18 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 
        if (!inode_evicting)
                lock_extent_bits(tree, page_start, page_end, &cached_state);
-       ordered = btrfs_lookup_ordered_extent(inode, page_start);
+again:
+       start = page_start;
+       ordered = btrfs_lookup_ordered_range(inode, start,
+                                       page_end - start + 1);
        if (ordered) {
+               end = min(page_end, ordered->file_offset + ordered->len - 1);
                /*
                 * IO on this page will never be started, so we need
                 * to account for any ordered extents now
                 */
                if (!inode_evicting)
-                       clear_extent_bit(tree, page_start, page_end,
+                       clear_extent_bit(tree, start, end,
                                         EXTENT_DIRTY | EXTENT_DELALLOC |
                                         EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                         EXTENT_DEFRAG, 1, 0, &cached_state,
@@ -8651,22 +8743,26 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 
                        spin_lock_irq(&tree->lock);
                        set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
-                       new_len = page_start - ordered->file_offset;
+                       new_len = start - ordered->file_offset;
                        if (new_len < ordered->truncated_len)
                                ordered->truncated_len = new_len;
                        spin_unlock_irq(&tree->lock);
 
                        if (btrfs_dec_test_ordered_pending(inode, &ordered,
-                                                          page_start,
-                                                          PAGE_CACHE_SIZE, 1))
+                                                          start,
+                                                          end - start + 1, 1))
                                btrfs_finish_ordered_io(ordered);
                }
                btrfs_put_ordered_extent(ordered);
                if (!inode_evicting) {
                        cached_state = NULL;
-                       lock_extent_bits(tree, page_start, page_end,
+                       lock_extent_bits(tree, start, end,
                                         &cached_state);
                }
+
+               start = end + 1;
+               if (start < page_end)
+                       goto again;
        }
 
        /*
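
Because an ordered extent can now cover less than a page, btrfs_invalidatepage() above walks the page range in pieces: each pass clamps its end to the ordered extent it found, cleans up that sub-range, then restarts at end + 1 until the page is exhausted. A toy walk over such a range (the extents table is illustrative):

#include <stdio.h>

struct ordered { unsigned long long file_offset, len; };

static const struct ordered *lookup(const struct ordered *o, int n,
                                    unsigned long long start)
{
        for (int i = 0; i < n; i++)
                if (start >= o[i].file_offset && start < o[i].file_offset + o[i].len)
                        return &o[i];
        return NULL;
}

int main(void)
{
        /* Two 2K ordered extents backing one 4K page at file offset 0. */
        const struct ordered extents[] = { { 0, 2048 }, { 2048, 2048 } };
        unsigned long long page_start = 0, page_end = 4095;
        unsigned long long start = page_start;

        while (start <= page_end) {
                const struct ordered *oe = lookup(extents, 2, start);
                unsigned long long end;

                if (!oe)
                        break;
                end = oe->file_offset + oe->len - 1;
                if (end > page_end)
                        end = page_end;         /* min(page_end, ...) as in the hunk */
                printf("clean up ordered range [%llu, %llu]\n", start, end);
                start = end + 1;
        }
        return 0;
}
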
@@ -8727,15 +8823,28 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        loff_t size;
        int ret;
        int reserved = 0;
+       u64 reserved_space;
        u64 page_start;
        u64 page_end;
+       u64 end;
+
+       reserved_space = PAGE_CACHE_SIZE;
 
        sb_start_pagefault(inode->i_sb);
        page_start = page_offset(page);
        page_end = page_start + PAGE_CACHE_SIZE - 1;
+       end = page_end;
 
+       /*
+        * Reserving delalloc space after obtaining the page lock can lead to
+        * deadlock. For example, if a dirty page is locked by this function
+        * and the call to btrfs_delalloc_reserve_space() ends up triggering
+        * dirty page write out, then the btrfs_writepage() function could
+        * end up waiting indefinitely to get a lock on the page currently
+        * being processed by btrfs_page_mkwrite() function.
+        */
        ret = btrfs_delalloc_reserve_space(inode, page_start,
-                                          PAGE_CACHE_SIZE);
+                                          reserved_space);
        if (!ret) {
                ret = file_update_time(vma->vm_file);
                reserved = 1;
@@ -8769,7 +8878,7 @@ again:
         * we can't set the delalloc bits if there are pending ordered
         * extents.  Drop our locks and wait for them to finish
         */
-       ordered = btrfs_lookup_ordered_extent(inode, page_start);
+       ordered = btrfs_lookup_ordered_range(inode, page_start, page_end);
        if (ordered) {
                unlock_extent_cached(io_tree, page_start, page_end,
                                     &cached_state, GFP_NOFS);
@@ -8779,6 +8888,18 @@ again:
                goto again;
        }
 
+       if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+               reserved_space = round_up(size - page_start, root->sectorsize);
+               if (reserved_space < PAGE_CACHE_SIZE) {
+                       end = page_start + reserved_space - 1;
+                       spin_lock(&BTRFS_I(inode)->lock);
+                       BTRFS_I(inode)->outstanding_extents++;
+                       spin_unlock(&BTRFS_I(inode)->lock);
+                       btrfs_delalloc_release_space(inode, page_start,
+                                               PAGE_CACHE_SIZE - reserved_space);
+               }
+       }
+
        /*
         * XXX - page_mkwrite gets called every time the page is dirtied, even
         * if it was already dirty, so for space accounting reasons we need to
@@ -8786,12 +8907,12 @@ again:
         * is probably a better way to do this, but for now keep consistent with
         * prepare_pages in the normal write path.
         */
-       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
                          EXTENT_DIRTY | EXTENT_DELALLOC |
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+       ret = btrfs_set_extent_delalloc(inode, page_start, end,
                                        &cached_state);
        if (ret) {
                unlock_extent_cached(io_tree, page_start, page_end,
@@ -8830,7 +8951,7 @@ out_unlock:
        }
        unlock_page(page);
 out:
-       btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
+       btrfs_delalloc_release_space(inode, page_start, reserved_space);
 out_noreserve:
        sb_end_pagefault(inode->i_sb);
        return ret;
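
Taken together, the btrfs_page_mkwrite() hunks reserve a full page of delalloc space before the page is locked (avoiding the writeback deadlock described in the comment), then, for the page that contains i_size, shrink the reservation to the sector-aligned tail that is really inside the file and give the rest back. The arithmetic in isolation, with round_up written out so the sketch is standalone and the EOF-page test simplified from the page->index check in the hunk:

#include <stdio.h>

#define PAGE_CACHE_SIZE 65536ULL                /* e.g. a 64K-page machine */

static unsigned long long round_up_pow2(unsigned long long x, unsigned long long a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        unsigned long long sectorsize = 4096, i_size = 70000;
        unsigned long long page_start = 65536;                  /* page containing EOF */
        unsigned long long reserved_space = PAGE_CACHE_SIZE;    /* reserved before locking */

        if (i_size > page_start && i_size <= page_start + PAGE_CACHE_SIZE) {
                reserved_space = round_up_pow2(i_size - page_start, sectorsize);
                if (reserved_space < PAGE_CACHE_SIZE)
                        printf("keep %llu bytes reserved, release %llu\n",
                               reserved_space, PAGE_CACHE_SIZE - reserved_space);
        }
        return 0;
}
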
@@ -9216,7 +9337,6 @@ static int btrfs_getattr(struct vfsmount *mnt,
 
        generic_fillattr(inode, stat);
        stat->dev = BTRFS_I(inode)->root->anon_dev;
-       stat->blksize = PAGE_CACHE_SIZE;
 
        spin_lock(&BTRFS_I(inode)->lock);
        delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
index 952172ca7e455633c28a79292d18ebbfd68c4d18..93e7832d1d1b22cc93b484ef34ca030c919ab74b 100644 (file)
@@ -3814,8 +3814,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
         * Truncate page cache pages so that future reads will see the cloned
         * data immediately and not the previous data.
         */
-       truncate_inode_pages_range(&inode->i_data, destoff,
-                                  PAGE_CACHE_ALIGN(destoff + len) - 1);
+       truncate_inode_pages_range(&inode->i_data,
+                               round_down(destoff, PAGE_CACHE_SIZE),
+                               round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
 out_unlock:
        if (!same_inode)
                btrfs_double_inode_unlock(src, inode);
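
The clone fix above widens the invalidated page-cache range to whole pages: the start of the destination range is rounded down and the end rounded up to PAGE_CACHE_SIZE, so pages only partially covered by the clone are dropped too. The two alignments, assuming 4K pages for the example:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096ULL

static unsigned long long round_down_page(unsigned long long x)
{
        return x & ~(PAGE_CACHE_SIZE - 1);
}

static unsigned long long round_up_page(unsigned long long x)
{
        return (x + PAGE_CACHE_SIZE - 1) & ~(PAGE_CACHE_SIZE - 1);
}

int main(void)
{
        unsigned long long destoff = 6000, len = 10000;

        /* Old code truncated from destoff itself; the fix covers whole pages. */
        printf("truncate pages [%llu, %llu]\n",
               round_down_page(destoff), round_up_page(destoff + len) - 1);
        /* prints: truncate pages [4096, 16383] */
        return 0;
}
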
index 619f92963e27102fb47a6f119b65451bdf959bef..c5f1773c4794995eacc909976f62def80a9c64bc 100644 (file)
@@ -280,7 +280,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
        end = start + cache->key.offset - 1;
        btrfs_put_block_group(cache);
 
-       zone = kzalloc(sizeof(*zone), GFP_NOFS);
+       zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        if (!zone)
                return NULL;
 
@@ -343,7 +343,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
        if (re)
                return re;
 
-       re = kzalloc(sizeof(*re), GFP_NOFS);
+       re = kzalloc(sizeof(*re), GFP_KERNEL);
        if (!re)
                return NULL;
 
@@ -566,7 +566,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
        if (!re)
                return -1;
 
-       rec = kzalloc(sizeof(*rec), GFP_NOFS);
+       rec = kzalloc(sizeof(*rec), GFP_KERNEL);
        if (!rec) {
                reada_extent_put(root->fs_info, re);
                return -ENOMEM;
@@ -791,7 +791,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
 {
        struct reada_machine_work *rmw;
 
-       rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
+       rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
        if (!rmw) {
                /* FIXME we cannot handle this properly right now */
                BUG();
@@ -926,7 +926,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
                .offset = (u64)-1
        };
 
-       rc = kzalloc(sizeof(*rc), GFP_NOFS);
+       rc = kzalloc(sizeof(*rc), GFP_KERNEL);
        if (!rc)
                return ERR_PTR(-ENOMEM);
 
index fd1c4d982463fd92768537f86f8b9ae8545befba..2bd0011450df2715ca926a6ccd2d99cb7d40f8b4 100644 (file)
@@ -575,7 +575,8 @@ static int is_cowonly_root(u64 root_objectid)
            root_objectid == BTRFS_TREE_LOG_OBJECTID ||
            root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
            root_objectid == BTRFS_UUID_TREE_OBJECTID ||
-           root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
+           root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
+           root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
                return 1;
        return 0;
 }
index 92bf5ee732fb0e14f7e330dddaadccb4b70b35cc..2de7817d0e1be7176a3e3dcf7ad44793fe446e2c 100644 (file)
@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        int ret;
 
-       sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
+       sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
        if (!sctx)
                goto nomem;
        atomic_set(&sctx->refs, 1);
@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio;
 
-               sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
+               sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
                if (!sbio)
                        goto nomem;
                sctx->bios[i] = sbio;
@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 again:
        if (!wr_ctx->wr_curr_bio) {
                wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
-                                             GFP_NOFS);
+                                             GFP_KERNEL);
                if (!wr_ctx->wr_curr_bio) {
                        mutex_unlock(&wr_ctx->wr_lock);
                        return -ENOMEM;
@@ -1671,7 +1671,8 @@ again:
                sbio->dev = wr_ctx->tgtdev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+                       bio = btrfs_io_bio_alloc(GFP_KERNEL,
+                                       wr_ctx->pages_per_wr_bio);
                        if (!bio) {
                                mutex_unlock(&wr_ctx->wr_lock);
                                return -ENOMEM;
@@ -2076,7 +2077,8 @@ again:
                sbio->dev = spage->dev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+                       bio = btrfs_io_bio_alloc(GFP_KERNEL,
+                                       sctx->pages_per_rd_bio);
                        if (!bio)
                                return -ENOMEM;
                        sbio->bio = bio;
@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        struct scrub_block *sblock;
        int index;
 
-       sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+       sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
        if (!sblock) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                struct scrub_page *spage;
                u64 l = min_t(u64, len, PAGE_SIZE);
 
-               spage = kzalloc(sizeof(*spage), GFP_NOFS);
+               spage = kzalloc(sizeof(*spage), GFP_KERNEL);
                if (!spage) {
 leave_nomem:
                        spin_lock(&sctx->stat_lock);
@@ -2286,7 +2288,7 @@ leave_nomem:
                        spage->have_csum = 0;
                }
                sblock->page_count++;
-               spage->page = alloc_page(GFP_NOFS);
+               spage->page = alloc_page(GFP_KERNEL);
                if (!spage->page)
                        goto leave_nomem;
                len -= l;
@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
        struct scrub_block *sblock;
        int index;
 
-       sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+       sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
        if (!sblock) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
                struct scrub_page *spage;
                u64 l = min_t(u64, len, PAGE_SIZE);
 
-               spage = kzalloc(sizeof(*spage), GFP_NOFS);
+               spage = kzalloc(sizeof(*spage), GFP_KERNEL);
                if (!spage) {
 leave_nomem:
                        spin_lock(&sctx->stat_lock);
@@ -2591,7 +2593,7 @@ leave_nomem:
                        spage->have_csum = 0;
                }
                sblock->page_count++;
-               spage->page = alloc_page(GFP_NOFS);
+               spage->page = alloc_page(GFP_KERNEL);
                if (!spage->page)
                        goto leave_nomem;
                len -= l;
index 63a6152be04b4f4265b25e253eadb1d4d91b8969..d2e29925f1da3bc6f3d4ce3846733cd058a301bf 100644 (file)
@@ -304,7 +304,7 @@ static struct fs_path *fs_path_alloc(void)
 {
        struct fs_path *p;
 
-       p = kmalloc(sizeof(*p), GFP_NOFS);
+       p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return NULL;
        p->reversed = 0;
@@ -363,11 +363,11 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
         * First time the inline_buf does not suffice
         */
        if (p->buf == p->inline_buf) {
-               tmp_buf = kmalloc(len, GFP_NOFS);
+               tmp_buf = kmalloc(len, GFP_KERNEL);
                if (tmp_buf)
                        memcpy(tmp_buf, p->buf, old_buf_len);
        } else {
-               tmp_buf = krealloc(p->buf, len, GFP_NOFS);
+               tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
        }
        if (!tmp_buf)
                return -ENOMEM;
@@ -995,7 +995,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
         * values are small.
         */
        buf_len = PATH_MAX;
-       buf = kmalloc(buf_len, GFP_NOFS);
+       buf = kmalloc(buf_len, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out;
@@ -1042,7 +1042,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
                                buf = NULL;
                        } else {
                                char *tmp = krealloc(buf, buf_len,
-                                                    GFP_NOFS | __GFP_NOWARN);
+                                               GFP_KERNEL | __GFP_NOWARN);
 
                                if (!tmp)
                                        kfree(buf);
@@ -1303,7 +1303,7 @@ static int find_extent_clone(struct send_ctx *sctx,
        /* We only use this path under the commit sem */
        tmp_path->need_commit_sem = 0;
 
-       backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
+       backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
        if (!backref_ctx) {
                ret = -ENOMEM;
                goto out;
@@ -1984,7 +1984,7 @@ static int name_cache_insert(struct send_ctx *sctx,
        nce_head = radix_tree_lookup(&sctx->name_cache,
                        (unsigned long)nce->ino);
        if (!nce_head) {
-               nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
+               nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
                if (!nce_head) {
                        kfree(nce);
                        return -ENOMEM;
@@ -2179,7 +2179,7 @@ out_cache:
        /*
         * Store the result of the lookup in the name cache.
         */
-       nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
+       nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
        if (!nce) {
                ret = -ENOMEM;
                goto out;
@@ -2315,7 +2315,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
        if (!path)
                return -ENOMEM;
 
-       name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
+       name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
        if (!name) {
                btrfs_free_path(path);
                return -ENOMEM;
@@ -2730,7 +2730,7 @@ static int __record_ref(struct list_head *head, u64 dir,
 {
        struct recorded_ref *ref;
 
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
+       ref = kmalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref)
                return -ENOMEM;
 
@@ -2755,7 +2755,7 @@ static int dup_ref(struct recorded_ref *ref, struct list_head *list)
 {
        struct recorded_ref *new;
 
-       new = kmalloc(sizeof(*ref), GFP_NOFS);
+       new = kmalloc(sizeof(*ref), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -2818,7 +2818,7 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
        struct rb_node *parent = NULL;
        struct orphan_dir_info *entry, *odi;
 
-       odi = kmalloc(sizeof(*odi), GFP_NOFS);
+       odi = kmalloc(sizeof(*odi), GFP_KERNEL);
        if (!odi)
                return ERR_PTR(-ENOMEM);
        odi->ino = dir_ino;
@@ -2973,7 +2973,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
        struct rb_node *parent = NULL;
        struct waiting_dir_move *entry, *dm;
 
-       dm = kmalloc(sizeof(*dm), GFP_NOFS);
+       dm = kmalloc(sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return -ENOMEM;
        dm->ino = ino;
@@ -3040,7 +3040,7 @@ static int add_pending_dir_move(struct send_ctx *sctx,
        int exists = 0;
        int ret;
 
-       pm = kmalloc(sizeof(*pm), GFP_NOFS);
+       pm = kmalloc(sizeof(*pm), GFP_KERNEL);
        if (!pm)
                return -ENOMEM;
        pm->parent_ino = parent_ino;
@@ -4280,7 +4280,7 @@ static int __find_xattr(int num, struct btrfs_key *di_key,
            strncmp(name, ctx->name, name_len) == 0) {
                ctx->found_idx = num;
                ctx->found_data_len = data_len;
-               ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
+               ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
                if (!ctx->found_data)
                        return -ENOMEM;
                return 1;
@@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
        while (index <= last_index) {
                unsigned cur_len = min_t(unsigned, len,
                                         PAGE_CACHE_SIZE - pg_offset);
-               page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+               page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
                if (!page) {
                        ret = -ENOMEM;
                        break;
@@ -5989,7 +5989,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
                goto out;
        }
 
-       sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
+       sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
        if (!sctx) {
                ret = -ENOMEM;
                goto out;
@@ -5997,7 +5997,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 
        INIT_LIST_HEAD(&sctx->new_refs);
        INIT_LIST_HEAD(&sctx->deleted_refs);
-       INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
+       INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
        INIT_LIST_HEAD(&sctx->name_cache_list);
 
        sctx->flags = arg->flags;
index e0ac85949067c30191ce3635e2a65edbe8a56361..539e7b5e3f86a83059f070409fdb36749a958ce4 100644 (file)
@@ -202,6 +202,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
 BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
 BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
 BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
+BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
 
 static struct attribute *btrfs_supported_feature_attrs[] = {
        BTRFS_FEAT_ATTR_PTR(mixed_backref),
@@ -213,6 +214,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
        BTRFS_FEAT_ATTR_PTR(raid56),
        BTRFS_FEAT_ATTR_PTR(skinny_metadata),
        BTRFS_FEAT_ATTR_PTR(no_holes),
+       BTRFS_FEAT_ATTR_PTR(free_space_tree),
        NULL
 };
 
@@ -780,6 +782,39 @@ failure:
        return error;
 }
 
+
+/*
+ * Change per-fs features in /sys/fs/btrfs/UUID/features to match current
+ * values in superblock. Call after any changes to incompat/compat_ro flags
+ */
+void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
+               u64 bit, enum btrfs_feature_set set)
+{
+       struct btrfs_fs_devices *fs_devs;
+       struct kobject *fsid_kobj;
+       u64 features;
+       int ret;
+
+       if (!fs_info)
+               return;
+
+       features = get_features(fs_info, set);
+       ASSERT(bit & supported_feature_masks[set]);
+
+       fs_devs = fs_info->fs_devices;
+       fsid_kobj = &fs_devs->fsid_kobj;
+
+       if (!fsid_kobj->state_initialized)
+               return;
+
+       /*
+        * FIXME: this is too heavy to update just one value, ideally we'd like
+        * to use sysfs_update_group but some refactoring is needed first.
+        */
+       sysfs_remove_group(fsid_kobj, &btrfs_feature_attr_group);
+       ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group);
+}
+
 static int btrfs_init_debugfs(void)
 {
 #ifdef CONFIG_DEBUG_FS
index 9c09522125a6b26d0577e139c38ebad7ca5c8331..d7da1a4c2f6c12d04a8f29e4d8f503943058d46d 100644 (file)
@@ -56,7 +56,7 @@ static struct btrfs_feature_attr btrfs_attr_##_name = {                            \
 #define BTRFS_FEAT_ATTR_COMPAT(name, feature) \
        BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature)
 #define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \
-       BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT, feature)
+       BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature)
 #define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \
        BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature)
 
@@ -90,4 +90,7 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
                                struct kobject *parent);
 int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs);
 void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs);
+void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
+               u64 bit, enum btrfs_feature_set set);
+
 #endif /* _BTRFS_SYSFS_H_ */
index b1d920b3007017c2014d28217cb5efa1d7ed4d96..0e1e61a7ec23265d292e165fd39f1f8d2080f604 100644 (file)
@@ -82,18 +82,18 @@ void btrfs_destroy_test_fs(void)
 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
 {
        struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
-                                               GFP_NOFS);
+                                               GFP_KERNEL);
 
        if (!fs_info)
                return fs_info;
        fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
-                                     GFP_NOFS);
+                                     GFP_KERNEL);
        if (!fs_info->fs_devices) {
                kfree(fs_info);
                return NULL;
        }
        fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
-                                     GFP_NOFS);
+                                     GFP_KERNEL);
        if (!fs_info->super_copy) {
                kfree(fs_info->fs_devices);
                kfree(fs_info);
@@ -180,11 +180,11 @@ btrfs_alloc_dummy_block_group(unsigned long length)
 {
        struct btrfs_block_group_cache *cache;
 
-       cache = kzalloc(sizeof(*cache), GFP_NOFS);
+       cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (!cache)
                return NULL;
        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
-                                       GFP_NOFS);
+                                       GFP_KERNEL);
        if (!cache->free_space_ctl) {
                kfree(cache);
                return NULL;
index e29fa297e053951a2d2d178a8793c9d49e021023..669b58201e36881fb1434d76fd787a45329f042e 100644 (file)
@@ -94,7 +94,7 @@ static int test_find_delalloc(void)
         * test.
         */
        for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
-               page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+               page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
                if (!page) {
                        test_msg("Failed to allocate test page\n");
                        ret = -ENOMEM;
@@ -113,7 +113,7 @@ static int test_find_delalloc(void)
         * |--- delalloc ---|
         * |---  search  ---|
         */
-       set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_NOFS);
+       set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -144,7 +144,7 @@ static int test_find_delalloc(void)
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
        }
-       set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_NOFS);
+       set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -199,7 +199,7 @@ static int test_find_delalloc(void)
         *
         * We are re-using our test_start from above since it works out well.
         */
-       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_NOFS);
+       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -262,7 +262,7 @@ static int test_find_delalloc(void)
        }
        ret = 0;
 out_bits:
-       clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_NOFS);
+       clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
 out:
        if (locked_page)
                page_cache_release(locked_page);
@@ -360,7 +360,7 @@ static int test_eb_bitmaps(void)
 
        test_msg("Running extent buffer bitmap tests\n");
 
-       bitmap = kmalloc(len, GFP_NOFS);
+       bitmap = kmalloc(len, GFP_KERNEL);
        if (!bitmap) {
                test_msg("Couldn't allocate test bitmap\n");
                return -ENOMEM;
index 5de55fdd28bcda6aca3d0a2becf5bed8e96f1b63..e2d3da02deee4b73b9f9c96ed1e6cc9f4c1b5700 100644 (file)
@@ -974,7 +974,7 @@ static int test_extent_accounting(void)
                               (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
                               EXTENT_DELALLOC | EXTENT_DIRTY |
                               EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
-                              NULL, GFP_NOFS);
+                              NULL, GFP_KERNEL);
        if (ret) {
                test_msg("clear_extent_bit returned %d\n", ret);
                goto out;
@@ -1045,7 +1045,7 @@ static int test_extent_accounting(void)
                               BTRFS_MAX_EXTENT_SIZE+8191,
                               EXTENT_DIRTY | EXTENT_DELALLOC |
                               EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
-                              NULL, GFP_NOFS);
+                              NULL, GFP_KERNEL);
        if (ret) {
                test_msg("clear_extent_bit returned %d\n", ret);
                goto out;
@@ -1079,7 +1079,7 @@ static int test_extent_accounting(void)
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
                               EXTENT_DIRTY | EXTENT_DELALLOC |
                               EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
-                              NULL, GFP_NOFS);
+                              NULL, GFP_KERNEL);
        if (ret) {
                test_msg("clear_extent_bit returned %d\n", ret);
                goto out;
@@ -1096,7 +1096,7 @@ out:
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC |
                                 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
-                                NULL, GFP_NOFS);
+                                NULL, GFP_KERNEL);
        iput(inode);
        btrfs_free_dummy_root(root);
        return ret;
index 323e12cc9d2f522388fe929ed66d4dd946b5881d..978c3a8108936381309de4681e90400aeb86874f 100644 (file)
@@ -4127,7 +4127,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
                                     struct inode *inode,
                                     struct btrfs_path *path,
                                     struct list_head *logged_list,
-                                    struct btrfs_log_ctx *ctx)
+                                    struct btrfs_log_ctx *ctx,
+                                    const u64 start,
+                                    const u64 end)
 {
        struct extent_map *em, *n;
        struct list_head extents;
@@ -4166,7 +4168,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
        }
 
        list_sort(NULL, &extents, extent_cmp);
-
+       /*
+        * Collect any new ordered extents within the range. This is to
+        * prevent logging file extent items without waiting for the disk
+        * location they point to being written. We do this only to deal
+        * with races against concurrent lockless direct IO writes.
+        */
+       btrfs_get_logged_extents(inode, logged_list, start, end);
 process:
        while (!list_empty(&extents)) {
                em = list_entry(extents.next, struct extent_map, list);
@@ -4701,7 +4709,7 @@ log_extents:
                        goto out_unlock;
                }
                ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
-                                               &logged_list, ctx);
+                                               &logged_list, ctx, start, end);
                if (ret) {
                        err = ret;
                        goto out_unlock;
index 86a9c383955e56037eb38419b1e4617317d32237..eb9028e8cfc5197ef1ea99524fcbc67d67b4df91 100644 (file)
@@ -698,8 +698,8 @@ static void ceph_aio_retry_work(struct work_struct *work)
 
        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
                        false, GFP_NOFS);
-       if (IS_ERR(req)) {
-               ret = PTR_ERR(req);
+       if (!req) {
+               ret = -ENOMEM;
                req = orig_req;
                goto out;
        }
@@ -716,7 +716,6 @@ static void ceph_aio_retry_work(struct work_struct *work)
        ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
                                snapc, CEPH_NOSNAP, &aio_req->mtime);
 
-       ceph_put_snap_context(snapc);
        ceph_osdc_put_request(orig_req);
 
        req->r_callback = ceph_aio_complete_req;
@@ -731,6 +730,7 @@ out:
                ceph_aio_complete_req(req, NULL);
        }
 
+       ceph_put_snap_context(snapc);
        kfree(aio_work);
 }
 
index a5b8eb69a8f42526d4fb4d46d1cc26d33e78e6b2..6402eaf8ab958f27cc3371d0773ad45046dc0b8e 100644 (file)
@@ -1261,6 +1261,9 @@ COMPATIBLE_IOCTL(HCIUNBLOCKADDR)
 COMPATIBLE_IOCTL(HCIINQUIRY)
 COMPATIBLE_IOCTL(HCIUARTSETPROTO)
 COMPATIBLE_IOCTL(HCIUARTGETPROTO)
+COMPATIBLE_IOCTL(HCIUARTGETDEVICE)
+COMPATIBLE_IOCTL(HCIUARTSETFLAGS)
+COMPATIBLE_IOCTL(HCIUARTGETFLAGS)
 COMPATIBLE_IOCTL(RFCOMMCREATEDEV)
 COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
 COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
index 4fd6b0c5c6b50d29097e016bcd205ab72b758225..d8d3cc9323a1658637341d280547cda96c670c9e 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -58,6 +58,26 @@ static void dax_unmap_atomic(struct block_device *bdev,
        blk_queue_exit(bdev->bd_queue);
 }
 
+struct page *read_dax_sector(struct block_device *bdev, sector_t n)
+{
+       struct page *page = alloc_pages(GFP_KERNEL, 0);
+       struct blk_dax_ctl dax = {
+               .size = PAGE_SIZE,
+               .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
+       };
+       long rc;
+
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+
+       rc = dax_map_atomic(bdev, &dax);
+       if (rc < 0)
+               return ERR_PTR(rc);
+       memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
+       dax_unmap_atomic(bdev, &dax);
+       return page;
+}
+
 /*
  * dax_clear_blocks() is called from within transaction context from XFS,
  * and hence this means the stack from this point must follow GFP_NOFS
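
read_dax_sector() above copies a whole page out of persistent memory, so the requested 512-byte sector n is first aligned down to the sector that starts its page with n & ~((PAGE_SIZE / 512) - 1). A one-liner sketch of that mask, assuming 4K pages:

#include <stdio.h>

#define PAGE_SIZE   4096ULL
#define SECTOR_SIZE 512ULL

int main(void)
{
        unsigned long long n = 21;      /* arbitrary 512-byte sector */
        unsigned long long aligned = n & ~((PAGE_SIZE / SECTOR_SIZE) - 1);

        /* 8 sectors per 4K page, so sector 21 rounds down to sector 16. */
        printf("%llu -> %llu\n", n, aligned);
        return 0;
}
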
@@ -267,8 +287,13 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_unlock(inode);
 
-       if ((retval > 0) && end_io)
-               end_io(iocb, pos, retval, bh.b_private);
+       if (end_io) {
+               int err;
+
+               err = end_io(iocb, pos, retval, bh.b_private);
+               if (err)
+                       retval = err;
+       }
 
        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(inode);
@@ -338,7 +363,8 @@ static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
        void *entry;
 
        WARN_ON_ONCE(pmd_entry && !dirty);
-       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       if (dirty)
+               __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
        spin_lock_irq(&mapping->tree_lock);
 
index 1f107fd513286f0f3e8be601cd715ea0ee5443e8..655f21f991606b5bf2bef600a54b814994dbed98 100644 (file)
@@ -575,6 +575,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
        mutex_unlock(&allocated_ptys_lock);
 }
 
+/*
+ * pty code needs to hold extra references in case of last /dev/tty close
+ */
+
+void devpts_add_ref(struct inode *ptmx_inode)
+{
+       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+       atomic_inc(&sb->s_active);
+       ihold(ptmx_inode);
+}
+
+void devpts_del_ref(struct inode *ptmx_inode)
+{
+       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+       iput(ptmx_inode);
+       deactivate_super(sb);
+}
+
 /**
  * devpts_pty_new -- create a new inode in /dev/pts/
  * @ptmx_inode: inode of the master
index 1b2f7ffc8b841fd16cf312874fe8c7d4c0fa0e8e..9c6f885cc5186cc2b5a7f30187e750d7b6a979db 100644 (file)
@@ -253,8 +253,13 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
        if (ret == 0)
                ret = transferred;
 
-       if (dio->end_io && dio->result)
-               dio->end_io(dio->iocb, offset, transferred, dio->private);
+       if (dio->end_io) {
+               int err;
+
+               err = dio->end_io(dio->iocb, offset, ret, dio->private);
+               if (err)
+                       ret = err;
+       }
 
        if (!(dio->flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(dio->inode);
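
Both the dax completion above and dio_complete() here change the end_io hook from returning void to returning int, so a filesystem completion handler can report an error that overrides the transferred byte count handed back to the caller. A stripped-down sketch of the new contract; the types are simplified stand-ins for the kernel's kiocb-based signature:

#include <stdio.h>

/* Simplified stand-in for int (*end_io)(struct kiocb *, loff_t, ssize_t, void *). */
typedef int (*dio_end_io_t)(long long offset, long transferred, void *priv);

static long dio_complete(long transferred, dio_end_io_t end_io, void *priv)
{
        long ret = transferred;

        if (end_io) {
                int err = end_io(0, ret, priv);

                if (err)
                        ret = err;      /* a completion error wins over the byte count */
        }
        return ret;
}

static int failing_end_io(long long offset, long transferred, void *priv)
{
        (void)offset; (void)transferred; (void)priv;
        return -5;      /* e.g. -EIO reported by the filesystem at completion time */
}

int main(void)
{
        printf("%ld\n", dio_complete(4096, failing_end_io, NULL));     /* prints -5 */
        return 0;
}
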
index 8b0b4a73116d07f354b45237e62662961594fa0d..5fe2cdb4898806e6e00ec95e88a5d19821a293ae 100644 (file)
@@ -738,8 +738,7 @@ static void ecryptfs_free_kmem_caches(void)
                struct ecryptfs_cache_info *info;
 
                info = &ecryptfs_cache_infos[i];
-               if (*(info->cache))
-                       kmem_cache_destroy(*(info->cache));
+               kmem_cache_destroy(*(info->cache));
        }
 }
 
index ae1dbcf47e979d48b67ee83e1ec413e4d119a48b..cde60741cad2c4cc429f04e80f29e788b0e0d673 100644 (file)
 /* Epoll private bits inside the event mask */
 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
 
+#define EPOLLINOUT_BITS (POLLIN | POLLOUT)
+
+#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \
+                               EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
+
 /* Maximum number of nesting allowed inside epoll sets */
 #define EP_MAX_NESTS 4
 
@@ -1068,7 +1073,22 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
         * wait list.
         */
        if (waitqueue_active(&ep->wq)) {
-               ewake = 1;
+               if ((epi->event.events & EPOLLEXCLUSIVE) &&
+                                       !((unsigned long)key & POLLFREE)) {
+                       switch ((unsigned long)key & EPOLLINOUT_BITS) {
+                       case POLLIN:
+                               if (epi->event.events & POLLIN)
+                                       ewake = 1;
+                               break;
+                       case POLLOUT:
+                               if (epi->event.events & POLLOUT)
+                                       ewake = 1;
+                               break;
+                       case 0:
+                               ewake = 1;
+                               break;
+                       }
+               }
                wake_up_locked(&ep->wq);
        }
        if (waitqueue_active(&ep->poll_wait))
@@ -1875,9 +1895,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
         * so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation.
         * Also, we do not currently supported nested exclusive wakeups.
         */
-       if ((epds.events & EPOLLEXCLUSIVE) && (op == EPOLL_CTL_MOD ||
-               (op == EPOLL_CTL_ADD && is_file_epoll(tf.file))))
-               goto error_tgt_fput;
+       if (epds.events & EPOLLEXCLUSIVE) {
+               if (op == EPOLL_CTL_MOD)
+                       goto error_tgt_fput;
+               if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
+                               (epds.events & ~EPOLLEXCLUSIVE_OK_BITS)))
+                       goto error_tgt_fput;
+       }
 
        /*
         * At this point it is safe to assume that the "private_data" contains
@@ -1950,8 +1974,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                break;
        case EPOLL_CTL_MOD:
                if (epi) {
-                       epds.events |= POLLERR | POLLHUP;
-                       error = ep_modify(ep, epi, &epds);
+                       if (!(epi->event.events & EPOLLEXCLUSIVE)) {
+                               epds.events |= POLLERR | POLLHUP;
+                               error = ep_modify(ep, epi, &epds);
+                       }
                } else
                        error = -ENOENT;
                break;
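
The epoll hunks restrict EPOLLEXCLUSIVE to EPOLL_CTL_ADD with a whitelisted set of event bits, reject EPOLL_CTL_MOD requests that carry the flag, and skip modification of entries that were added exclusive. A short userspace usage sketch; it assumes a kernel and libc headers new enough to define EPOLLEXCLUSIVE:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

/* Several workers can add the same fd with EPOLLEXCLUSIVE so that only one
 * of them is woken per event instead of all of them (thundering herd). */
static int add_exclusive(int epfd, int fd)
{
        struct epoll_event ev = {
                .events = EPOLLIN | EPOLLEXCLUSIVE,     /* only whitelisted bits */
                .data.fd = fd,
        };

        /* Passing EPOLLEXCLUSIVE to EPOLL_CTL_MOD would now be rejected. */
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
                perror("epoll_ctl");
                return -1;
        }
        return 0;
}

int main(void)
{
        int fds[2];
        int epfd = epoll_create1(0);

        if (epfd < 0 || pipe(fds) < 0)
                return 1;
        return add_exclusive(epfd, fds[0]) ? 1 : 0;
}
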
index 845c22a5a7448998fcbc430b4fad847979c064d6..04efc304c9c147b08089dfa66948a4d2f809584b 100644 (file)
@@ -465,3 +465,47 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
                return size;
        return 0;
 }
+
+/*
+ * Validate dentries for encrypted directories to make sure we aren't
+ * potentially caching stale data after a key has been added or
+ * removed.
+ */
+static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       struct inode *dir = d_inode(dentry->d_parent);
+       struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
+       int inode_has_key, encrypted_filename;
+
+       if (!ext4_encrypted_inode(dir))
+               return 0;
+
+       if (ci && ci->ci_keyring_key &&
+           (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+                                         (1 << KEY_FLAG_REVOKED) |
+                                         (1 << KEY_FLAG_DEAD))))
+               ci = NULL;
+
+       /* this should eventually be a flag in d_flags */
+       encrypted_filename = dentry->d_fsdata != NULL;
+       inode_has_key = (ci != NULL);
+
+       /*
+        * If this is an encrypted file name, and it is a negative
+        * dentry, it might be a valid name.  We can't check if the
+        * key has since been made available due to locking reasons,
+        * so we fail the validation so ext4_lookup() can do this
+        * check.
+        *
+        * We also fail the validation if the dentry is for a
+        * decrypted filename, but we no longer have the key.
+        */
+       if ((encrypted_filename && d_is_negative(dentry)) ||
+           (!encrypted_filename && !inode_has_key))
+               return 0;
+       return 1;
+}
+
+const struct dentry_operations ext4_encrypted_d_ops = {
+       .d_revalidate = ext4_d_revalidate,
+};
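
The revalidation rule above boils down to two cases that force a fresh lookup: a negative dentry whose name was cached while the key was absent (the key may have appeared since), and a decrypted name for which the key has since gone away. A compact restatement of that decision outside the VFS; the parameter names are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Return 1 to keep the cached dentry, 0 to force ext4_lookup() to run again. */
static int revalidate(bool dir_encrypted, bool cached_without_key,
                      bool have_key_now, bool negative)
{
        if (!dir_encrypted)
                return 0;               /* mirrors the hunk: redo the lookup */
        if (cached_without_key && negative)
                return 0;               /* name may resolve now that a key exists */
        if (!cached_without_key && !have_key_now)
                return 0;               /* cached plaintext name, but key is gone */
        return 1;                       /* still valid */
}

int main(void)
{
        printf("%d %d %d\n",
               revalidate(true, true, true, true),     /* 0: retry the lookup */
               revalidate(true, false, false, false),  /* 0: key was removed */
               revalidate(true, false, true, false));  /* 1: keep the dentry */
        return 0;
}
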
index 1d1bca74f84437172d96c26e648e6ed45e129725..6d17f31a31d7479cae271f6d043ec9cb475e348e 100644 (file)
@@ -111,6 +111,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
        int dir_has_error = 0;
        struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
 
+       if (ext4_encrypted_inode(inode)) {
+               err = ext4_get_encryption_info(inode);
+               if (err && err != -ENOKEY)
+                       return err;
+       }
+
        if (is_dx_dir(inode)) {
                err = ext4_dx_readdir(file, ctx);
                if (err != ERR_BAD_DX_DIR) {
index 0662b285dc8a71982a54e5895d58d797c7bcf6a4..157b458a69d4b7c334f28b80c37fd5d11c9f0c25 100644 (file)
@@ -2302,6 +2302,7 @@ struct page *ext4_encrypt(struct inode *inode,
 int ext4_decrypt(struct page *page);
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
                           ext4_fsblk_t pblk, ext4_lblk_t len);
+extern const struct dentry_operations ext4_encrypted_d_ops;
 
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 int ext4_init_crypto(void);
index 1126436dada19519b97240bcdb42995acae46724..474f1a4d2ca8f45be1c0c97ce8c13a90cf75d34d 100644 (file)
@@ -350,6 +350,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
+       struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
        struct path path;
        char buf[64], *cp;
        int ret;
@@ -393,6 +394,14 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
                if (ext4_encryption_info(inode) == NULL)
                        return -ENOKEY;
        }
+       if (ext4_encrypted_inode(dir) &&
+           !ext4_is_child_context_consistent_with_parent(dir, inode)) {
+               ext4_warning(inode->i_sb,
+                            "Inconsistent encryption contexts: %lu/%lu\n",
+                            (unsigned long) dir->i_ino,
+                            (unsigned long) inode->i_ino);
+               return -EPERM;
+       }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
index 83bc8bfb3bea8eeefed38ca46ae8260779222405..9db04dd9b88a6e45782485ccf28844804294b103 100644 (file)
@@ -3161,14 +3161,17 @@ out:
 }
 #endif
 
-static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private)
 {
         ext4_io_end_t *io_end = iocb->private;
 
+       if (size <= 0)
+               return 0;
+
        /* if not async direct IO just return */
        if (!io_end)
-               return;
+               return 0;
 
        ext_debug("ext4_end_io_dio(): io_end 0x%p "
                  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3179,6 +3182,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        io_end->offset = offset;
        io_end->size = size;
        ext4_put_io_end(io_end);
+
+       return 0;
 }
 
 /*
index 06574dd77614a3b1c4396d5bb3c017c681209d00..48e4b8907826eca52a1e94e14bdfdb0fd2240c56 100644 (file)
@@ -1558,6 +1558,24 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
        struct ext4_dir_entry_2 *de;
        struct buffer_head *bh;
 
+       if (ext4_encrypted_inode(dir)) {
+               int res = ext4_get_encryption_info(dir);
+
+               /*
+                * This should be a properly defined flag for
+                * dentry->d_flags when we uplift this to the VFS.
+                * d_fsdata is set to (void *) 1 if the dentry is
+                * created while the directory was encrypted and we
+                * don't have access to the key.
+                */
+              dentry->d_fsdata = NULL;
+              if (ext4_encryption_info(dir))
+                      dentry->d_fsdata = (void *) 1;
+              d_set_d_op(dentry, &ext4_encrypted_d_ops);
+              if (res && res != -ENOKEY)
+                      return ERR_PTR(res);
+       }
+
        if (dentry->d_name.len > EXT4_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
@@ -1585,11 +1603,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
                        return ERR_PTR(-EFSCORRUPTED);
                }
                if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
-                   (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-                    S_ISLNK(inode->i_mode)) &&
+                   (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
                    !ext4_is_child_context_consistent_with_parent(dir,
                                                                  inode)) {
+                       int nokey = ext4_encrypted_inode(inode) &&
+                               !ext4_encryption_info(inode);
+
                        iput(inode);
+                       if (nokey)
+                               return ERR_PTR(-ENOKEY);
                        ext4_warning(inode->i_sb,
                                     "Inconsistent encryption contexts: %lu/%lu\n",
                                     (unsigned long) dir->i_ino,
index 3ed01ec011d75067579a4b894b4b0623af265a2d..b5bcbddceb91c981a43cb5ae66802d1e35eca5a1 100644 (file)
@@ -1132,6 +1132,7 @@ static const struct dquot_operations ext4_quota_operations = {
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
        .get_projid     = ext4_get_projid,
+       .get_next_id    = dquot_get_next_id,
 };
 
 static const struct quotactl_ops ext4_qctl_operations = {
index 3842af954cd5bc127f2f0aad13d0ff52b743779d..536bec99bd64d39570edd0f08ac5545e79ade5a2 100644 (file)
@@ -39,7 +39,7 @@ repeat:
                cond_resched();
                goto repeat;
        }
-       f2fs_wait_on_page_writeback(page, META);
+       f2fs_wait_on_page_writeback(page, META, true);
        SetPageUptodate(page);
        return page;
 }
@@ -232,13 +232,17 @@ static int f2fs_write_meta_page(struct page *page,
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;
 
-       f2fs_wait_on_page_writeback(page, META);
        write_meta_page(sbi, page);
        dec_page_count(sbi, F2FS_DIRTY_META);
+
+       if (wbc->for_reclaim)
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);
+
        unlock_page(page);
 
-       if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi)))
+       if (unlikely(f2fs_cp_error(sbi)))
                f2fs_submit_merged_bio(sbi, META, WRITE);
+
        return 0;
 
 redirty_out:
@@ -252,13 +256,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff, written;
 
-       trace_f2fs_writepages(mapping->host, wbc, META);
-
        /* collect a number of dirty meta pages and write together */
        if (wbc->for_kupdate ||
                get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
                goto skip_write;
 
+       trace_f2fs_writepages(mapping->host, wbc, META);
+
        /* if mounting is failed, skip writing node pages */
        mutex_lock(&sbi->cp_mutex);
        diff = nr_pages_to_write(sbi, META, wbc);
@@ -269,6 +273,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
 
 skip_write:
        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
+       trace_f2fs_writepages(mapping->host, wbc, META);
        return 0;
 }
 
@@ -315,6 +320,9 @@ continue_unlock:
                                goto continue_unlock;
                        }
 
+                       f2fs_wait_on_page_writeback(page, META, true);
+
+                       BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
@@ -921,6 +929,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        int cp_payload_blks = __cp_payload(sbi);
        block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
        bool invalidate = false;
+       struct super_block *sb = sbi->sb;
+       struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+       u64 kbytes_written;
 
        /*
         * This avoids to conduct wrong roll-forward operations and uses
@@ -1034,6 +1045,14 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
+
+       /* Record write statistics in the hot node summary */
+       kbytes_written = sbi->kbytes_written;
+       if (sb->s_bdev->bd_part)
+               kbytes_written += BD_PART_WRITTEN(sbi);
+
+       seg_i->sum_blk->info.kbytes_written = cpu_to_le64(kbytes_written);
+
        if (__remain_node_summaries(cpc->reason)) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
index 5c06db17e41fa267f5b270061d2959b2a36803e4..03f948e84115df85ea68faadb856c7c3eef3383c 100644 (file)
@@ -67,7 +67,6 @@ static void f2fs_write_end_io(struct bio *bio)
                f2fs_restore_and_release_control_page(&page);
 
                if (unlikely(bio->bi_error)) {
-                       set_page_dirty(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        f2fs_stop_checkpoint(sbi);
                }
@@ -75,8 +74,7 @@ static void f2fs_write_end_io(struct bio *bio)
                dec_page_count(sbi, F2FS_WRITEBACK);
        }
 
-       if (!get_pages(sbi, F2FS_WRITEBACK) &&
-                       !list_empty(&sbi->cp_wait.task_list))
+       if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
                wake_up(&sbi->cp_wait);
 
        bio_put(bio);
@@ -116,8 +114,60 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
        io->bio = NULL;
 }
 
-void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
-                               enum page_type type, int rw)
+static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+                                               struct page *page, nid_t ino)
+{
+       struct bio_vec *bvec;
+       struct page *target;
+       int i;
+
+       if (!io->bio)
+               return false;
+
+       if (!inode && !page && !ino)
+               return true;
+
+       bio_for_each_segment_all(bvec, io->bio, i) {
+
+               if (bvec->bv_page->mapping) {
+                       target = bvec->bv_page;
+               } else {
+                       struct f2fs_crypto_ctx *ctx;
+
+                       /* encrypted page */
+                       ctx = (struct f2fs_crypto_ctx *)page_private(
+                                                               bvec->bv_page);
+                       target = ctx->w.control_page;
+               }
+
+               if (inode && inode == target->mapping->host)
+                       return true;
+               if (page && page == target)
+                       return true;
+               if (ino && ino == ino_of_node(target))
+                       return true;
+       }
+
+       return false;
+}
+
+static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
+                                               struct page *page, nid_t ino,
+                                               enum page_type type)
+{
+       enum page_type btype = PAGE_TYPE_OF_BIO(type);
+       struct f2fs_bio_info *io = &sbi->write_io[btype];
+       bool ret;
+
+       down_read(&io->io_rwsem);
+       ret = __has_merged_page(io, inode, page, ino);
+       up_read(&io->io_rwsem);
+       return ret;
+}
+
+static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
+                               struct inode *inode, struct page *page,
+                               nid_t ino, enum page_type type, int rw)
 {
        enum page_type btype = PAGE_TYPE_OF_BIO(type);
        struct f2fs_bio_info *io;
@@ -126,6 +176,9 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 
        down_write(&io->io_rwsem);
 
+       if (!__has_merged_page(io, inode, page, ino))
+               goto out;
+
        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
@@ -135,9 +188,24 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
                        io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
        }
        __submit_merged_bio(io);
+out:
        up_write(&io->io_rwsem);
 }
 
+void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
+                                                                       int rw)
+{
+       __f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
+}
+
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
+                               struct inode *inode, struct page *page,
+                               nid_t ino, enum page_type type, int rw)
+{
+       if (has_merged_page(sbi, inode, page, ino, type))
+               __f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
+}
+
 /*
  * Fill the locked page with data located in the block address.
  * Return unlocked page.
@@ -218,7 +286,7 @@ void set_data_blkaddr(struct dnode_of_data *dn)
        struct page *node_page = dn->node_page;
        unsigned int ofs_in_node = dn->ofs_in_node;
 
-       f2fs_wait_on_page_writeback(node_page, NODE);
+       f2fs_wait_on_page_writeback(node_page, NODE, true);
 
        rn = F2FS_NODE(node_page);
 
@@ -461,7 +529,6 @@ got_it:
 static int __allocate_data_block(struct dnode_of_data *dn)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
-       struct f2fs_inode_info *fi = F2FS_I(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
        int seg = CURSEG_WARM_DATA;
@@ -489,7 +556,7 @@ alloc:
        set_data_blkaddr(dn);
 
        /* update i_size */
-       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
                                                        dn->ofs_in_node;
        if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
                i_size_write(dn->inode,
@@ -497,67 +564,33 @@ alloc:
        return 0;
 }
 
-static int __allocate_data_blocks(struct inode *inode, loff_t offset,
-                                                       size_t count)
+ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 {
-       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-       struct dnode_of_data dn;
-       u64 start = F2FS_BYTES_TO_BLK(offset);
-       u64 len = F2FS_BYTES_TO_BLK(count);
-       bool allocated;
-       u64 end_offset;
-       int err = 0;
-
-       while (len) {
-               f2fs_lock_op(sbi);
-
-               /* When reading holes, we need its node page */
-               set_new_dnode(&dn, inode, NULL, NULL, 0);
-               err = get_dnode_of_data(&dn, start, ALLOC_NODE);
-               if (err)
-                       goto out;
-
-               allocated = false;
-               end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-
-               while (dn.ofs_in_node < end_offset && len) {
-                       block_t blkaddr;
-
-                       if (unlikely(f2fs_cp_error(sbi))) {
-                               err = -EIO;
-                               goto sync_out;
-                       }
-
-                       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
-                       if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
-                               err = __allocate_data_block(&dn);
-                               if (err)
-                                       goto sync_out;
-                               allocated = true;
-                       }
-                       len--;
-                       start++;
-                       dn.ofs_in_node++;
-               }
+       struct inode *inode = file_inode(iocb->ki_filp);
+       struct f2fs_map_blocks map;
+       ssize_t ret = 0;
 
-               if (allocated)
-                       sync_inode_page(&dn);
+       map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
+       map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
+       map.m_next_pgofs = NULL;
 
-               f2fs_put_dnode(&dn);
-               f2fs_unlock_op(sbi);
+       if (f2fs_encrypted_inode(inode))
+               return 0;
 
-               f2fs_balance_fs(sbi, dn.node_changed);
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               ret = f2fs_convert_inline_inode(inode);
+               if (ret)
+                       return ret;
+               return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
        }
-       return err;
-
-sync_out:
-       if (allocated)
-               sync_inode_page(&dn);
-       f2fs_put_dnode(&dn);
-out:
-       f2fs_unlock_op(sbi);
-       f2fs_balance_fs(sbi, dn.node_changed);
-       return err;
+       if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
+               ret = f2fs_convert_inline_inode(inode);
+               if (ret)
+                       return ret;
+       }
+       if (!f2fs_has_inline_data(inode))
+               return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+       return ret;
 }
 
 /*
@@ -588,13 +621,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;
 
-       if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+       if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
                goto out;
        }
 
+next_dnode:
        if (create)
                f2fs_lock_op(sbi);
 
@@ -602,120 +636,98 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
-               if (err == -ENOENT)
+               if (err == -ENOENT) {
                        err = 0;
+                       if (map->m_next_pgofs)
+                               *map->m_next_pgofs =
+                                       get_next_page_offset(&dn, pgofs);
+               }
                goto unlock_out;
        }
 
-       if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+       end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+
+next_block:
+       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+
+       if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
                if (create) {
                        if (unlikely(f2fs_cp_error(sbi))) {
                                err = -EIO;
-                               goto put_out;
+                               goto sync_out;
+                       }
+                       if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+                               if (blkaddr == NULL_ADDR)
+                                       err = reserve_new_block(&dn);
+                       } else {
+                               err = __allocate_data_block(&dn);
                        }
-                       err = __allocate_data_block(&dn);
                        if (err)
-                               goto put_out;
+                               goto sync_out;
                        allocated = true;
                        map->m_flags = F2FS_MAP_NEW;
+                       blkaddr = dn.data_blkaddr;
                } else {
+                       if (flag == F2FS_GET_BLOCK_FIEMAP &&
+                                               blkaddr == NULL_ADDR) {
+                               if (map->m_next_pgofs)
+                                       *map->m_next_pgofs = pgofs + 1;
+                       }
                        if (flag != F2FS_GET_BLOCK_FIEMAP ||
-                                               dn.data_blkaddr != NEW_ADDR) {
+                                               blkaddr != NEW_ADDR) {
                                if (flag == F2FS_GET_BLOCK_BMAP)
                                        err = -ENOENT;
-                               goto put_out;
+                               goto sync_out;
                        }
-
-                       /*
-                        * preallocated unwritten block should be mapped
-                        * for fiemap.
-                        */
-                       if (dn.data_blkaddr == NEW_ADDR)
-                               map->m_flags = F2FS_MAP_UNWRITTEN;
                }
        }
 
-       map->m_flags |= F2FS_MAP_MAPPED;
-       map->m_pblk = dn.data_blkaddr;
-       map->m_len = 1;
+       if (map->m_len == 0) {
+               /* preallocated unwritten block should be mapped for fiemap. */
+               if (blkaddr == NEW_ADDR)
+                       map->m_flags |= F2FS_MAP_UNWRITTEN;
+               map->m_flags |= F2FS_MAP_MAPPED;
+
+               map->m_pblk = blkaddr;
+               map->m_len = 1;
+       } else if ((map->m_pblk != NEW_ADDR &&
+                       blkaddr == (map->m_pblk + ofs)) ||
+                       (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
+                       flag == F2FS_GET_BLOCK_PRE_DIO ||
+                       flag == F2FS_GET_BLOCK_PRE_AIO) {
+               ofs++;
+               map->m_len++;
+       } else {
+               goto sync_out;
+       }
 
-       end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
        dn.ofs_in_node++;
        pgofs++;
 
-get_next:
-       if (map->m_len >= maxblocks)
-               goto sync_out;
+       if (map->m_len < maxblocks) {
+               if (dn.ofs_in_node < end_offset)
+                       goto next_block;
 
-       if (dn.ofs_in_node >= end_offset) {
                if (allocated)
                        sync_inode_page(&dn);
-               allocated = false;
                f2fs_put_dnode(&dn);
 
                if (create) {
                        f2fs_unlock_op(sbi);
-                       f2fs_balance_fs(sbi, dn.node_changed);
-                       f2fs_lock_op(sbi);
-               }
-
-               set_new_dnode(&dn, inode, NULL, NULL, 0);
-               err = get_dnode_of_data(&dn, pgofs, mode);
-               if (err) {
-                       if (err == -ENOENT)
-                               err = 0;
-                       goto unlock_out;
+                       f2fs_balance_fs(sbi, allocated);
                }
-
-               end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-       }
-
-       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
-
-       if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
-               if (create) {
-                       if (unlikely(f2fs_cp_error(sbi))) {
-                               err = -EIO;
-                               goto sync_out;
-                       }
-                       err = __allocate_data_block(&dn);
-                       if (err)
-                               goto sync_out;
-                       allocated = true;
-                       map->m_flags |= F2FS_MAP_NEW;
-                       blkaddr = dn.data_blkaddr;
-               } else {
-                       /*
-                        * we only merge preallocated unwritten blocks
-                        * for fiemap.
-                        */
-                       if (flag != F2FS_GET_BLOCK_FIEMAP ||
-                                       blkaddr != NEW_ADDR)
-                               goto sync_out;
-               }
-       }
-
-       /* Give more consecutive addresses for the readahead */
-       if ((map->m_pblk != NEW_ADDR &&
-                       blkaddr == (map->m_pblk + ofs)) ||
-                       (map->m_pblk == NEW_ADDR &&
-                       blkaddr == NEW_ADDR)) {
-               ofs++;
-               dn.ofs_in_node++;
-               pgofs++;
-               map->m_len++;
-               goto get_next;
+               allocated = false;
+               goto next_dnode;
        }
 
 sync_out:
        if (allocated)
                sync_inode_page(&dn);
-put_out:
        f2fs_put_dnode(&dn);
 unlock_out:
        if (create) {
                f2fs_unlock_op(sbi);
-               f2fs_balance_fs(sbi, dn.node_changed);
+               f2fs_balance_fs(sbi, allocated);
        }
 out:
        trace_f2fs_map_blocks(inode, map, err);
@@ -723,13 +735,15 @@ out:
 }
 
 static int __get_data_block(struct inode *inode, sector_t iblock,
-                       struct buffer_head *bh, int create, int flag)
+                       struct buffer_head *bh, int create, int flag,
+                       pgoff_t *next_pgofs)
 {
        struct f2fs_map_blocks map;
        int ret;
 
        map.m_lblk = iblock;
        map.m_len = bh->b_size >> inode->i_blkbits;
+       map.m_next_pgofs = next_pgofs;
 
        ret = f2fs_map_blocks(inode, &map, create, flag);
        if (!ret) {
@@ -741,16 +755,18 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 }
 
 static int get_data_block(struct inode *inode, sector_t iblock,
-                       struct buffer_head *bh_result, int create, int flag)
+                       struct buffer_head *bh_result, int create, int flag,
+                       pgoff_t *next_pgofs)
 {
-       return __get_data_block(inode, iblock, bh_result, create, flag);
+       return __get_data_block(inode, iblock, bh_result, create,
+                                                       flag, next_pgofs);
 }
 
 static int get_data_block_dio(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
 {
        return __get_data_block(inode, iblock, bh_result, create,
-                                               F2FS_GET_BLOCK_DIO);
+                                               F2FS_GET_BLOCK_DIO, NULL);
 }
 
 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -761,7 +777,7 @@ static int get_data_block_bmap(struct inode *inode, sector_t iblock,
                return -EFBIG;
 
        return __get_data_block(inode, iblock, bh_result, create,
-                                               F2FS_GET_BLOCK_BMAP);
+                                               F2FS_GET_BLOCK_BMAP, NULL);
 }
 
 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -779,6 +795,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 {
        struct buffer_head map_bh;
        sector_t start_blk, last_blk;
+       pgoff_t next_pgofs;
        loff_t isize;
        u64 logical = 0, phys = 0, size = 0;
        u32 flags = 0;
@@ -814,14 +831,15 @@ next:
        map_bh.b_size = len;
 
        ret = get_data_block(inode, start_blk, &map_bh, 0,
-                                       F2FS_GET_BLOCK_FIEMAP);
+                                       F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
        if (ret)
                goto out;
 
        /* HOLE */
        if (!buffer_mapped(&map_bh)) {
+               start_blk = next_pgofs;
                /* Go through holes util pass the EOF */
-               if (blk_to_logical(inode, start_blk++) < isize)
+               if (blk_to_logical(inode, start_blk) < isize)
                        goto prep_next;
                /* Found a hole beyond isize means no more extents.
                 * Note that the premise is that filesystems don't
@@ -889,6 +907,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;
+       map.m_next_pgofs = NULL;
 
        for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
 
@@ -927,7 +946,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
                        map.m_len = last_block - block_in_file;
 
                        if (f2fs_map_blocks(inode, &map, 0,
-                                                       F2FS_GET_BLOCK_READ))
+                                               F2FS_GET_BLOCK_READ))
                                goto set_error_page;
                }
 got_it:
@@ -1177,12 +1196,18 @@ out:
        inode_dec_dirty_pages(inode);
        if (err)
                ClearPageUptodate(page);
+
+       if (wbc->for_reclaim) {
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
+               remove_dirty_inode(inode);
+       }
+
        unlock_page(page);
        f2fs_balance_fs(sbi, need_balance_fs);
-       if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi))) {
+
+       if (unlikely(f2fs_cp_error(sbi)))
                f2fs_submit_merged_bio(sbi, DATA, WRITE);
-               remove_dirty_inode(inode);
-       }
+
        return 0;
 
 redirty_out:
@@ -1282,7 +1307,8 @@ continue_unlock:
 
                        if (PageWriteback(page)) {
                                if (wbc->sync_mode != WB_SYNC_NONE)
-                                       f2fs_wait_on_page_writeback(page, DATA);
+                                       f2fs_wait_on_page_writeback(page,
+                                                               DATA, true);
                                else
                                        goto continue_unlock;
                        }
@@ -1339,8 +1365,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
        int ret;
        long diff;
 
-       trace_f2fs_writepages(mapping->host, wbc, DATA);
-
        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
                return 0;
@@ -1362,14 +1386,16 @@ static int f2fs_write_data_pages(struct address_space *mapping,
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto skip_write;
 
+       trace_f2fs_writepages(mapping->host, wbc, DATA);
+
        diff = nr_pages_to_write(sbi, DATA, wbc);
 
-       if (!S_ISDIR(inode->i_mode)) {
+       if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
                mutex_lock(&sbi->writepages);
                locked = true;
        }
        ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
-       f2fs_submit_merged_bio(sbi, DATA, WRITE);
+       f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
        if (locked)
                mutex_unlock(&sbi->writepages);
 
@@ -1380,6 +1406,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 
 skip_write:
        wbc->pages_skipped += get_dirty_pages(inode);
+       trace_f2fs_writepages(mapping->host, wbc, DATA);
        return 0;
 }
 
@@ -1406,6 +1433,14 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
        struct extent_info ei;
        int err = 0;
 
+       /*
+        * we already allocated all the blocks, so we don't need to get
+        * the block addresses when there is no need to fill the page.
+        */
+       if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
+                                       len == PAGE_CACHE_SIZE)
+               return 0;
+
        if (f2fs_has_inline_data(inode) ||
                        (pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
                f2fs_lock_op(sbi);
@@ -1425,7 +1460,7 @@ restart:
                if (pos + len <= MAX_INLINE_DATA) {
                        read_inline_data(page, ipage);
                        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
-                       sync_inode_page(&dn);
+                       set_inline_node(ipage);
                } else {
                        err = f2fs_convert_inline_page(&dn, page);
                        if (err)
@@ -1439,13 +1474,9 @@ restart:
                if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                        dn.data_blkaddr = ei.blk + index - ei.fofs;
                } else {
-                       bool restart = false;
-
                        /* hole case */
                        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-                       if (err || (!err && dn.data_blkaddr == NULL_ADDR))
-                               restart = true;
-                       if (restart) {
+                       if (err || (!err && dn.data_blkaddr == NULL_ADDR)) {
                                f2fs_put_dnode(&dn);
                                f2fs_lock_op(sbi);
                                locked = true;
@@ -1514,7 +1545,7 @@ repeat:
                }
        }
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, false);
 
        /* wait for GCed encrypted page writeback */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
@@ -1592,7 +1623,6 @@ static int f2fs_write_end(struct file *file,
        if (pos + copied > i_size_read(inode)) {
                i_size_write(inode, pos + copied);
                mark_inode_dirty(inode);
-               update_inode_page(inode);
        }
 
        f2fs_put_page(page, 1);
@@ -1617,34 +1647,21 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                              loff_t offset)
 {
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
+       struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        int err;
 
-       /* we don't need to use inline_data strictly */
-       err = f2fs_convert_inline_inode(inode);
+       err = check_direct_IO(inode, iter, offset);
        if (err)
                return err;
 
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                return 0;
 
-       err = check_direct_IO(inode, iter, offset);
-       if (err)
-               return err;
-
        trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
-       if (iov_iter_rw(iter) == WRITE) {
-               err = __allocate_data_blocks(inode, offset, count);
-               if (err)
-                       goto out;
-       }
-
        err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
-out:
        if (err < 0 && iov_iter_rw(iter) == WRITE)
                f2fs_write_failed(mapping, offset + count);
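
The data.c changes above replace the unconditional f2fs_submit_merged_bio() calls on the reclaim and writepages paths with f2fs_submit_merged_bio_cond(), which flushes the per-type merged write bio only when it actually holds a page of interest: has_merged_page() peeks under the read side of io_rwsem, and __f2fs_submit_merged_bio() re-checks with __has_merged_page() under the write side before submitting. A minimal userspace sketch of that matching rule, with invented demo_* types (an illustration, not f2fs code):

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_page { const void *host; unsigned long ino; };
    struct demo_bio  { const struct demo_page *pages[8]; int nr; };

    static bool demo_has_merged_page(const struct demo_bio *bio,
                                     const void *inode,
                                     const struct demo_page *page,
                                     unsigned long ino)
    {
            int i;

            if (!bio->nr)                    /* nothing pending */
                    return false;
            if (!inode && !page && !ino)     /* no filter: any pending bio counts */
                    return true;

            for (i = 0; i < bio->nr; i++) {
                    const struct demo_page *t = bio->pages[i];

                    if (inode && inode == t->host)
                            return true;     /* a page of this inode is in flight */
                    if (page && page == t)
                            return true;     /* this exact page is in flight */
                    if (ino && ino == t->ino)
                            return true;     /* a node page of this ino is in flight */
            }
            return false;
    }

    int main(void)
    {
            int dir_inode;                   /* stand-in for a struct inode * */
            struct demo_page p = { &dir_inode, 7 };
            struct demo_bio bio = { { &p }, 1 };

            printf("%d\n", demo_has_merged_page(&bio, &dir_inode, NULL, 0)); /* 1 */
            printf("%d\n", demo_has_merged_page(&bio, NULL, NULL, 42));      /* 0 */
            return 0;
    }

The practical effect is that writing a single page for reclaim no longer forces a flush of unrelated I/O sitting in the shared merged bio.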
 
index faa7495e2d7e62effef70b681c52ba2c75ca36d9..8950fc3cc2f7578520bfb340e80c7269b3dd8064 100644 (file)
@@ -296,7 +296,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
 {
        enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, type);
+       f2fs_wait_on_page_writeback(page, type, true);
        de->ino = cpu_to_le32(inode->i_ino);
        set_de_type(de, inode->i_mode);
        f2fs_dentry_kunmap(dir, page);
@@ -311,7 +311,7 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
 {
        struct f2fs_inode *ri;
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
        /* copy name info. to this inode page */
        ri = F2FS_INODE(ipage);
@@ -598,7 +598,7 @@ start:
        ++level;
        goto start;
 add_dentry:
-       f2fs_wait_on_page_writeback(dentry_page, DATA);
+       f2fs_wait_on_page_writeback(dentry_page, DATA, true);
 
        if (inode) {
                down_write(&F2FS_I(inode)->i_sem);
@@ -709,7 +709,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
                return f2fs_delete_inline_entry(dentry, page, dir, inode);
 
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
 
        dentry_blk = page_address(page);
        bit_pos = dentry - dentry_blk->dentry;
index ccd5c636d3fe026d0c5379d4062ee204fc7faa17..071a1b19e5afb793e8eeea924af9ce32eca30a76 100644 (file)
@@ -33,6 +33,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 
        en->ei = *ei;
        INIT_LIST_HEAD(&en->list);
+       en->et = et;
 
        rb_link_node(&en->rb_node, parent, p);
        rb_insert_color(&en->rb_node, &et->root);
@@ -50,6 +51,24 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi,
 
        if (et->cached_en == en)
                et->cached_en = NULL;
+       kmem_cache_free(extent_node_slab, en);
+}
+
+/*
+ * Flow to release an extent_node:
+ * 1. list_del_init
+ * 2. __detach_extent_node
+ * 3. kmem_cache_free.
+ */
+static void __release_extent_node(struct f2fs_sb_info *sbi,
+                       struct extent_tree *et, struct extent_node *en)
+{
+       spin_lock(&sbi->extent_lock);
+       f2fs_bug_on(sbi, list_empty(&en->list));
+       list_del_init(&en->list);
+       spin_unlock(&sbi->extent_lock);
+
+       __detach_extent_node(sbi, et, en);
 }
 
 static struct extent_tree *__grab_extent_tree(struct inode *inode)
@@ -129,7 +148,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
 }
 
 static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
-                                       struct extent_tree *et, bool free_all)
+                                       struct extent_tree *et)
 {
        struct rb_node *node, *next;
        struct extent_node *en;
@@ -139,18 +158,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
        while (node) {
                next = rb_next(node);
                en = rb_entry(node, struct extent_node, rb_node);
-
-               if (free_all) {
-                       spin_lock(&sbi->extent_lock);
-                       if (!list_empty(&en->list))
-                               list_del_init(&en->list);
-                       spin_unlock(&sbi->extent_lock);
-               }
-
-               if (free_all || list_empty(&en->list)) {
-                       __detach_extent_node(sbi, et, en);
-                       kmem_cache_free(extent_node_slab, en);
-               }
+               __release_extent_node(sbi, et, en);
                node = next;
        }
 
@@ -232,9 +240,10 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
        if (en) {
                *ei = en->ei;
                spin_lock(&sbi->extent_lock);
-               if (!list_empty(&en->list))
+               if (!list_empty(&en->list)) {
                        list_move_tail(&en->list, &sbi->extent_list);
-               et->cached_en = en;
+                       et->cached_en = en;
+               }
                spin_unlock(&sbi->extent_lock);
                ret = true;
        }
@@ -329,7 +338,6 @@ lookup_neighbors:
 
 static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
                                struct extent_tree *et, struct extent_info *ei,
-                               struct extent_node **den,
                                struct extent_node *prev_ex,
                                struct extent_node *next_ex)
 {
@@ -342,20 +350,25 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
        }
 
        if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
-               if (en) {
-                       __detach_extent_node(sbi, et, prev_ex);
-                       *den = prev_ex;
-               }
+               if (en)
+                       __release_extent_node(sbi, et, prev_ex);
                next_ex->ei.fofs = ei->fofs;
                next_ex->ei.blk = ei->blk;
                next_ex->ei.len += ei->len;
                en = next_ex;
        }
 
-       if (en) {
-               __try_update_largest_extent(et, en);
+       if (!en)
+               return NULL;
+
+       __try_update_largest_extent(et, en);
+
+       spin_lock(&sbi->extent_lock);
+       if (!list_empty(&en->list)) {
+               list_move_tail(&en->list, &sbi->extent_list);
                et->cached_en = en;
        }
+       spin_unlock(&sbi->extent_lock);
        return en;
 }
 
@@ -391,7 +404,12 @@ do_insert:
                return NULL;
 
        __try_update_largest_extent(et, en);
+
+       /* update in global extent list */
+       spin_lock(&sbi->extent_lock);
+       list_add_tail(&en->list, &sbi->extent_list);
        et->cached_en = en;
+       spin_unlock(&sbi->extent_lock);
        return en;
 }
 
@@ -479,7 +497,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
                if (parts)
                        __try_update_largest_extent(et, en);
                else
-                       __detach_extent_node(sbi, et, en);
+                       __release_extent_node(sbi, et, en);
 
                /*
                 * if original extent is split into zero or two parts, extent
@@ -490,31 +508,15 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
                        insert_p = NULL;
                        insert_parent = NULL;
                }
-
-               /* update in global extent list */
-               spin_lock(&sbi->extent_lock);
-               if (!parts && !list_empty(&en->list))
-                       list_del(&en->list);
-               if (en1)
-                       list_add_tail(&en1->list, &sbi->extent_list);
-               spin_unlock(&sbi->extent_lock);
-
-               /* release extent node */
-               if (!parts)
-                       kmem_cache_free(extent_node_slab, en);
-
                en = next_en;
        }
 
        /* 3. update extent in extent cache */
        if (blkaddr) {
-               struct extent_node *den = NULL;
 
                set_extent_info(&ei, fofs, blkaddr, len);
-               en1 = __try_merge_extent_node(sbi, et, &ei, &den,
-                                                       prev_en, next_en);
-               if (!en1)
-                       en1 = __insert_extent_tree(sbi, et, &ei,
+               if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+                       __insert_extent_tree(sbi, et, &ei,
                                                insert_p, insert_parent);
 
                /* give up extent_cache, if split and small updates happen */
@@ -524,24 +526,10 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
                        et->largest.len = 0;
                        set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
                }
-
-               spin_lock(&sbi->extent_lock);
-               if (en1) {
-                       if (list_empty(&en1->list))
-                               list_add_tail(&en1->list, &sbi->extent_list);
-                       else
-                               list_move_tail(&en1->list, &sbi->extent_list);
-               }
-               if (den && !list_empty(&den->list))
-                       list_del(&den->list);
-               spin_unlock(&sbi->extent_lock);
-
-               if (den)
-                       kmem_cache_free(extent_node_slab, den);
        }
 
        if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
-               __free_extent_tree(sbi, et, true);
+               __free_extent_tree(sbi, et);
 
        write_unlock(&et->lock);
 
@@ -550,14 +538,10 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
 
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 {
-       struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
        struct extent_tree *et, *next;
-       struct extent_node *en, *tmp;
-       unsigned long ino = F2FS_ROOT_INO(sbi);
-       unsigned int found;
+       struct extent_node *en;
        unsigned int node_cnt = 0, tree_cnt = 0;
        int remained;
-       bool do_free = false;
 
        if (!test_opt(sbi, EXTENT_CACHE))
                return 0;
@@ -572,10 +556,10 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
        list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
                if (atomic_read(&et->node_cnt)) {
                        write_lock(&et->lock);
-                       node_cnt += __free_extent_tree(sbi, et, true);
+                       node_cnt += __free_extent_tree(sbi, et);
                        write_unlock(&et->lock);
                }
-
+               f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
                list_del_init(&et->list);
                radix_tree_delete(&sbi->extent_tree_root, et->ino);
                kmem_cache_free(extent_tree_slab, et);
@@ -585,6 +569,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 
                if (node_cnt + tree_cnt >= nr_shrink)
                        goto unlock_out;
+               cond_resched();
        }
        up_write(&sbi->extent_tree_lock);
 
@@ -596,42 +581,29 @@ free_node:
        remained = nr_shrink - (node_cnt + tree_cnt);
 
        spin_lock(&sbi->extent_lock);
-       list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
-               if (!remained--)
+       for (; remained > 0; remained--) {
+               if (list_empty(&sbi->extent_list))
                        break;
-               list_del_init(&en->list);
-               do_free = true;
-       }
-       spin_unlock(&sbi->extent_lock);
-
-       if (do_free == false)
-               goto unlock_out;
-
-       /*
-        * reset ino for searching victims from beginning of global extent tree.
-        */
-       ino = F2FS_ROOT_INO(sbi);
-
-       while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
-                               (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
-               unsigned i;
-
-               ino = treevec[found - 1]->ino + 1;
-               for (i = 0; i < found; i++) {
-                       struct extent_tree *et = treevec[i];
+               en = list_first_entry(&sbi->extent_list,
+                                       struct extent_node, list);
+               et = en->et;
+               if (!write_trylock(&et->lock)) {
+                       /* refresh this extent node's position in extent list */
+                       list_move_tail(&en->list, &sbi->extent_list);
+                       continue;
+               }
 
-                       if (!atomic_read(&et->node_cnt))
-                               continue;
+               list_del_init(&en->list);
+               spin_unlock(&sbi->extent_lock);
 
-                       if (write_trylock(&et->lock)) {
-                               node_cnt += __free_extent_tree(sbi, et, false);
-                               write_unlock(&et->lock);
-                       }
+               __detach_extent_node(sbi, et, en);
 
-                       if (node_cnt + tree_cnt >= nr_shrink)
-                               goto unlock_out;
-               }
+               write_unlock(&et->lock);
+               node_cnt++;
+               spin_lock(&sbi->extent_lock);
        }
+       spin_unlock(&sbi->extent_lock);
+
 unlock_out:
        up_write(&sbi->extent_tree_lock);
 out:
@@ -650,7 +622,7 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
                return 0;
 
        write_lock(&et->lock);
-       node_cnt = __free_extent_tree(sbi, et, true);
+       node_cnt = __free_extent_tree(sbi, et);
        write_unlock(&et->lock);
 
        return node_cnt;
@@ -701,7 +673,6 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
 
 void f2fs_update_extent_cache(struct dnode_of_data *dn)
 {
-       struct f2fs_inode_info *fi = F2FS_I(dn->inode);
        pgoff_t fofs;
 
        if (!f2fs_may_extent_tree(dn->inode))
@@ -710,8 +681,8 @@ void f2fs_update_extent_cache(struct dnode_of_data *dn)
        f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
 
 
-       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
-                                                       dn->ofs_in_node;
+       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+                                                               dn->ofs_in_node;
 
        if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
                sync_inode_page(dn);
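
The extent cache shrinker above no longer walks every tree via radix_tree_gang_lookup(); it reclaims nodes straight from the head of the global sbi->extent_list LRU, following the new en->et back-pointer to write_trylock() the owning tree, and simply rotates a node to the list tail when that lock is contended. A rough userspace model of the trylock-or-rotate loop with invented demo_* types (the real code additionally holds sbi->extent_lock around the list operations and frees the node inside __detach_extent_node()):

    #include <pthread.h>
    #include <stdio.h>

    struct demo_tree { pthread_rwlock_t lock; };

    struct demo_node {
            struct demo_node *prev, *next;  /* position in the global LRU */
            struct demo_tree *et;           /* back-pointer to the owning tree */
    };

    static void lru_del(struct demo_node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
    }

    static void lru_add_tail(struct demo_node *lru, struct demo_node *n)
    {
            n->prev = lru->prev;
            n->next = lru;
            lru->prev->next = n;
            lru->prev = n;
    }

    /* Reclaim up to nr nodes from the LRU head; rotate nodes whose owning
     * tree is currently locked instead of blocking on them. */
    static int demo_shrink(struct demo_node *lru, int nr)
    {
            int freed = 0;

            for (; nr > 0 && lru->next != lru; nr--) {
                    struct demo_node *en = lru->next;

                    if (pthread_rwlock_trywrlock(&en->et->lock) != 0) {
                            lru_del(en);
                            lru_add_tail(lru, en);  /* refresh LRU position */
                            continue;
                    }
                    lru_del(en);                    /* detach (and free) the node */
                    pthread_rwlock_unlock(&en->et->lock);
                    freed++;
            }
            return freed;
    }

    int main(void)
    {
            struct demo_tree t = { PTHREAD_RWLOCK_INITIALIZER };
            struct demo_node lru, a = { 0 }, b = { 0 };

            lru.prev = lru.next = &lru;             /* empty sentinel list */
            a.et = b.et = &t;
            lru_add_tail(&lru, &a);
            lru_add_tail(&lru, &b);

            printf("freed %d nodes\n", demo_shrink(&lru, 2));
            return 0;
    }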
index ff79054c6cf6a5bfe254abaf203ae94acd44739b..f6a841b85d40a45bb720afa3a04b8e3a42373d90 100644 (file)
@@ -354,6 +354,7 @@ struct extent_node {
        struct rb_node rb_node;         /* rb node located in rb-tree */
        struct list_head list;          /* node in global extent list of sbi */
        struct extent_info ei;          /* extent info */
+       struct extent_tree *et;         /* extent tree pointer */
 };
 
 struct extent_tree {
@@ -382,6 +383,7 @@ struct f2fs_map_blocks {
        block_t m_lblk;
        unsigned int m_len;
        unsigned int m_flags;
+       pgoff_t *m_next_pgofs;          /* point next possible non-hole pgofs */
 };
 
 /* for flag in get_data_block */
@@ -389,6 +391,8 @@ struct f2fs_map_blocks {
 #define F2FS_GET_BLOCK_DIO             1
 #define F2FS_GET_BLOCK_FIEMAP          2
 #define F2FS_GET_BLOCK_BMAP            3
+#define F2FS_GET_BLOCK_PRE_DIO         4
+#define F2FS_GET_BLOCK_PRE_AIO         5
 
 /*
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
@@ -515,6 +519,7 @@ struct f2fs_nm_info {
        nid_t next_scan_nid;            /* the next nid to be scanned */
        unsigned int ram_thresh;        /* control the memory footprint */
        unsigned int ra_nid_pages;      /* # of nid pages to be readaheaded */
+       unsigned int dirty_nats_ratio;  /* control dirty nats ratio threshold */
 
        /* NAT cache management */
        struct radix_tree_root nat_root;/* root of the nat entry cache */
@@ -549,6 +554,8 @@ struct dnode_of_data {
        unsigned int ofs_in_node;       /* data offset in the node page */
        bool inode_page_locked;         /* inode page is locked or not */
        bool node_changed;              /* is node block changed */
+       char cur_level;                 /* level of hole node page */
+       char max_level;                 /* level of current page located */
        block_t data_blkaddr;           /* block address of the node block */
 };
 
@@ -844,8 +851,19 @@ struct f2fs_sb_info {
        struct list_head s_list;
        struct mutex umount_mutex;
        unsigned int shrinker_run_no;
+
+       /* For write statistics */
+       u64 sectors_written_start;
+       u64 kbytes_written;
 };
 
+/* For write statistics. Suppose sector size is 512 bytes,
+ * and the return value is in kbytes. s is of struct f2fs_sb_info.
+ */
+#define BD_PART_WRITTEN(s)                                              \
+(((u64)part_stat_read(s->sb->s_bdev->bd_part, sectors[1]) -             \
+               s->sectors_written_start) >> 1)
+
 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
 {
        sbi->last_time[type] = jiffies;
@@ -1525,9 +1543,9 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
        return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
 }
 
-static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
+static inline unsigned int addrs_per_inode(struct inode *inode)
 {
-       if (f2fs_has_inline_xattr(&fi->vfs_inode))
+       if (f2fs_has_inline_xattr(inode))
                return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
        return DEF_ADDRS_PER_INODE;
 }
@@ -1681,10 +1699,10 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
         (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
 
 /* get offset of first page in next direct node */
-#define PGOFS_OF_NEXT_DNODE(pgofs, fi)                         \
-       ((pgofs < ADDRS_PER_INODE(fi)) ? ADDRS_PER_INODE(fi) :  \
-       (pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) /       \
-       ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
+#define PGOFS_OF_NEXT_DNODE(pgofs, inode)                              \
+       ((pgofs < ADDRS_PER_INODE(inode)) ? ADDRS_PER_INODE(inode) :    \
+       (pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) /    \
+       ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
 
 /*
  * file.c
@@ -1780,6 +1798,7 @@ int need_dentry_mark(struct f2fs_sb_info *, nid_t);
 bool is_checkpointed_node(struct f2fs_sb_info *, nid_t);
 bool need_inode_block_update(struct f2fs_sb_info *, nid_t);
 void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
+pgoff_t get_next_page_offset(struct dnode_of_data *, pgoff_t);
 int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
 int truncate_inode_blocks(struct inode *, pgoff_t);
 int truncate_xattr_node(struct inode *, struct page *);
@@ -1836,7 +1855,7 @@ void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
                                block_t, block_t, unsigned char, bool);
 void allocate_data_block(struct f2fs_sb_info *, struct page *,
                block_t, block_t *, struct f2fs_summary *, int);
-void f2fs_wait_on_page_writeback(struct page *, enum page_type);
+void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
 void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
 void write_data_summaries(struct f2fs_sb_info *, block_t);
 void write_node_summaries(struct f2fs_sb_info *, block_t);
@@ -1881,11 +1900,14 @@ void destroy_checkpoint_caches(void);
  * data.c
  */
 void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
+                               struct page *, nid_t, enum page_type, int);
 int f2fs_submit_page_bio(struct f2fs_io_info *);
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_get_block(struct dnode_of_data *, pgoff_t);
+ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
 struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
 struct page *find_data_page(struct inode *, pgoff_t);
@@ -1902,7 +1924,7 @@ int f2fs_release_page(struct page *, gfp_t);
  */
 int start_gc_thread(struct f2fs_sb_info *);
 void stop_gc_thread(struct f2fs_sb_info *);
-block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
+block_t start_bidx_of_node(unsigned int, struct inode *);
 int f2fs_gc(struct f2fs_sb_info *, bool);
 void build_gc_manager(struct f2fs_sb_info *);
 
index ea272be62677004d29c8018691ea442c2c992bb1..a4362d4d714b09f838bf46cad48d6f7aa62de2fd 100644 (file)
@@ -86,7 +86,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
        trace_f2fs_vm_page_mkwrite(page, DATA);
 mapped:
        /* fill the page */
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, false);
 
        /* wait for GCed encrypted page writeback */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
@@ -358,15 +358,14 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
                } else if (err == -ENOENT) {
                        /* direct node does not exists */
                        if (whence == SEEK_DATA) {
-                               pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
-                                                       F2FS_I(inode));
+                               pgofs = get_next_page_offset(&dn, pgofs);
                                continue;
                        } else {
                                goto found;
                        }
                }
 
-               end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+               end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 
                /* find data/hole in dnode block */
                for (; dn.ofs_in_node < end_offset;
@@ -480,7 +479,7 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
                 * we will invalidate all blkaddr in the whole range.
                 */
                fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
-                                               F2FS_I(dn->inode)) + ofs;
+                                                       dn->inode) + ofs;
                f2fs_update_extent_cache_range(dn, fofs, 0, len);
                dec_valid_block_count(sbi, dn->inode, nr_free);
                sync_inode_page(dn);
@@ -521,7 +520,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
        if (IS_ERR(page))
                return 0;
 truncate_out:
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user(page, offset, PAGE_CACHE_SIZE - offset);
        if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
                set_page_dirty(page);
@@ -568,7 +567,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
                goto out;
        }
 
-       count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+       count = ADDRS_PER_PAGE(dn.node_page, inode);
 
        count -= dn.ofs_in_node;
        f2fs_bug_on(sbi, count < 0);
@@ -743,7 +742,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
        if (IS_ERR(page))
                return PTR_ERR(page);
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user(page, start, len);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
@@ -768,7 +767,7 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
                        return err;
                }
 
-               end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+               end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
 
                f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
@@ -892,7 +891,7 @@ static int __exchange_data_block(struct inode *inode, pgoff_t src,
                psrc = get_lock_data_page(inode, src, true);
                if (IS_ERR(psrc))
                        return PTR_ERR(psrc);
-               pdst = get_new_data_page(inode, NULL, dst, false);
+               pdst = get_new_data_page(inode, NULL, dst, true);
                if (IS_ERR(pdst)) {
                        f2fs_put_page(psrc, 1);
                        return PTR_ERR(pdst);
@@ -1648,7 +1647,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
                                        struct f2fs_defragment *range)
 {
        struct inode *inode = file_inode(filp);
-       struct f2fs_map_blocks map;
+       struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
        struct extent_info ei;
        pgoff_t pg_start, pg_end;
        unsigned int blk_per_seg = sbi->blocks_per_seg;
@@ -1874,14 +1873,32 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
-       struct inode *inode = file_inode(iocb->ki_filp);
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       ssize_t ret;
 
        if (f2fs_encrypted_inode(inode) &&
                                !f2fs_has_encryption_key(inode) &&
                                f2fs_get_encryption_info(inode))
                return -EACCES;
 
-       return generic_file_write_iter(iocb, from);
+       inode_lock(inode);
+       ret = generic_write_checks(iocb, from);
+       if (ret > 0) {
+               ret = f2fs_preallocate_blocks(iocb, from);
+               if (!ret)
+                       ret = __generic_file_write_iter(iocb, from);
+       }
+       inode_unlock(inode);
+
+       if (ret > 0) {
+               ssize_t err;
+
+               err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+               if (err < 0)
+                       ret = err;
+       }
+       return ret;
 }
 
 #ifdef CONFIG_COMPAT
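
f2fs_file_write_iter() above stops delegating to generic_file_write_iter() so that f2fs_preallocate_blocks() can run between generic_write_checks() and __generic_file_write_iter(), reserving or pre-mapping the target blocks in one pass (F2FS_GET_BLOCK_PRE_AIO / PRE_DIO) instead of the old __allocate_data_blocks() loop that only covered direct I/O. The ordering is: take the inode lock, validate and clamp the request, preallocate, perform the buffered or direct write, drop the lock, then sync the byte range that was actually written. A compact model of that ordering with invented stand-in helpers (not VFS or f2fs symbols):

    #include <stdio.h>
    #include <sys/types.h>

    /* Stand-ins for generic_write_checks(), f2fs_preallocate_blocks(),
     * __generic_file_write_iter() and generic_write_sync(); demo only. */
    static ssize_t demo_checks(ssize_t len)            { return len; }
    static ssize_t demo_preallocate(ssize_t len)       { (void)len; return 0; }
    static ssize_t demo_write(ssize_t len)             { return len; }
    static ssize_t demo_sync(off_t pos, ssize_t count) { (void)pos; (void)count; return 0; }

    static ssize_t demo_write_iter(off_t *pos, ssize_t len)
    {
            ssize_t ret;

            /* inode_lock(inode); */
            ret = demo_checks(len);                /* validate and clamp */
            if (ret > 0) {
                    ret = demo_preallocate(ret);   /* reserve blocks up front */
                    if (!ret) {
                            ret = demo_write(len); /* buffered or direct write */
                            if (ret > 0)
                                    *pos += ret;   /* the write advances ki_pos */
                    }
            }
            /* inode_unlock(inode); */

            if (ret > 0) {
                    ssize_t err = demo_sync(*pos - ret, ret);  /* sync written range */

                    if (err < 0)
                            ret = err;
            }
            return ret;
    }

    int main(void)
    {
            off_t pos = 0;
            ssize_t ret = demo_write_iter(&pos, 4096);

            printf("wrote %zd bytes, pos now %lld\n", ret, (long long)pos);
            return 0;
    }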
index f610c2a9bdde9561d3be71ab4d674376db8ec401..47ade3542fbdd679471dfc9927f6d2782034c45f 100644 (file)
@@ -245,6 +245,18 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                return get_cb_cost(sbi, segno);
 }
 
+static unsigned int count_bits(const unsigned long *addr,
+                               unsigned int offset, unsigned int len)
+{
+       unsigned int end = offset + len, sum = 0;
+
+       while (offset < end) {
+               if (test_bit(offset++, addr))
+                       ++sum;
+       }
+       return sum;
+}
+
 /*
  * This function is called from two paths.
  * One is garbage collection and the other is SSR segment selection.
@@ -260,7 +272,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
        struct victim_sel_policy p;
        unsigned int secno, max_cost;
        unsigned int last_segment = MAIN_SEGS(sbi);
-       int nsearched = 0;
+       unsigned int nsearched = 0;
 
        mutex_lock(&dirty_i->seglist_lock);
 
@@ -295,26 +307,31 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                }
 
                p.offset = segno + p.ofs_unit;
-               if (p.ofs_unit > 1)
+               if (p.ofs_unit > 1) {
                        p.offset -= segno % p.ofs_unit;
+                       nsearched += count_bits(p.dirty_segmap,
+                                               p.offset - p.ofs_unit,
+                                               p.ofs_unit);
+               } else {
+                       nsearched++;
+               }
+
 
                secno = GET_SECNO(sbi, segno);
 
                if (sec_usage_check(sbi, secno))
-                       continue;
+                       goto next;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
-                       continue;
+                       goto next;
 
                cost = get_gc_cost(sbi, segno, &p);
 
                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
-               } else if (unlikely(cost == max_cost)) {
-                       continue;
                }
-
-               if (nsearched++ >= p.max_search) {
+next:
+               if (nsearched >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
                        break;
                }
@@ -399,7 +416,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
        bool initial = true;
@@ -419,7 +436,7 @@ next_step:
 
                /* stop BG_GC if there is not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-                       return 0;
+                       return;
 
                if (check_valid_map(sbi, segno, off) == 0)
                        continue;
@@ -446,7 +463,7 @@ next_step:
 
                /* set page dirty and write it */
                if (gc_type == FG_GC) {
-                       f2fs_wait_on_page_writeback(node_page, NODE);
+                       f2fs_wait_on_page_writeback(node_page, NODE, true);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
@@ -460,20 +477,6 @@ next_step:
                initial = false;
                goto next_step;
        }
-
-       if (gc_type == FG_GC) {
-               struct writeback_control wbc = {
-                       .sync_mode = WB_SYNC_ALL,
-                       .nr_to_write = LONG_MAX,
-                       .for_reclaim = 0,
-               };
-               sync_node_pages(sbi, 0, &wbc);
-
-               /* return 1 only if FG_GC succefully reclaimed one */
-               if (get_valid_blocks(sbi, segno, 1) == 0)
-                       return 1;
-       }
-       return 0;
 }
 
 /*
@@ -483,7 +486,7 @@ next_step:
  * as indirect or double indirect node blocks, are given, it must be a caller's
  * bug.
  */
-block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
 {
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;
@@ -500,7 +503,7 @@ block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
-       return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
+       return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
 }
 
 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -567,7 +570,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
         * don't cache encrypted data into meta inode until previous dirty
         * data were writebacked to avoid racing between GC and flush.
         */
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
 
        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
@@ -596,14 +599,14 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
                goto put_page_out;
 
        set_page_dirty(fio.encrypted_page);
-       f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
+       f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
        set_page_writeback(fio.encrypted_page);
 
        /* allocate block address */
-       f2fs_wait_on_page_writeback(dn.node_page, NODE);
+       f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
        allocate_data_block(fio.sbi, NULL, fio.blk_addr,
                                        &fio.blk_addr, &sum, CURSEG_COLD_DATA);
        fio.rw = WRITE_SYNC;
@@ -645,7 +648,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
                        .encrypted_page = NULL,
                };
                set_page_dirty(page);
-               f2fs_wait_on_page_writeback(page, DATA);
+               f2fs_wait_on_page_writeback(page, DATA, true);
                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
@@ -663,7 +666,7 @@ out:
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
 {
        struct super_block *sb = sbi->sb;
@@ -686,7 +689,7 @@ next_step:
 
                /* stop BG_GC if there is not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-                       return 0;
+                       return;
 
                if (check_valid_map(sbi, segno, off) == 0)
                        continue;
@@ -719,7 +722,7 @@ next_step:
                                continue;
                        }
 
-                       start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
+                       start_bidx = start_bidx_of_node(nofs, inode);
                        data_page = get_read_data_page(inode,
                                        start_bidx + ofs_in_node, READA, true);
                        if (IS_ERR(data_page)) {
@@ -735,7 +738,7 @@ next_step:
                /* phase 3 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
-                       start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
+                       start_bidx = start_bidx_of_node(nofs, inode)
                                                                + ofs_in_node;
                        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                                move_encrypted_block(inode, start_bidx);
@@ -747,15 +750,6 @@ next_step:
 
        if (++phase < 4)
                goto next_step;
-
-       if (gc_type == FG_GC) {
-               f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
-               /* return 1 only if FG_GC succefully reclaimed one */
-               if (get_valid_blocks(sbi, segno, 1) == 0)
-                       return 1;
-       }
-       return 0;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -771,53 +765,90 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
        return ret;
 }
 
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi,
+                               unsigned int start_segno,
                                struct gc_inode_list *gc_list, int gc_type)
 {
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
-       int nfree = 0;
+       unsigned int segno = start_segno;
+       unsigned int end_segno = start_segno + sbi->segs_per_sec;
+       int seg_freed = 0;
+       unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+                                               SUM_TYPE_DATA : SUM_TYPE_NODE;
 
-       /* read segment summary of victim */
-       sum_page = get_sum_page(sbi, segno);
+       /* readahead multiple SSA blocks that have contiguous addresses */
+       if (sbi->segs_per_sec > 1)
+               ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
+                                       sbi->segs_per_sec, META_SSA, true);
+
+       /* reference all summary pages */
+       while (segno < end_segno) {
+               sum_page = get_sum_page(sbi, segno++);
+               unlock_page(sum_page);
+       }
 
        blk_start_plug(&plug);
 
-       sum = page_address(sum_page);
+       for (segno = start_segno; segno < end_segno; segno++) {
+               /* find segment summary of victim */
+               sum_page = find_get_page(META_MAPPING(sbi),
+                                       GET_SUM_BLOCK(sbi, segno));
+               f2fs_bug_on(sbi, !PageUptodate(sum_page));
+               f2fs_put_page(sum_page, 0);
 
-       /*
-        * this is to avoid deadlock:
-        * - lock_page(sum_page)         - f2fs_replace_block
-        *  - check_valid_map()            - mutex_lock(sentry_lock)
-        *   - mutex_lock(sentry_lock)     - change_curseg()
-        *                                  - lock_page(sum_page)
-        */
-       unlock_page(sum_page);
-
-       switch (GET_SUM_TYPE((&sum->footer))) {
-       case SUM_TYPE_NODE:
-               nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
-               break;
-       case SUM_TYPE_DATA:
-               nfree = gc_data_segment(sbi, sum->entries, gc_list,
-                                                       segno, gc_type);
-               break;
+               sum = page_address(sum_page);
+               f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+
+               /*
+                * this is to avoid deadlock:
+                * - lock_page(sum_page)         - f2fs_replace_block
+                *  - check_valid_map()            - mutex_lock(sentry_lock)
+                *   - mutex_lock(sentry_lock)     - change_curseg()
+                *                                  - lock_page(sum_page)
+                */
+
+               if (type == SUM_TYPE_NODE)
+                       gc_node_segment(sbi, sum->entries, segno, gc_type);
+               else
+                       gc_data_segment(sbi, sum->entries, gc_list, segno,
+                                                               gc_type);
+
+               stat_inc_seg_count(sbi, type, gc_type);
+               stat_inc_call_count(sbi->stat_info);
+
+               f2fs_put_page(sum_page, 0);
        }
-       blk_finish_plug(&plug);
 
-       stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
-       stat_inc_call_count(sbi->stat_info);
+       if (gc_type == FG_GC) {
+               if (type == SUM_TYPE_NODE) {
+                       struct writeback_control wbc = {
+                               .sync_mode = WB_SYNC_ALL,
+                               .nr_to_write = LONG_MAX,
+                               .for_reclaim = 0,
+                       };
+                       sync_node_pages(sbi, 0, &wbc);
+               } else {
+                       f2fs_submit_merged_bio(sbi, DATA, WRITE);
+               }
+       }
 
-       f2fs_put_page(sum_page, 0);
-       return nfree;
+       blk_finish_plug(&plug);
+
+       if (gc_type == FG_GC) {
+               while (start_segno < end_segno)
+                       if (get_valid_blocks(sbi, start_segno++, 1) == 0)
+                               seg_freed++;
+       }
+       return seg_freed;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
 {
-       unsigned int segno, i;
+       unsigned int segno;
        int gc_type = sync ? FG_GC : BG_GC;
-       int sec_freed = 0;
+       int sec_freed = 0, seg_freed;
        int ret = -EINVAL;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
@@ -838,30 +869,24 @@ gc_more:
 
        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
                gc_type = FG_GC;
+               /*
+                * If there is no victim and no prefree segment but still not
+                * enough free sections, we should flush dent/node blocks and do
+                * garbage collections.
+                */
                if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
                        write_checkpoint(sbi, &cpc);
+               else if (has_not_enough_free_secs(sbi, 0))
+                       write_checkpoint(sbi, &cpc);
        }
 
        if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;
 
-       /* readahead multi ssa blocks those have contiguous address */
-       if (sbi->segs_per_sec > 1)
-               ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
-                                                       META_SSA, true);
-
-       for (i = 0; i < sbi->segs_per_sec; i++) {
-               /*
-                * for FG_GC case, halt gcing left segments once failed one
-                * of segments in selected section to avoid long latency.
-                */
-               if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
-                               gc_type == FG_GC)
-                       break;
-       }
+       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
 
-       if (i == sbi->segs_per_sec && gc_type == FG_GC)
+       if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
                sec_freed++;
 
        if (gc_type == FG_GC)
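A standalone sketch of the section-level accounting the gc.c changes above introduce: do_garbage_collect() now walks every segment of the victim section and returns how many of them ended up empty, and f2fs_gc() credits a freed section only when that count equals segs_per_sec. The struct and helpers below are toy stand-ins for illustration, not the f2fs API.

#include <stdio.h>

/* Toy stand-ins for the kernel structures (not the f2fs API). */
struct toy_sb {
        unsigned int segs_per_sec;
        unsigned int valid_blocks[16];  /* valid block count per segment */
};

/* Pretend GC migrated every valid block out of this segment. */
static void gc_one_segment(struct toy_sb *sbi, unsigned int segno)
{
        sbi->valid_blocks[segno] = 0;
}

/* Mirrors the new shape of do_garbage_collect(): GC the whole section,
 * then report how many of its segments became empty. */
static int toy_do_garbage_collect(struct toy_sb *sbi, unsigned int start_segno)
{
        unsigned int end_segno = start_segno + sbi->segs_per_sec;
        unsigned int segno;
        int seg_freed = 0;

        for (segno = start_segno; segno < end_segno; segno++)
                gc_one_segment(sbi, segno);

        for (segno = start_segno; segno < end_segno; segno++)
                if (sbi->valid_blocks[segno] == 0)
                        seg_freed++;

        return seg_freed;
}

int main(void)
{
        struct toy_sb sbi = { .segs_per_sec = 4, .valid_blocks = { 3, 0, 7, 1 } };
        int seg_freed = toy_do_garbage_collect(&sbi, 0);

        /* f2fs_gc() counts a freed section only if every segment was freed. */
        printf("seg_freed=%d sec_freed=%d\n",
               seg_freed, seg_freed == (int)sbi.segs_per_sec);
        return 0;
}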
index c3f0b7d4cfca174bba6e6b4baa796640b767d2ec..0be4a9b400c63db0fecd285b3ad21fe665cb5f7f 100644 (file)
@@ -71,7 +71,7 @@ bool truncate_inline_inode(struct page *ipage, u64 from)
 
        addr = inline_data_addr(ipage);
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
        memset(addr + from, 0, MAX_INLINE_DATA - from);
 
        return true;
@@ -124,8 +124,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
        if (err)
                return err;
 
-       f2fs_wait_on_page_writeback(page, DATA);
-
+       f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
        if (PageUptodate(page))
                goto no_update;
 
@@ -150,7 +149,7 @@ no_update:
        write_data_page(dn, &fio);
        set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        if (dirty)
                inode_dec_dirty_pages(dn->inode);
 
@@ -159,6 +158,7 @@ no_update:
 
        /* clear inline data and flag after data writeback */
        truncate_inline_inode(dn->inode_page, 0);
+       clear_inline_node(dn->inode_page);
 clear_out:
        stat_dec_inline_inode(dn->inode);
        f2fs_clear_inline_inode(dn->inode);
@@ -223,7 +223,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
 
        f2fs_bug_on(F2FS_I_SB(inode), page->index);
 
-       f2fs_wait_on_page_writeback(dn.inode_page, NODE);
+       f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
        src_addr = kmap_atomic(page);
        dst_addr = inline_data_addr(dn.inode_page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
@@ -233,6 +233,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
 
        sync_inode_page(&dn);
+       clear_inline_node(dn.inode_page);
        f2fs_put_dnode(&dn);
        return 0;
 }
@@ -261,7 +262,7 @@ process_inline:
                ipage = get_node_page(sbi, inode->i_ino);
                f2fs_bug_on(sbi, IS_ERR(ipage));
 
-               f2fs_wait_on_page_writeback(ipage, NODE);
+               f2fs_wait_on_page_writeback(ipage, NODE, true);
 
                src_addr = inline_data_addr(npage);
                dst_addr = inline_data_addr(ipage);
@@ -389,7 +390,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
        if (err)
                goto out;
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
 
        dentry_blk = kmap_atomic(page);
@@ -469,7 +470,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
                }
        }
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
        name_hash = f2fs_dentry_hash(name);
        make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
@@ -507,7 +508,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
        int i;
 
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, NODE);
+       f2fs_wait_on_page_writeback(page, NODE, true);
 
        inline_dentry = inline_data_addr(page);
        bit_pos = dentry - inline_dentry->dentry;
index 2adeff26be11b9689dde24a5d22b0bd3351c21b7..60e3b3078b81469b300fca0d4bf44b2117f709bf 100644 (file)
@@ -83,7 +83,7 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
 
        while (start < end) {
                if (*start++) {
-                       f2fs_wait_on_page_writeback(ipage, NODE);
+                       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
                        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
                        set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
@@ -227,7 +227,7 @@ int update_inode(struct inode *inode, struct page *node_page)
 {
        struct f2fs_inode *ri;
 
-       f2fs_wait_on_page_writeback(node_page, NODE);
+       f2fs_wait_on_page_writeback(node_page, NODE, true);
 
        ri = F2FS_INODE(node_page);
 
@@ -263,6 +263,10 @@ int update_inode(struct inode *inode, struct page *node_page)
        set_cold_node(inode, node_page);
        clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
 
+       /* deleted inode */
+       if (inode->i_nlink == 0)
+               clear_inline_node(node_page);
+
        return set_page_dirty(node_page);
 }
 
index 342597a5897f059a2d31823923d8664861b7b969..150907ffa7aaf871772488c880d98ce3d86620c5 100644 (file)
@@ -403,14 +403,45 @@ cache:
        up_write(&nm_i->nat_tree_lock);
 }
 
+pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+{
+       const long direct_index = ADDRS_PER_INODE(dn->inode);
+       const long direct_blks = ADDRS_PER_BLOCK;
+       const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+       unsigned int skipped_unit = ADDRS_PER_BLOCK;
+       int cur_level = dn->cur_level;
+       int max_level = dn->max_level;
+       pgoff_t base = 0;
+
+       if (!dn->max_level)
+               return pgofs + 1;
+
+       while (max_level-- > cur_level)
+               skipped_unit *= NIDS_PER_BLOCK;
+
+       switch (dn->max_level) {
+       case 3:
+               base += 2 * indirect_blks;
+       case 2:
+               base += 2 * direct_blks;
+       case 1:
+               base += direct_index;
+               break;
+       default:
+               f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
+       }
+
+       return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
+}
+
 /*
  * The maximum depth is four.
  * Offset[0] will have raw inode offset.
  */
-static int get_node_path(struct f2fs_inode_info *fi, long block,
+static int get_node_path(struct inode *inode, long block,
                                int offset[4], unsigned int noffset[4])
 {
-       const long direct_index = ADDRS_PER_INODE(fi);
+       const long direct_index = ADDRS_PER_INODE(inode);
        const long direct_blks = ADDRS_PER_BLOCK;
        const long dptrs_per_blk = NIDS_PER_BLOCK;
        const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
@@ -495,10 +526,10 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
        int offset[4];
        unsigned int noffset[4];
        nid_t nids[4];
-       int level, i;
+       int level, i = 0;
        int err = 0;
 
-       level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
+       level = get_node_path(dn->inode, index, offset, noffset);
 
        nids[0] = dn->inode->i_ino;
        npage[0] = dn->inode_page;
@@ -585,6 +616,10 @@ release_pages:
 release_out:
        dn->inode_page = NULL;
        dn->node_page = NULL;
+       if (err == -ENOENT) {
+               dn->cur_level = i;
+               dn->max_level = level;
+       }
        return err;
 }
 
@@ -792,7 +827,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
 
        trace_f2fs_truncate_inode_blocks_enter(inode, from);
 
-       level = get_node_path(F2FS_I(inode), from, offset, noffset);
+       level = get_node_path(inode, from, offset, noffset);
 restart:
        page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(page)) {
@@ -861,7 +896,7 @@ skip_partial:
                                f2fs_put_page(page, 1);
                                goto restart;
                        }
-                       f2fs_wait_on_page_writeback(page, NODE);
+                       f2fs_wait_on_page_writeback(page, NODE, true);
                        ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
                        set_page_dirty(page);
                        unlock_page(page);
@@ -976,7 +1011,7 @@ struct page *new_node_page(struct dnode_of_data *dn,
        new_ni.ino = dn->inode->i_ino;
        set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
-       f2fs_wait_on_page_writeback(page, NODE);
+       f2fs_wait_on_page_writeback(page, NODE, true);
        fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
        set_cold_node(dn->inode, page);
        SetPageUptodate(page);
@@ -1154,6 +1189,39 @@ void sync_inode_page(struct dnode_of_data *dn)
        dn->node_changed = ret ? true: false;
 }
 
+static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
+{
+       struct inode *inode;
+       struct page *page;
+
+       /* should flush inline_data before evict_inode */
+       inode = ilookup(sbi->sb, ino);
+       if (!inode)
+               return;
+
+       page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
+       if (!page)
+               goto iput_out;
+
+       if (!PageUptodate(page))
+               goto page_out;
+
+       if (!PageDirty(page))
+               goto page_out;
+
+       if (!clear_page_dirty_for_io(page))
+               goto page_out;
+
+       if (!f2fs_write_inline_data(inode, page))
+               inode_dec_dirty_pages(inode);
+       else
+               set_page_dirty(page);
+page_out:
+       f2fs_put_page(page, 1);
+iput_out:
+       iput(inode);
+}
+
 int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
                                        struct writeback_control *wbc)
 {
@@ -1221,6 +1289,17 @@ continue_unlock:
                                goto continue_unlock;
                        }
 
+                       /* flush inline_data */
+                       if (!ino && is_inline_node(page)) {
+                               clear_inline_node(page);
+                               unlock_page(page);
+                               flush_inline_data(sbi, ino_of_node(page));
+                               continue;
+                       }
+
+                       f2fs_wait_on_page_writeback(page, NODE, true);
+
+                       BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
@@ -1258,8 +1337,13 @@ continue_unlock:
                goto next_step;
        }
 
-       if (wrote)
-               f2fs_submit_merged_bio(sbi, NODE, WRITE);
+       if (wrote) {
+               if (ino)
+                       f2fs_submit_merged_bio_cond(sbi, NULL, NULL,
+                                                       ino, NODE, WRITE);
+               else
+                       f2fs_submit_merged_bio(sbi, NODE, WRITE);
+       }
        return nwritten;
 }
 
@@ -1287,7 +1371,7 @@ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
                                continue;
 
                        if (ino && ino_of_node(page) == ino) {
-                               f2fs_wait_on_page_writeback(page, NODE);
+                               f2fs_wait_on_page_writeback(page, NODE, true);
                                if (TestClearPageError(page))
                                        ret = -EIO;
                        }
@@ -1326,8 +1410,6 @@ static int f2fs_write_node_page(struct page *page,
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;
 
-       f2fs_wait_on_page_writeback(page, NODE);
-
        /* get old block addr of this node page */
        nid = nid_of_node(page);
        f2fs_bug_on(sbi, page->index != nid);
@@ -1356,9 +1438,13 @@ static int f2fs_write_node_page(struct page *page,
        set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
        dec_page_count(sbi, F2FS_DIRTY_NODES);
        up_read(&sbi->node_write);
+
+       if (wbc->for_reclaim)
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);
+
        unlock_page(page);
 
-       if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi)))
+       if (unlikely(f2fs_cp_error(sbi)))
                f2fs_submit_merged_bio(sbi, NODE, WRITE);
 
        return 0;
@@ -1374,8 +1460,6 @@ static int f2fs_write_node_pages(struct address_space *mapping,
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff;
 
-       trace_f2fs_writepages(mapping->host, wbc, NODE);
-
        /* balancing f2fs's metadata in background */
        f2fs_balance_fs_bg(sbi);
 
@@ -1383,6 +1467,8 @@ static int f2fs_write_node_pages(struct address_space *mapping,
        if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
                goto skip_write;
 
+       trace_f2fs_writepages(mapping->host, wbc, NODE);
+
        diff = nr_pages_to_write(sbi, NODE, wbc);
        wbc->sync_mode = WB_SYNC_NONE;
        sync_node_pages(sbi, 0, wbc);
@@ -1391,6 +1477,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 
 skip_write:
        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
+       trace_f2fs_writepages(mapping->host, wbc, NODE);
        return 0;
 }
 
@@ -1703,7 +1790,7 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
        src_addr = inline_xattr_addr(page);
        inline_size = inline_xattr_size(inode);
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
        memcpy(dst_addr, src_addr, inline_size);
 update_inode:
        update_inode(inode, ipage);
@@ -2000,6 +2087,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
        nm_i->nat_cnt = 0;
        nm_i->ram_thresh = DEF_RAM_THRESHOLD;
        nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
+       nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
 
        INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
        INIT_LIST_HEAD(&nm_i->free_nid_list);
index d4d1f636fe1c36c923ebcd68da46249161ccafc8..1f4f9d4569d9cb4f8a6ed93ff112fa300cd49444 100644 (file)
@@ -25,6 +25,9 @@
 /* control the memory footprint threshold (10MB per 1GB ram) */
 #define DEF_RAM_THRESHOLD      10
 
+/* control dirty nats ratio threshold (default: 10% over max nid count) */
+#define DEF_DIRTY_NAT_RATIO_THRESHOLD          10
+
 /* vector size for gang look-up from nat cache that consists of radix tree */
 #define NATVEC_SIZE    64
 #define SETVEC_SIZE    32
@@ -117,6 +120,12 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
        raw_ne->version = ni->version;
 }
 
+static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
+{
+       return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
+                                       NM_I(sbi)->dirty_nats_ratio / 100;
+}
+
 enum mem_type {
        FREE_NIDS,      /* indicates the free nid list */
        NAT_ENTRIES,    /* indicates the cached nat entry */
@@ -321,7 +330,7 @@ static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
 {
        struct f2fs_node *rn = F2FS_NODE(p);
 
-       f2fs_wait_on_page_writeback(p, NODE);
+       f2fs_wait_on_page_writeback(p, NODE, true);
 
        if (i)
                rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
@@ -370,6 +379,21 @@ static inline int is_node(struct page *page, int type)
 #define is_fsync_dnode(page)   is_node(page, FSYNC_BIT_SHIFT)
 #define is_dent_dnode(page)    is_node(page, DENT_BIT_SHIFT)
 
+static inline int is_inline_node(struct page *page)
+{
+       return PageChecked(page);
+}
+
+static inline void set_inline_node(struct page *page)
+{
+       SetPageChecked(page);
+}
+
+static inline void clear_inline_node(struct page *page)
+{
+       ClearPageChecked(page);
+}
+
 static inline void set_cold_node(struct inode *inode, struct page *page)
 {
        struct f2fs_node *rn = F2FS_NODE(page);
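The node.h helpers above mark node pages that carry inline data by reusing the page's Checked flag, and the node.c writeback path diverts such pages to flush_inline_data() instead of writing them directly. A minimal userspace model of that tag-then-divert pattern; the structures and names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct page: one bit models PG_checked. */
struct toy_page {
        unsigned long flags;
        int ino;
};
#define TOY_PG_CHECKED (1UL << 0)

static void set_inline_node(struct toy_page *p)   { p->flags |= TOY_PG_CHECKED; }
static void clear_inline_node(struct toy_page *p) { p->flags &= ~TOY_PG_CHECKED; }
static bool is_inline_node(struct toy_page *p)    { return p->flags & TOY_PG_CHECKED; }

/* Models the sync_node_pages() hook: inline-tagged node pages are diverted
 * to a "flush the cached inline data first" path instead of being written
 * out as ordinary node pages. */
static void writeback_one(struct toy_page *p)
{
        if (is_inline_node(p)) {
                clear_inline_node(p);
                printf("ino %d: flush inline data, then revisit the node page\n", p->ino);
                return;
        }
        printf("ino %d: write node page\n", p->ino);
}

int main(void)
{
        struct toy_page a = { .ino = 3 }, b = { .ino = 4 };

        set_inline_node(&a);    /* some writer marked this page as holding inline data */
        writeback_one(&a);
        writeback_one(&b);
        return 0;
}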
index 589b20b8677b8cc1dcf3d16a6f7e0943ce0283eb..5045dd6a27e96c9a890e3e885ac01947c4914c53 100644 (file)
@@ -350,8 +350,7 @@ got_it:
                inode = dn->inode;
        }
 
-       bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
-                       le16_to_cpu(sum.ofs_in_node);
+       bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
 
        /*
         * if inode page is locked, unlock temporarily, but its reference
@@ -386,10 +385,9 @@ truncate_out:
 static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page, block_t blkaddr)
 {
-       struct f2fs_inode_info *fi = F2FS_I(inode);
-       unsigned int start, end;
        struct dnode_of_data dn;
        struct node_info ni;
+       unsigned int start, end;
        int err = 0, recovered = 0;
 
        /* step 1: recover xattr */
@@ -409,8 +407,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                goto out;
 
        /* step 3: recover data indices */
-       start = start_bidx_of_node(ofs_of_node(page), fi);
-       end = start + ADDRS_PER_PAGE(page, fi);
+       start = start_bidx_of_node(ofs_of_node(page), inode);
+       end = start + ADDRS_PER_PAGE(page, inode);
 
        set_new_dnode(&dn, inode, NULL, NULL, 0);
 
@@ -418,7 +416,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        if (err)
                goto out;
 
-       f2fs_wait_on_page_writeback(dn.node_page, NODE);
+       f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
        get_node_info(sbi, dn.nid, &ni);
        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
index 5904a411c86fe8d4feb70f695809273b3a46bd6a..57a5f7bb275ae21b0935e1d8f89e83624bf06117 100644 (file)
@@ -223,7 +223,8 @@ int commit_inmem_pages(struct inode *inode, bool abort)
                if (!abort) {
                        if (cur->page->mapping == inode->i_mapping) {
                                set_page_dirty(cur->page);
-                               f2fs_wait_on_page_writeback(cur->page, DATA);
+                               f2fs_wait_on_page_writeback(cur->page, DATA,
+                                                                       true);
                                if (clear_page_dirty_for_io(cur->page))
                                        inode_dec_dirty_pages(inode);
                                trace_f2fs_commit_inmem_page(cur->page, INMEM);
@@ -253,7 +254,8 @@ int commit_inmem_pages(struct inode *inode, bool abort)
        if (!abort) {
                f2fs_unlock_op(sbi);
                if (submit_bio)
-                       f2fs_submit_merged_bio(sbi, DATA, WRITE);
+                       f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0,
+                                                               DATA, WRITE);
        }
        return err;
 }
@@ -291,8 +293,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 
        /* checkpoint is the only way to shrink partial cached entries */
        if (!available_free_memory(sbi, NAT_ENTRIES) ||
-                       excess_prefree_segs(sbi) ||
                        !available_free_memory(sbi, INO_ENTRIES) ||
+                       excess_prefree_segs(sbi) ||
+                       excess_dirty_nats(sbi) ||
                        (is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
                if (test_opt(sbi, DATA_FLUSH))
                        sync_dirty_inodes(sbi, FILE_INODE);
@@ -873,9 +876,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 
        if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
                segno = find_next_zero_bit(free_i->free_segmap,
-                                       MAIN_SEGS(sbi), *newseg + 1);
-               if (segno - *newseg < sbi->segs_per_sec -
-                                       (*newseg % sbi->segs_per_sec))
+                               (hint + 1) * sbi->segs_per_sec, *newseg + 1);
+               if (segno < (hint + 1) * sbi->segs_per_sec)
                        goto got_it;
        }
 find_other_zone:
@@ -1415,53 +1417,17 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
        f2fs_update_extent_cache(dn);
 }
 
-static inline bool is_merged_page(struct f2fs_sb_info *sbi,
-                                       struct page *page, enum page_type type)
-{
-       enum page_type btype = PAGE_TYPE_OF_BIO(type);
-       struct f2fs_bio_info *io = &sbi->write_io[btype];
-       struct bio_vec *bvec;
-       struct page *target;
-       int i;
-
-       down_read(&io->io_rwsem);
-       if (!io->bio) {
-               up_read(&io->io_rwsem);
-               return false;
-       }
-
-       bio_for_each_segment_all(bvec, io->bio, i) {
-
-               if (bvec->bv_page->mapping) {
-                       target = bvec->bv_page;
-               } else {
-                       struct f2fs_crypto_ctx *ctx;
-
-                       /* encrypted page */
-                       ctx = (struct f2fs_crypto_ctx *)page_private(
-                                                               bvec->bv_page);
-                       target = ctx->w.control_page;
-               }
-
-               if (page == target) {
-                       up_read(&io->io_rwsem);
-                       return true;
-               }
-       }
-
-       up_read(&io->io_rwsem);
-       return false;
-}
-
 void f2fs_wait_on_page_writeback(struct page *page,
-                               enum page_type type)
+                               enum page_type type, bool ordered)
 {
        if (PageWriteback(page)) {
                struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
-               if (is_merged_page(sbi, page, type))
-                       f2fs_submit_merged_bio(sbi, type, WRITE);
-               wait_on_page_writeback(page);
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE);
+               if (ordered)
+                       wait_on_page_writeback(page);
+               else
+                       wait_for_stable_page(page);
        }
 }
 
@@ -1477,7 +1443,7 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
 
        cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
        if (cpage) {
-               f2fs_wait_on_page_writeback(cpage, DATA);
+               f2fs_wait_on_page_writeback(cpage, DATA, true);
                f2fs_put_page(cpage, 1);
        }
 }
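With the extra `ordered` argument introduced above, f2fs_wait_on_page_writeback() always submits the merged bio holding the page and then either waits for writeback to complete fully (ordered, used by callers about to modify the page) or only waits for a stable page. A simplified standalone model of that call shape; the stub functions below merely imitate the kernel helpers and are not the real API.

#include <stdbool.h>
#include <stdio.h>

/* Userspace stubs imitating the kernel helpers used above. */
struct toy_page { bool under_writeback; };

static void submit_merged_bio(struct toy_page *p)
{
        (void)p;
        puts("submit the merged bio holding this page");
}

static void wait_writeback_done(struct toy_page *p)
{
        p->under_writeback = false;
        puts("ordered: wait until writeback fully completes");
}

static void wait_stable_page(struct toy_page *p)
{
        (void)p;
        puts("unordered: wait only until the page is stable");
}

/* Mirrors the new call shape of f2fs_wait_on_page_writeback(). */
static void toy_wait_on_page_writeback(struct toy_page *p, bool ordered)
{
        if (!p->under_writeback)
                return;
        submit_merged_bio(p);
        if (ordered)
                wait_writeback_done(p);
        else
                wait_stable_page(p);
}

int main(void)
{
        struct toy_page p = { .under_writeback = true };

        /* e.g. before copying new data into the page */
        toy_wait_on_page_writeback(&p, true);
        return 0;
}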
index ee44d346ea44143e5610c59c3d0ed66c88526c43..cd7111b9a664f542865e1272b25554443ac90c5a 100644 (file)
@@ -183,7 +183,7 @@ struct segment_allocation {
  * this value is set in page as a private data which indicate that
  * the page is atomically written, and it is in inmem_pages list.
  */
-#define ATOMIC_WRITTEN_PAGE            0x0000ffff
+#define ATOMIC_WRITTEN_PAGE            ((unsigned long)-1)
 
 #define IS_ATOMIC_WRITTEN_PAGE(page)                   \
                (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
index 6134832baaaf0b25a86ed9cec1e8121b3ba3e4e2..9445a34b8d48c184ecf0762b65dc2096896d2885 100644 (file)
@@ -126,6 +126,19 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
        return NULL;
 }
 
+static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
+               struct f2fs_sb_info *sbi, char *buf)
+{
+       struct super_block *sb = sbi->sb;
+
+       if (!sb->s_bdev->bd_part)
+               return snprintf(buf, PAGE_SIZE, "0\n");
+
+       return snprintf(buf, PAGE_SIZE, "%llu\n",
+               (unsigned long long)(sbi->kbytes_written +
+                       BD_PART_WRITTEN(sbi)));
+}
+
 static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
                        struct f2fs_sb_info *sbi, char *buf)
 {
@@ -204,6 +217,9 @@ static struct f2fs_attr f2fs_attr_##_name = {                       \
                f2fs_sbi_show, f2fs_sbi_store,                  \
                offsetof(struct struct_name, elname))
 
+#define F2FS_GENERAL_RO_ATTR(name) \
+static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
+
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
@@ -216,10 +232,12 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
 
 #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
 static struct attribute *f2fs_attrs[] = {
@@ -237,8 +255,10 @@ static struct attribute *f2fs_attrs[] = {
        ATTR_LIST(dir_level),
        ATTR_LIST(ram_thresh),
        ATTR_LIST(ra_nid_pages),
+       ATTR_LIST(dirty_nats_ratio),
        ATTR_LIST(cp_interval),
        ATTR_LIST(idle_interval),
+       ATTR_LIST(lifetime_write_kbytes),
        NULL,
 };
 
@@ -562,6 +582,13 @@ static void f2fs_put_super(struct super_block *sb)
        f2fs_leave_shrinker(sbi);
        mutex_unlock(&sbi->umount_mutex);
 
+       /* our cp_error case, we can wait for any writeback page */
+       if (get_pages(sbi, F2FS_WRITEBACK)) {
+               f2fs_submit_merged_bio(sbi, DATA, WRITE);
+               f2fs_submit_merged_bio(sbi, NODE, WRITE);
+               f2fs_submit_merged_bio(sbi, META, WRITE);
+       }
+
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
 
@@ -766,8 +793,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        bool need_stop_gc = false;
        bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
 
-       sync_filesystem(sb);
-
        /*
         * Save the old mount options in case we
         * need to restore them.
@@ -775,6 +800,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        org_mount_opt = sbi->mount_opt;
        active_logs = sbi->active_logs;
 
+       if (*flags & MS_RDONLY) {
+               set_opt(sbi, FASTBOOT);
+               set_sbi_flag(sbi, SBI_IS_DIRTY);
+       }
+
+       sync_filesystem(sb);
+
        sbi->mount_opt.opt = 0;
        default_options(sbi);
 
@@ -1242,6 +1274,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        bool retry = true, need_fsck = false;
        char *options = NULL;
        int recovery, i, valid_super_block;
+       struct curseg_info *seg_i;
 
 try_onemore:
        err = -EINVAL;
@@ -1372,6 +1405,17 @@ try_onemore:
                goto free_nm;
        }
 
+       /* For write statistics */
+       if (sb->s_bdev->bd_part)
+               sbi->sectors_written_start =
+                       (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+
+       /* Read accumulated write IO statistics if they exist */
+       seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+       if (__exist_node_summaries(sbi))
+               sbi->kbytes_written =
+                       le64_to_cpu(seg_i->sum_blk->info.kbytes_written);
+
        build_gc_manager(sbi);
 
        /* get an inode for node space */
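The lifetime_write_kbytes attribute added above reports the checkpointed kbytes_written plus the writes issued to the partition since mount. BD_PART_WRITTEN() is defined elsewhere in the series; assuming the usual 512-byte sector accounting (sector delta >> 1 gives KiB), the arithmetic looks like the worked example below, with made-up counter values.

#include <stdio.h>

int main(void)
{
        /* Hypothetical counters, in 512-byte sectors and KiB respectively. */
        unsigned long long sectors_written_start = 1000000ULL;  /* sampled at mount   */
        unsigned long long sectors_written_now   = 1260000ULL;  /* current part_stat  */
        unsigned long long kbytes_written        = 52428800ULL; /* from the checkpoint */

        /* BD_PART_WRITTEN-style accounting: sector delta >> 1 gives KiB. */
        unsigned long long since_mount_kb =
                (sectors_written_now - sectors_written_start) >> 1;

        printf("lifetime_write_kbytes = %llu\n", kbytes_written + since_mount_kb);
        return 0;
}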
index 10f1e784fa2390148aaa37f527526162be2d724d..06a72dc0191a022049bbf48ad26e127bb6211c02 100644 (file)
@@ -300,7 +300,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
 
                if (ipage) {
                        inline_addr = inline_xattr_addr(ipage);
-                       f2fs_wait_on_page_writeback(ipage, NODE);
+                       f2fs_wait_on_page_writeback(ipage, NODE, true);
                } else {
                        page = get_node_page(sbi, inode->i_ino);
                        if (IS_ERR(page)) {
@@ -308,7 +308,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
                                return PTR_ERR(page);
                        }
                        inline_addr = inline_xattr_addr(page);
-                       f2fs_wait_on_page_writeback(page, NODE);
+                       f2fs_wait_on_page_writeback(page, NODE, true);
                }
                memcpy(inline_addr, txattr_addr, inline_size);
                f2fs_put_page(page, 1);
@@ -329,7 +329,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
                        return PTR_ERR(xpage);
                }
                f2fs_bug_on(sbi, new_nid);
-               f2fs_wait_on_page_writeback(xpage, NODE);
+               f2fs_wait_on_page_writeback(xpage, NODE, true);
        } else {
                struct dnode_of_data dn;
                set_new_dnode(&dn, inode, NULL, NULL, new_nid);
index 93f07465e5a682618844fa53bba03c859526141e..aa016e4b8bec976d57b8a36cdbb3c12c7e8e39ff 100644 (file)
@@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
         * the first place, mapping->nr_pages will always be zero.
         */
        if (mapping->nrpages) {
-               loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+               loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1);
                loff_t len = iov_iter_count(iter);
                loff_t end = PAGE_ALIGN(offset + len) - 1;
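The one-character gfs2_direct_IO fix above swaps `offset & (PAGE_CACHE_SIZE - 1)` (the offset within its page) for `offset & ~(PAGE_CACHE_SIZE - 1)` (the page-aligned start of the range to invalidate). A quick worked example, assuming a 4 KiB page:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;         /* assumed PAGE_CACHE_SIZE */
        unsigned long offset = 0x12345;         /* arbitrary DIO offset    */

        unsigned long within = offset & (page_size - 1);   /* old: 0x345   */
        unsigned long lstart = offset & ~(page_size - 1);  /* new: 0x12000 */

        printf("offset within page = %#lx, page-aligned start = %#lx\n",
               within, lstart);
        return 0;
}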
 
index 6a92592304fb54a73b39abf1bf41c99551ca80d1..d4014af4f064f60d05345e72cdfb6953c3abb9d9 100644 (file)
@@ -798,7 +798,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index,
        int error;
 
        error = get_leaf_nr(dip, index, &leaf_no);
-       if (!error)
+       if (!IS_ERR_VALUE(error))
                error = get_leaf(dip, leaf_no, bh_out);
 
        return error;
@@ -1014,7 +1014,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
 
        index = name->hash >> (32 - dip->i_depth);
        error = get_leaf_nr(dip, index, &leaf_no);
-       if (error)
+       if (IS_ERR_VALUE(error))
                return error;
 
        /*  Get the old leaf block  */
index a4ff7b56f5cdbabc8a1e12830bf0e36221a4c53e..7f0257309b3e8a5ef3f180b400fe5e39f0143e65 100644 (file)
@@ -572,6 +572,12 @@ static void delete_work_func(struct work_struct *work)
        struct inode *inode;
        u64 no_addr = gl->gl_name.ln_number;
 
+       /* If someone's using this glock to create a new dinode, the block must
+          have been freed by another node, then re-used, in which case our
+          iopen callback is too late after the fact. Ignore it. */
+       if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
+               goto out;
+
        ip = gl->gl_object;
        /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
 
@@ -583,6 +589,7 @@ static void delete_work_func(struct work_struct *work)
                d_prune_aliases(inode);
                iput(inode);
        }
+out:
        gfs2_glock_put(gl);
 }
 
@@ -1015,6 +1022,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 
        list_del_init(&gh->gh_list);
+       clear_bit(HIF_HOLDER, &gh->gh_iflags);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
index 845fb09cc60668037b67edf243c2ab05c6f9e6e4..a6a3389a07fc67187288e5bb82a3b19d4996bf37 100644 (file)
@@ -328,6 +328,7 @@ enum {
        GLF_LRU                         = 13,
        GLF_OBJECT                      = 14, /* Used only for tracing */
        GLF_BLOCKING                    = 15,
+       GLF_INODE_CREATING              = 16, /* Inode creation occurring */
 };
 
 struct gfs2_glock {
index 352f958769e15b150575c54b09d82b51d34f258e..5fdb319fc2426925edb0ebdb35b7d19adacf705c 100644 (file)
@@ -592,7 +592,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        struct inode *inode = NULL;
        struct gfs2_inode *dip = GFS2_I(dir), *ip;
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
-       struct gfs2_glock *io_gl;
+       struct gfs2_glock *io_gl = NULL;
        int error, free_vfs_inode = 1;
        u32 aflags = 0;
        unsigned blocks = 1;
@@ -729,6 +729,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (error)
                goto fail_gunlock2;
 
+       BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
+
        error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
        if (error)
                goto fail_gunlock2;
@@ -771,12 +773,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        }
        gfs2_glock_dq_uninit(ghs);
        gfs2_glock_dq_uninit(ghs + 1);
+       clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
        return error;
 
 fail_gunlock3:
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
        gfs2_glock_put(io_gl);
 fail_gunlock2:
+       if (io_gl)
+               clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
        gfs2_glock_dq_uninit(ghs + 1);
 fail_free_inode:
        if (ip->i_gl)
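The GLF_INODE_CREATING flag added above is set while gfs2_create_inode() holds the iopen glock for a brand-new dinode and cleared again on the success and error paths; delete_work_func() now ignores iopen callbacks that arrive during that window, since they can only mean the block was freed and reused elsewhere. A tiny model of this guard-flag pattern, with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the iopen glock; one bool models GLF_INODE_CREATING. */
struct toy_glock { bool inode_creating; };

/* Models delete_work_func(): a callback racing with creation is ignored. */
static void delete_work(struct toy_glock *gl)
{
        if (gl->inode_creating) {
                puts("iopen callback ignored: inode creation in progress");
                return;
        }
        puts("processing iopen delete callback");
}

/* Models gfs2_create_inode(): flag set around the creation window,
 * cleared on both the success and the failure paths. */
static void create_inode(struct toy_glock *io_gl)
{
        io_gl->inode_creating = true;
        delete_work(io_gl);             /* a racing callback hits the guard */
        io_gl->inode_creating = false;
}

int main(void)
{
        struct toy_glock gl = { false };

        create_inode(&gl);
        delete_work(&gl);               /* after creation, callbacks proceed */
        return 0;
}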
index 8f960a51a9a0d72b238e244eca9a336eacbd77f9..f8a0cd821290976bcf45846c11123bb31997bb61 100644 (file)
@@ -1551,12 +1551,16 @@ static void gfs2_evict_inode(struct inode *inode)
                        goto out_truncate;
        }
 
-       ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-       gfs2_glock_dq_wait(&ip->i_iopen_gh);
-       gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
-       error = gfs2_glock_nq(&ip->i_iopen_gh);
-       if (error)
-               goto out_truncate;
+       if (ip->i_iopen_gh.gh_gl &&
+           test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+               gfs2_glock_dq_wait(&ip->i_iopen_gh);
+               gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
+                                  &ip->i_iopen_gh);
+               error = gfs2_glock_nq(&ip->i_iopen_gh);
+               if (error)
+                       goto out_truncate;
+       }
 
        /* Case 1 starts here */
 
@@ -1606,11 +1610,13 @@ out_unlock:
        if (gfs2_rs_active(&ip->i_res))
                gfs2_rs_deltree(&ip->i_res);
 
-       if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-               gfs2_glock_dq_wait(&ip->i_iopen_gh);
+       if (ip->i_iopen_gh.gh_gl) {
+               if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+                       ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+                       gfs2_glock_dq_wait(&ip->i_iopen_gh);
+               }
+               gfs2_holder_uninit(&ip->i_iopen_gh);
        }
-       gfs2_holder_uninit(&ip->i_iopen_gh);
        gfs2_glock_dq_uninit(&gh);
        if (error && error != GLR_TRYFAILED && error != -EROFS)
                fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
index 5bcd92d50e820ab6ccfa5e7789ee01152176b857..0cb1abd535e38469201478ee597e74045e593bc5 100644 (file)
@@ -1215,7 +1215,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
                                        hdr->pgio_mirror_idx + 1,
                                        &hdr->pgio_mirror_idx))
                        goto out_eagain;
-               set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+               set_bit(NFS_LAYOUT_RETURN_REQUESTED,
                        &hdr->lseg->pls_layout->plh_flags);
                pnfs_read_resend_pnfs(hdr);
                return task->tk_status;
index 29898a9550fa6f72541cbedc7ac1f0270c5a7c5c..eb370460ce203c11c8461e88115cc0fab9c417fa 100644 (file)
@@ -412,7 +412,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                                         OP_ILLEGAL, GFP_NOIO);
                if (!fail_return) {
                        if (ff_layout_has_available_ds(lseg))
-                               set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+                               set_bit(NFS_LAYOUT_RETURN_REQUESTED,
                                        &lseg->pls_layout->plh_flags);
                        else
                                pnfs_error_mark_layout_for_return(ino, lseg);
index 4bfc33ad05637f58f9f0b675f058fd90085d4271..e810ab57a5115f4e30d6f0e4aa2f9668ae616740 100644 (file)
@@ -4707,20 +4707,17 @@ out:
 /*
  * The getxattr API returns the required buffer length when called with a
  * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
- * the required buf.  On a NULL buf, we send a page of data to the server
- * guessing that the ACL request can be serviced by a page. If so, we cache
- * up to the page of ACL data, and the 2nd call to getxattr is serviced by
- * the cache. If not so, we throw away the page, and cache the required
- * length. The next getxattr call will then produce another round trip to
- * the server, this time with the input buf of the required size.
+ * the required buf. Cache the result from the first getxattr call to avoid
+ * sending another RPC.
  */
 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
 {
-       struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+       /* enough pages to hold ACL data plus the bitmap and acl length */
+       struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
        struct nfs_getaclargs args = {
                .fh = NFS_FH(inode),
+               /* The xdr layer may allocate pages here. */
                .acl_pages = pages,
-               .acl_len = buflen,
        };
        struct nfs_getaclres res = {
                .acl_len = buflen,
@@ -4730,31 +4727,24 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
        int ret = -ENOMEM, i;
 
-       /* As long as we're doing a round trip to the server anyway,
-        * let's be prepared for a page of acl data. */
-       if (npages == 0)
-               npages = 1;
-       if (npages > ARRAY_SIZE(pages))
-               return -ERANGE;
-
-       for (i = 0; i < npages; i++) {
-               pages[i] = alloc_page(GFP_KERNEL);
-               if (!pages[i])
-                       goto out_free;
-       }
+       /*
+        * There will be some data returned by the server, but how much is
+        * not yet known. Allocate one page and let the XDR layer allocate
+        * more if needed.
+        */
+       pages[0] = alloc_page(GFP_KERNEL);
 
        /* for decoding across pages */
        res.acl_scratch = alloc_page(GFP_KERNEL);
        if (!res.acl_scratch)
                goto out_free;
 
-       args.acl_len = npages * PAGE_SIZE;
+       args.acl_len = ARRAY_SIZE(pages) << PAGE_SHIFT;
 
-       dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
-               __func__, buf, buflen, npages, args.acl_len);
+       dprintk("%s  buf %p buflen %zu args.acl_len %zu\n",
+               __func__, buf, buflen, args.acl_len);
        ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
                             &msg, &args.seq_args, &res.seq_res, 0);
        if (ret)
@@ -4779,7 +4769,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
 out_ok:
        ret = res.acl_len;
 out_free:
-       for (i = 0; i < npages; i++)
+       for (i = 0; i < ARRAY_SIZE(pages) && pages[i]; i++)
                if (pages[i])
                        __free_page(pages[i]);
        if (res.acl_scratch)
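The rewritten comment above describes the userspace side of this ACL path: getxattr() is first called with a NULL buffer to learn the required length, then again with a real buffer, and the client now caches the first reply so the second call needs no extra RPC. A plain userspace illustration of that two-call pattern; the path and attribute name are placeholders.

#include <stdio.h>
#include <stdlib.h>
#include <sys/xattr.h>

int main(void)
{
        /* Placeholder path and attribute name. */
        const char *path = "/mnt/nfs/file";
        const char *name = "system.nfs4_acl";

        /* First call: NULL buffer, just ask for the required length. */
        ssize_t len = getxattr(path, name, NULL, 0);
        if (len < 0) {
                perror("getxattr (size probe)");
                return 1;
        }

        char *buf = malloc(len ? (size_t)len : 1);
        if (!buf)
                return 1;

        /* Second call: with the result cached, this is served without a new RPC. */
        ssize_t got = getxattr(path, name, buf, (size_t)len);
        if (got < 0) {
                perror("getxattr (fetch)");
                free(buf);
                return 1;
        }

        printf("acl is %zd bytes\n", got);
        free(buf);
        return 0;
}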
index a3592cc34a20b76341c9c9b9af3378bdddebd477..482b6e94bb37cd1f3abaca235e94d961d9c15cb4 100644 (file)
@@ -52,9 +52,7 @@ static DEFINE_SPINLOCK(pnfs_spinlock);
  */
 static LIST_HEAD(pnfs_modules_tbl);
 
-static int
-pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
-                      enum pnfs_iomode iomode, bool sync);
+static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
 
 /* Return the registered pnfs layout driver module matching given id */
 static struct pnfs_layoutdriver_type *
@@ -243,6 +241,8 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 {
        struct inode *inode = lo->plh_inode;
 
+       pnfs_layoutreturn_before_put_layout_hdr(lo);
+
        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                if (!list_empty(&lo->plh_segs))
                        WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
@@ -345,58 +345,6 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
 }
 
-/* Return true if layoutreturn is needed */
-static bool
-pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
-                       struct pnfs_layout_segment *lseg)
-{
-       struct pnfs_layout_segment *s;
-
-       if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
-               return false;
-
-       list_for_each_entry(s, &lo->plh_segs, pls_list)
-               if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
-                       return false;
-
-       return true;
-}
-
-static bool
-pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
-{
-       if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
-               return false;
-       lo->plh_return_iomode = 0;
-       pnfs_get_layout_hdr(lo);
-       clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
-       return true;
-}
-
-static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
-               struct pnfs_layout_hdr *lo, struct inode *inode)
-{
-       lo = lseg->pls_layout;
-       inode = lo->plh_inode;
-
-       spin_lock(&inode->i_lock);
-       if (pnfs_layout_need_return(lo, lseg)) {
-               nfs4_stateid stateid;
-               enum pnfs_iomode iomode;
-               bool send;
-
-               nfs4_stateid_copy(&stateid, &lo->plh_stateid);
-               iomode = lo->plh_return_iomode;
-               send = pnfs_prepare_layoutreturn(lo);
-               spin_unlock(&inode->i_lock);
-               if (send) {
-                       /* Send an async layoutreturn so we dont deadlock */
-                       pnfs_send_layoutreturn(lo, &stateid, iomode, false);
-               }
-       } else
-               spin_unlock(&inode->i_lock);
-}
-
 void
 pnfs_put_lseg(struct pnfs_layout_segment *lseg)
 {
@@ -410,15 +358,8 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
                atomic_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 
-       /* Handle the case where refcount != 1 */
-       if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
-               return;
-
        lo = lseg->pls_layout;
        inode = lo->plh_inode;
-       /* Do we need a layoutreturn? */
-       if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
-               pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
 
        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
@@ -937,6 +878,17 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
+static bool
+pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+{
+       if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+               return false;
+       lo->plh_return_iomode = 0;
+       pnfs_get_layout_hdr(lo);
+       clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+       return true;
+}
+
 static int
 pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
                       enum pnfs_iomode iomode, bool sync)
@@ -971,6 +923,48 @@ out:
        return status;
 }
 
+/* Return true if layoutreturn is needed */
+static bool
+pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
+{
+       struct pnfs_layout_segment *s;
+
+       if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+               return false;
+
+       /* Defer layoutreturn until all lsegs are done */
+       list_for_each_entry(s, &lo->plh_segs, pls_list) {
+               if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
+                       return false;
+       }
+
+       return true;
+}
+
+static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+       struct inode *inode = lo->plh_inode;
+
+       if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+               return;
+       spin_lock(&inode->i_lock);
+       if (pnfs_layout_need_return(lo)) {
+               nfs4_stateid stateid;
+               enum pnfs_iomode iomode;
+               bool send;
+
+               nfs4_stateid_copy(&stateid, &lo->plh_stateid);
+               iomode = lo->plh_return_iomode;
+               send = pnfs_prepare_layoutreturn(lo);
+               spin_unlock(&inode->i_lock);
+               if (send) {
+                       /* Send an async layoutreturn so we don't deadlock */
+                       pnfs_send_layoutreturn(lo, &stateid, iomode, false);
+               }
+       } else
+               spin_unlock(&inode->i_lock);
+}
+
 /*
  * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
  * when the layout segment list is empty.
@@ -1091,7 +1085,7 @@ bool pnfs_roc(struct inode *ino)
 
        nfs4_stateid_copy(&stateid, &lo->plh_stateid);
        /* always send layoutreturn if being marked so */
-       if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+       if (test_and_clear_bit(NFS_LAYOUT_RETURN_REQUESTED,
                                   &lo->plh_flags))
                layoutreturn = pnfs_prepare_layoutreturn(lo);
 
@@ -1772,7 +1766,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
                        pnfs_set_plh_return_iomode(lo, return_range->iomode);
                        if (!mark_lseg_invalid(lseg, tmp_list))
                                remaining++;
-                       set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+                       set_bit(NFS_LAYOUT_RETURN_REQUESTED,
                                        &lo->plh_flags);
                }
        return remaining;
index 9f4e2a47f4aa4ff41a590178dde2ddf160ae244f..1ac1db5f6dadb6508cf8658a53385add279eb9fa 100644 (file)
@@ -94,8 +94,8 @@ enum {
        NFS_LAYOUT_RO_FAILED = 0,       /* get ro layout failed stop trying */
        NFS_LAYOUT_RW_FAILED,           /* get rw layout failed stop trying */
        NFS_LAYOUT_BULK_RECALL,         /* bulk recall affecting layout */
-       NFS_LAYOUT_RETURN,              /* Return this layout ASAP */
-       NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */
+       NFS_LAYOUT_RETURN,              /* layoutreturn in progress */
+       NFS_LAYOUT_RETURN_REQUESTED,    /* Return this layout ASAP */
        NFS_LAYOUT_INVALID_STID,        /* layout stateid id is invalid */
        NFS_LAYOUT_FIRST_LAYOUTGET,     /* Serialize first layoutget */
 };
index 794fd1587f34a3bc210d60770525a02f3cd59aba..5dcc5f5a842ea5eaf4a0ea471d87c54326c0fe16 100644 (file)
@@ -620,7 +620,7 @@ bail:
  * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
  * to protect io on one node from truncation on another.
  */
-static void ocfs2_dio_end_io(struct kiocb *iocb,
+static int ocfs2_dio_end_io(struct kiocb *iocb,
                             loff_t offset,
                             ssize_t bytes,
                             void *private)
@@ -628,6 +628,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
        struct inode *inode = file_inode(iocb->ki_filp);
        int level;
 
+       if (bytes <= 0)
+               return 0;
+
        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
 
@@ -644,6 +647,8 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
                level = ocfs2_iocb_rw_locked_level(iocb);
                ocfs2_rw_unlock(inode, level);
        }
+
+       return 0;
 }
 
 static int ocfs2_releasepage(struct page *page, gfp_t wait)
index a3cc6d2fc896cea05a8605dcc84e09b7840a5e1a..a76b9ea7722e6d7d00d8c0c58217a4a0387f0ff0 100644 (file)
@@ -1254,15 +1254,15 @@ static const struct file_operations o2hb_debug_fops = {
 
 void o2hb_exit(void)
 {
-       kfree(o2hb_db_livenodes);
-       kfree(o2hb_db_liveregions);
-       kfree(o2hb_db_quorumregions);
-       kfree(o2hb_db_failedregions);
        debugfs_remove(o2hb_debug_failedregions);
        debugfs_remove(o2hb_debug_quorumregions);
        debugfs_remove(o2hb_debug_liveregions);
        debugfs_remove(o2hb_debug_livenodes);
        debugfs_remove(o2hb_debug_dir);
+       kfree(o2hb_db_livenodes);
+       kfree(o2hb_db_liveregions);
+       kfree(o2hb_db_quorumregions);
+       kfree(o2hb_db_failedregions);
 }
 
 static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
@@ -1438,13 +1438,15 @@ static void o2hb_region_release(struct config_item *item)
 
        kfree(reg->hr_slots);
 
-       kfree(reg->hr_db_regnum);
-       kfree(reg->hr_db_livenodes);
        debugfs_remove(reg->hr_debug_livenodes);
        debugfs_remove(reg->hr_debug_regnum);
        debugfs_remove(reg->hr_debug_elapsed_time);
        debugfs_remove(reg->hr_debug_pinned);
        debugfs_remove(reg->hr_debug_dir);
+       kfree(reg->hr_db_livenodes);
+       kfree(reg->hr_db_regnum);
+       kfree(reg->hr_debug_elapsed_time);
+       kfree(reg->hr_debug_pinned);
 
        spin_lock(&o2hb_live_lock);
        list_del(&reg->hr_all_item);
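
The reordering above is a use-after-free fix: each of those debugfs files was created with a kmalloc'ed buffer as its file data, so the buffer must outlive the debugfs entry. A hedged sketch of the teardown rule the patch enforces, with hypothetical names:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/slab.h>

/* Hypothetical pairing of a debugfs file with its backing buffer. */
struct demo_state {
	struct dentry	*dbg_file;
	char		*dbg_buf;	/* handed to debugfs as file data */
};

static int demo_create(struct demo_state *st, struct dentry *dir,
		       const struct file_operations *fops)
{
	st->dbg_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!st->dbg_buf)
		return -ENOMEM;
	st->dbg_file = debugfs_create_file("demo", 0444, dir,
					   st->dbg_buf, fops);
	return 0;
}

static void demo_destroy(struct demo_state *st)
{
	/* unregister the file first, so no reader can still reach dbg_buf */
	debugfs_remove(st->dbg_file);
	/* only then free the buffer the file's ->data pointed at */
	kfree(st->dbg_buf);
}
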
index c5bdf02c213bd7ad5c086331a1ea8194bde40578..b94a425f0175fa84818d78742659c42293d96dba 100644 (file)
@@ -2367,6 +2367,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
                                                break;
                                        }
                                }
+                               dlm_lockres_clear_refmap_bit(dlm, res,
+                                               dead_node);
                                spin_unlock(&res->spinlock);
                                continue;
                        }
index 6cb019b7c6a83c4ec449baff12652e84b5fca06a..a52a2dbc064e2d87919f934504fb1f156c73d176 100644 (file)
@@ -2035,6 +2035,8 @@ DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_release_dquot);
 
 DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_acquire_dquot);
 
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_get_next_id);
+
 DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_mark_dquot_dirty);
 
 /* End of trace events for fs/ocfs2/quota_global.c. */
index 9c9dd30bc94541fa89baed2b0ac8c2a3c8a72e31..91bc674203ed6c8d138b00f1edede8f11f2816c1 100644 (file)
@@ -860,6 +860,30 @@ out:
        return status;
 }
 
+static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+       int type = qid->type;
+       struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
+       int status = 0;
+
+       trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+       status = ocfs2_lock_global_qf(info, 0);
+       if (status < 0)
+               goto out;
+       status = ocfs2_qinfo_lock(info, 0);
+       if (status < 0)
+               goto out_global;
+       status = qtree_get_next_id(&info->dqi_gi, qid);
+       ocfs2_qinfo_unlock(info, 0);
+out_global:
+       ocfs2_unlock_global_qf(info, 0);
+out:
+       /* Avoid logging ENOENT since it just means there isn't next ID */
+       if (status && status != -ENOENT)
+               mlog_errno(status);
+       return status;
+}
+
 static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
 {
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
@@ -968,4 +992,5 @@ const struct dquot_operations ocfs2_quota_operations = {
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
+       .get_next_id    = ocfs2_get_next_id,
 };
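
The new .get_next_id hook is what lets the VFS quota layer answer Q_GETNEXTQUOTA ("give me the next allocated quota ID at or above this one") on ocfs2, so tools can enumerate quotas without probing every possible ID. A userspace sketch of that iteration, assuming quota headers recent enough to define Q_GETNEXTQUOTA and struct if_nextdqblk, and with the device path as a placeholder:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/sda1"; /* placeholder */
	struct if_nextdqblk dq;
	unsigned int id = 0;

	for (;;) {
		memset(&dq, 0, sizeof(dq));
		if (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), dev, id,
			     (caddr_t)&dq) < 0) {
			if (errno != ENOENT && errno != ESRCH)
				perror("quotactl");
			break;		/* no further IDs (or hard error) */
		}
		printf("uid %u: %llu bytes in use\n", dq.dqb_id,
		       (unsigned long long)dq.dqb_curspace);
		id = dq.dqb_id + 1;	/* continue after the returned ID */
	}
	return 0;
}
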
diff --git a/fs/orangefs/Kconfig b/fs/orangefs/Kconfig
new file mode 100644 (file)
index 0000000..1554c02
--- /dev/null
@@ -0,0 +1,6 @@
+config ORANGEFS_FS
+       tristate "ORANGEFS (Powered by PVFS) support"
+       select FS_POSIX_ACL
+       help
+          Orange is a parallel file system designed for use on high end
+          computing (HEC) systems.
diff --git a/fs/orangefs/Makefile b/fs/orangefs/Makefile
new file mode 100644 (file)
index 0000000..a9d6a96
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for the ORANGEFS filesystem.
+#
+
+obj-$(CONFIG_ORANGEFS_FS) += orangefs.o
+
+orangefs-objs := acl.o file.o orangefs-cache.o orangefs-utils.o xattr.o \
+                dcache.o inode.o orangefs-sysfs.o orangefs-mod.o super.o \
+                devorangefs-req.o namei.o symlink.o dir.o orangefs-bufmap.o \
+                orangefs-debugfs.o waitqueue.o
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
new file mode 100644 (file)
index 0000000..03f89db
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/fs_struct.h>
+
+struct posix_acl *orangefs_get_acl(struct inode *inode, int type)
+{
+       struct posix_acl *acl;
+       int ret;
+       char *key = NULL, *value = NULL;
+
+       switch (type) {
+       case ACL_TYPE_ACCESS:
+               key = ORANGEFS_XATTR_NAME_ACL_ACCESS;
+               break;
+       case ACL_TYPE_DEFAULT:
+               key = ORANGEFS_XATTR_NAME_ACL_DEFAULT;
+               break;
+       default:
+               gossip_err("orangefs_get_acl: bogus value of type %d\n", type);
+               return ERR_PTR(-EINVAL);
+       }
+       /*
+        * Rather than incurring a network call just to determine the exact
+        * length of the attribute, I just allocate a maximum-length buffer.
+        * Conceivably, we could pass NULL to orangefs_inode_getxattr() to
+        * probe the length of the value, but I don't do that for now.
+        */
+       value = kmalloc(ORANGEFS_MAX_XATTR_VALUELEN, GFP_KERNEL);
+       if (value == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       gossip_debug(GOSSIP_ACL_DEBUG,
+                    "inode %pU, key %s, type %d\n",
+                    get_khandle_from_ino(inode),
+                    key,
+                    type);
+       ret = orangefs_inode_getxattr(inode,
+                                  "",
+                                  key,
+                                  value,
+                                  ORANGEFS_MAX_XATTR_VALUELEN);
+       /* if the key exists, convert it to an in-memory rep */
+       if (ret > 0) {
+               acl = posix_acl_from_xattr(&init_user_ns, value, ret);
+       } else if (ret == -ENODATA || ret == -ENOSYS) {
+               acl = NULL;
+       } else {
+               gossip_err("inode %pU retrieving acl's failed with error %d\n",
+                          get_khandle_from_ino(inode),
+                          ret);
+               acl = ERR_PTR(ret);
+       }
+       /* kfree(NULL) is safe, so don't worry if value ever got used */
+       kfree(value);
+       return acl;
+}
+
+int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       int error = 0;
+       void *value = NULL;
+       size_t size = 0;
+       const char *name = NULL;
+
+       switch (type) {
+       case ACL_TYPE_ACCESS:
+               name = ORANGEFS_XATTR_NAME_ACL_ACCESS;
+               if (acl) {
+                       umode_t mode = inode->i_mode;
+                       /*
+                        * can we represent this with the traditional file
+                        * mode permission bits?
+                        */
+                       error = posix_acl_equiv_mode(acl, &mode);
+                       if (error < 0) {
+                               gossip_err("%s: posix_acl_equiv_mode err: %d\n",
+                                          __func__,
+                                          error);
+                               return error;
+                       }
+
+                       if (inode->i_mode != mode)
+                               SetModeFlag(orangefs_inode);
+                       inode->i_mode = mode;
+                       mark_inode_dirty_sync(inode);
+                       if (error == 0)
+                               acl = NULL;
+               }
+               break;
+       case ACL_TYPE_DEFAULT:
+               name = ORANGEFS_XATTR_NAME_ACL_DEFAULT;
+               break;
+       default:
+               gossip_err("%s: invalid type %d!\n", __func__, type);
+               return -EINVAL;
+       }
+
+       gossip_debug(GOSSIP_ACL_DEBUG,
+                    "%s: inode %pU, key %s type %d\n",
+                    __func__, get_khandle_from_ino(inode),
+                    name,
+                    type);
+
+       if (acl) {
+               size = posix_acl_xattr_size(acl->a_count);
+               value = kmalloc(size, GFP_KERNEL);
+               if (!value)
+                       return -ENOMEM;
+
+               error = posix_acl_to_xattr(&init_user_ns, acl, value, size);
+               if (error < 0)
+                       goto out;
+       }
+
+       gossip_debug(GOSSIP_ACL_DEBUG,
+                    "%s: name %s, value %p, size %zd, acl %p\n",
+                    __func__, name, value, size, acl);
+       /*
+        * Go ahead and set the extended attribute now. NOTE: if acl is
+        * NULL, then value will be NULL and size will be 0, which
+        * translates to a removexattr. However, we don't want removexattr
+        * to complain if the attribute does not exist.
+        */
+       error = orangefs_inode_setxattr(inode, "", name, value, size, 0);
+
+out:
+       kfree(value);
+       if (!error)
+               set_cached_acl(inode, type, acl);
+       return error;
+}
+
+int orangefs_init_acl(struct inode *inode, struct inode *dir)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct posix_acl *default_acl, *acl;
+       umode_t mode = inode->i_mode;
+       int error = 0;
+
+       ClearModeFlag(orangefs_inode);
+
+       error = posix_acl_create(dir, &mode, &default_acl, &acl);
+       if (error)
+               return error;
+
+       if (default_acl) {
+               error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+               posix_acl_release(default_acl);
+       }
+
+       if (acl) {
+               if (!error)
+                       error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+               posix_acl_release(acl);
+       }
+
+       /* If mode of the inode was changed, then do a forcible ->setattr */
+       if (mode != inode->i_mode) {
+               SetModeFlag(orangefs_inode);
+               inode->i_mode = mode;
+               orangefs_flush_inode(inode);
+       }
+
+       return error;
+}
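
In orangefs_set_acl() above, the return value of posix_acl_equiv_mode() decides whether an ACCESS ACL is stored at all: negative means error, zero means the ACL is fully captured by the mode bits (so the xattr can be dropped and acl set to NULL), and a positive value means an extended ACL is genuinely needed. A small sketch of that contract with a hypothetical caller, not part of the patch:

#include <linux/fs.h>
#include <linux/posix_acl.h>

/* Hypothetical helper spelling out the posix_acl_equiv_mode() contract. */
static int demo_store_access_acl(struct inode *inode, struct posix_acl *acl,
				 int (*store_xattr)(struct inode *,
						    struct posix_acl *))
{
	umode_t mode = inode->i_mode;
	int ret = posix_acl_equiv_mode(acl, &mode);

	if (ret < 0)
		return ret;		/* hard error */

	inode->i_mode = mode;		/* mode bits are updated either way */
	if (ret == 0)
		return 0;		/* ACL == mode bits: nothing to store */

	return store_xattr(inode, acl);	/* an extended ACL is really needed */
}
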
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
new file mode 100644 (file)
index 0000000..a6911db
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Implementation of dentry (directory cache) functions.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+/* Returns 1 if dentry can still be trusted, else 0. */
+static int orangefs_revalidate_lookup(struct dentry *dentry)
+{
+       struct dentry *parent_dentry = dget_parent(dentry);
+       struct inode *parent_inode = parent_dentry->d_inode;
+       struct orangefs_inode_s *parent = ORANGEFS_I(parent_inode);
+       struct inode *inode = dentry->d_inode;
+       struct orangefs_kernel_op_s *new_op;
+       int ret = 0;
+       int err = 0;
+
+       gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: attempting lookup.\n", __func__);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP);
+       if (!new_op)
+               goto out_put_parent;
+
+       new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;
+       new_op->upcall.req.lookup.parent_refn = parent->refn;
+       strncpy(new_op->upcall.req.lookup.d_name,
+               dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+
+       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                    "%s:%s:%d interrupt flag [%d]\n",
+                    __FILE__,
+                    __func__,
+                    __LINE__,
+                    get_interruptible_flag(parent_inode));
+
+       err = service_operation(new_op, "orangefs_lookup",
+                       get_interruptible_flag(parent_inode));
+
+       /* Positive dentry: reject if error or not the same inode. */
+       if (inode) {
+               if (err) {
+                       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                           "%s:%s:%d lookup failure.\n",
+                           __FILE__, __func__, __LINE__);
+                       goto out_drop;
+               }
+               if (!match_handle(new_op->downcall.resp.lookup.refn.khandle,
+                   inode)) {
+                       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                           "%s:%s:%d no match.\n",
+                           __FILE__, __func__, __LINE__);
+                       goto out_drop;
+               }
+
+       /* Negative dentry: reject if success or error other than ENOENT. */
+       } else {
+               gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: negative dentry.\n",
+                   __func__);
+               if (!err || err != -ENOENT) {
+                       if (new_op->downcall.status != 0)
+                               gossip_debug(GOSSIP_DCACHE_DEBUG,
+                                   "%s:%s:%d lookup failure.\n",
+                                   __FILE__, __func__, __LINE__);
+                       goto out_drop;
+               }
+       }
+
+       ret = 1;
+out_release_op:
+       op_release(new_op);
+out_put_parent:
+       dput(parent_dentry);
+       return ret;
+out_drop:
+       gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d revalidate failed\n",
+           __FILE__, __func__, __LINE__);
+       d_drop(dentry);
+       goto out_release_op;
+}
+
+/*
+ * Verify that dentry is valid.
+ *
+ * Should return 1 if dentry can still be trusted, else 0.
+ */
+static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       int ret;
+
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: called on dentry %p.\n",
+                    __func__, dentry);
+
+       /* skip root handle lookups. */
+       if (dentry->d_inode && is_root_handle(dentry->d_inode))
+               return 1;
+
+       /*
+        * If this passes, the positive dentry still exists or the negative
+        * dentry still does not exist.
+        */
+       if (!orangefs_revalidate_lookup(dentry)) {
+               d_drop(dentry);
+               return 0;
+       }
+
+       /* We do not need to continue with negative dentries. */
+       if (!dentry->d_inode)
+               goto out;
+
+       /* Now we must perform a getattr to validate the inode contents. */
+
+       ret = orangefs_inode_getattr(dentry->d_inode,
+           ORANGEFS_ATTR_SYS_TYPE|ORANGEFS_ATTR_SYS_LNK_TARGET, 1);
+       if (ret < 0) {
+               gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d getattr failure.\n",
+                   __FILE__, __func__, __LINE__);
+               d_drop(dentry);
+               return 0;
+       }
+       if (ret == 0) {
+               d_drop(dentry);
+               return 0;
+       }
+
+out:
+       gossip_debug(GOSSIP_DCACHE_DEBUG,
+           "%s: negative dentry or positive dentry and inode valid.\n",
+           __func__);
+       return 1;
+}
+
+const struct dentry_operations orangefs_dentry_operations = {
+       .d_revalidate = orangefs_d_revalidate,
+};
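
The ->d_revalidate() contract implemented above is worth keeping in mind when reading orangefs_d_revalidate(): -ECHILD bails out of RCU-walk (the VFS retries in ref-walk mode, where sleeping is allowed), 0 tells the VFS the dentry is stale and should be dropped and looked up again, and 1 says it can still be trusted. A minimal, hypothetical skeleton:

#include <linux/dcache.h>
#include <linux/namei.h>

/* Hypothetical staleness check; stands in for a real lookup/getattr. */
static bool demo_dentry_still_valid(struct dentry *dentry)
{
	return true;
}

static int demo_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	if (flags & LOOKUP_RCU)
		return -ECHILD;	/* cannot sleep here; force a ref-walk retry */

	if (!demo_dentry_still_valid(dentry))
		return 0;	/* VFS drops the dentry and looks it up again */

	return 1;		/* dentry (positive or negative) still good */
}

static const struct dentry_operations demo_dentry_operations = {
	.d_revalidate = demo_d_revalidate,
};
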
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
new file mode 100644 (file)
index 0000000..37278f5
--- /dev/null
@@ -0,0 +1,933 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * Changes by Acxiom Corporation to add protocol version to kernel
+ * communication, Copyright Acxiom Corporation, 2005.
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-dev-proto.h"
+#include "orangefs-bufmap.h"
+
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+/* this file implements the /dev/pvfs2-req device node */
+
+static int open_access_count;
+
+#define DUMP_DEVICE_ERROR()                                                   \
+do {                                                                          \
+       gossip_err("*****************************************************\n");\
+       gossip_err("ORANGEFS Device Error:  You cannot open the device file ");  \
+       gossip_err("\n/dev/%s more than once.  Please make sure that\nthere " \
+                  "are no ", ORANGEFS_REQDEVICE_NAME);                          \
+       gossip_err("instances of a program using this device\ncurrently "     \
+                  "running. (You must verify this!)\n");                     \
+       gossip_err("For example, you can use the lsof program as follows:\n");\
+       gossip_err("'lsof | grep %s' (run this as root)\n",                   \
+                  ORANGEFS_REQDEVICE_NAME);                                     \
+       gossip_err("  open_access_count = %d\n", open_access_count);          \
+       gossip_err("*****************************************************\n");\
+} while (0)
+
+static int hash_func(__u64 tag, int table_size)
+{
+       return do_div(tag, (unsigned int)table_size);
+}
+
+static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
+{
+       int index = hash_func(op->tag, hash_table_size);
+
+       list_add_tail(&op->list, &htable_ops_in_progress[index]);
+}
+
+static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
+{
+       struct orangefs_kernel_op_s *op, *next;
+       int index;
+
+       index = hash_func(tag, hash_table_size);
+
+       spin_lock(&htable_ops_in_progress_lock);
+       list_for_each_entry_safe(op,
+                                next,
+                                &htable_ops_in_progress[index],
+                                list) {
+               if (op->tag == tag && !op_state_purged(op)) {
+                       list_del_init(&op->list);
+                       get_op(op); /* increase ref count. */
+                       spin_unlock(&htable_ops_in_progress_lock);
+                       return op;
+               }
+       }
+
+       spin_unlock(&htable_ops_in_progress_lock);
+       return NULL;
+}
+
+static int orangefs_devreq_open(struct inode *inode, struct file *file)
+{
+       int ret = -EINVAL;
+
+       if (!(file->f_flags & O_NONBLOCK)) {
+               gossip_err("%s: device cannot be opened in blocking mode\n",
+                          __func__);
+               goto out;
+       }
+       ret = -EACCES;
+       gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
+       mutex_lock(&devreq_mutex);
+
+       if (open_access_count == 0) {
+               open_access_count = 1;
+               ret = 0;
+       } else {
+               DUMP_DEVICE_ERROR();
+       }
+       mutex_unlock(&devreq_mutex);
+
+out:
+
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "pvfs2-client-core: open device complete (ret = %d)\n",
+                    ret);
+       return ret;
+}
+
+/* Function for read() callers into the device */
+static ssize_t orangefs_devreq_read(struct file *file,
+                                char __user *buf,
+                                size_t count, loff_t *offset)
+{
+       struct orangefs_kernel_op_s *op, *temp;
+       __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
+       static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
+       struct orangefs_kernel_op_s *cur_op = NULL;
+       unsigned long ret;
+
+       /* We do not support blocking IO. */
+       if (!(file->f_flags & O_NONBLOCK)) {
+               gossip_err("%s: blocking read from client-core.\n",
+                          __func__);
+               return -EINVAL;
+       }
+
+       /*
+        * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
+        * always read with that size buffer.
+        */
+       if (count != MAX_DEV_REQ_UPSIZE) {
+               gossip_err("orangefs: client-core tried to read wrong size\n");
+               return -EINVAL;
+       }
+
+restart:
+       /* Get next op (if any) from top of list. */
+       spin_lock(&orangefs_request_list_lock);
+       list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
+               __s32 fsid;
+               /* This lock is held past the end of the loop when we break. */
+               spin_lock(&op->lock);
+               if (unlikely(op_state_purged(op))) {
+                       spin_unlock(&op->lock);
+                       continue;
+               }
+
+               fsid = fsid_of_op(op);
+               if (fsid != ORANGEFS_FS_ID_NULL) {
+                       int ret;
+                       /* Skip ops whose filesystem needs to be mounted. */
+                       ret = fs_mount_pending(fsid);
+                       if (ret == 1) {
+                               gossip_debug(GOSSIP_DEV_DEBUG,
+                                   "%s: mount pending, skipping op tag "
+                                   "%llu %s\n",
+                                   __func__,
+                                   llu(op->tag),
+                                   get_opname_string(op));
+                               spin_unlock(&op->lock);
+                               continue;
+                       /*
+                        * Skip ops whose filesystem we don't know about unless
+                        * it is being mounted.
+                        */
+                       /* XXX: is there a better way to detect this? */
+                       } else if (ret == -1 &&
+                                  !(op->upcall.type ==
+                                       ORANGEFS_VFS_OP_FS_MOUNT ||
+                                    op->upcall.type ==
+                                       ORANGEFS_VFS_OP_GETATTR)) {
+                               gossip_debug(GOSSIP_DEV_DEBUG,
+                                   "orangefs: skipping op tag %llu %s\n",
+                                   llu(op->tag), get_opname_string(op));
+                               gossip_err(
+                                   "orangefs: ERROR: fs_mount_pending %d\n",
+                                   fsid);
+                               spin_unlock(&op->lock);
+                               continue;
+                       }
+               }
+               /*
+                * Either this op does not pertain to a filesystem, is mounting
+                * a filesystem, or pertains to a mounted filesystem. Let it
+                * through.
+                */
+               cur_op = op;
+               break;
+       }
+
+       /*
+        * At this point we either have a valid op and can continue or have not
+        * found an op and must ask the client to try again later.
+        */
+       if (!cur_op) {
+               spin_unlock(&orangefs_request_list_lock);
+               return -EAGAIN;
+       }
+
+       gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
+                    llu(cur_op->tag), get_opname_string(cur_op));
+
+       /*
+        * Such an op should never be on the list in the first place. If so, we
+        * will abort.
+        */
+       if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
+               gossip_err("orangefs: ERROR: Current op already queued.\n");
+               list_del(&cur_op->list);
+               spin_unlock(&cur_op->lock);
+               spin_unlock(&orangefs_request_list_lock);
+               return -EAGAIN;
+       }
+       list_del_init(&cur_op->list);
+       get_op(op);
+       spin_unlock(&orangefs_request_list_lock);
+
+       spin_unlock(&cur_op->lock);
+
+       /* Push the upcall out. */
+       ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
+       if (ret != 0)
+               goto error;
+       ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
+       if (ret != 0)
+               goto error;
+       ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
+       if (ret != 0)
+               goto error;
+       ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
+                          sizeof(struct orangefs_upcall_s));
+       if (ret != 0)
+               goto error;
+
+       spin_lock(&htable_ops_in_progress_lock);
+       spin_lock(&cur_op->lock);
+       if (unlikely(op_state_given_up(cur_op))) {
+               spin_unlock(&cur_op->lock);
+               spin_unlock(&htable_ops_in_progress_lock);
+               op_release(cur_op);
+               goto restart;
+       }
+
+       /*
+        * Set the operation to be in progress and move it between lists since
+        * it has been sent to the client.
+        */
+       set_op_state_inprogress(cur_op);
+       orangefs_devreq_add_op(cur_op);
+       spin_unlock(&cur_op->lock);
+       spin_unlock(&htable_ops_in_progress_lock);
+       op_release(cur_op);
+
+       /* The client only asks to read one size buffer. */
+       return MAX_DEV_REQ_UPSIZE;
+error:
+       /*
+        * We were unable to copy the op data to the client. Put the op back in
+        * list. If client has crashed, the op will be purged later when the
+        * device is released.
+        */
+       gossip_err("orangefs: Failed to copy data to user space\n");
+       spin_lock(&orangefs_request_list_lock);
+       spin_lock(&cur_op->lock);
+       if (likely(!op_state_given_up(cur_op))) {
+               set_op_state_waiting(cur_op);
+               list_add(&cur_op->list, &orangefs_request_list);
+       }
+       spin_unlock(&cur_op->lock);
+       spin_unlock(&orangefs_request_list_lock);
+       op_release(cur_op);
+       return -EFAULT;
+}
+
+/*
+ * Function for writev() callers into the device.
+ *
+ * Userspace should have written:
+ *  - __u32 version
+ *  - __u32 magic
+ *  - __u64 tag
+ *  - struct orangefs_downcall_s
+ *  - trailer buffer (in the case of READDIR operations)
+ */
+static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
+                                     struct iov_iter *iter)
+{
+       ssize_t ret;
+       struct orangefs_kernel_op_s *op = NULL;
+       struct {
+               __u32 version;
+               __u32 magic;
+               __u64 tag;
+       } head;
+       int total = ret = iov_iter_count(iter);
+       int n;
+       int downcall_size = sizeof(struct orangefs_downcall_s);
+       int head_size = sizeof(head);
+
+       gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
+                    __func__,
+                    total,
+                    ret);
+
+       if (total < MAX_DEV_REQ_DOWNSIZE) {
+               gossip_err("%s: total:%d: must be at least:%u:\n",
+                          __func__,
+                          total,
+                          (unsigned int) MAX_DEV_REQ_DOWNSIZE);
+               return -EFAULT;
+       }
+
+       n = copy_from_iter(&head, head_size, iter);
+       if (n < head_size) {
+               gossip_err("%s: failed to copy head.\n", __func__);
+               return -EFAULT;
+       }
+
+       if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
+               gossip_err("%s: userspace claims version "
+                          "%d, minimum version required: %d.\n",
+                          __func__,
+                          head.version,
+                          ORANGEFS_MINIMUM_USERSPACE_VERSION);
+               return -EPROTO;
+       }
+
+       if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
+               gossip_err("Error: Device magic number does not match.\n");
+               return -EPROTO;
+       }
+
+       op = orangefs_devreq_remove_op(head.tag);
+       if (!op) {
+               gossip_err("WARNING: No one's waiting for tag %llu\n",
+                          llu(head.tag));
+               return ret;
+       }
+
+       n = copy_from_iter(&op->downcall, downcall_size, iter);
+       if (n != downcall_size) {
+               gossip_err("%s: failed to copy downcall.\n", __func__);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+       if (op->downcall.status)
+               goto wakeup;
+
+       /*
+        * We've successfully peeled off the head and the downcall. 
+        * Something has gone awry if total doesn't equal the
+        * sum of head_size, downcall_size and trailer_size.
+        */
+       if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
+               gossip_err("%s: funky write, head_size:%d"
+                          ": downcall_size:%d: trailer_size:%lld"
+                          ": total size:%d:\n",
+                          __func__,
+                          head_size,
+                          downcall_size,
+                          op->downcall.trailer_size,
+                          total);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+       /* Only READDIR operations should have trailers. */
+       if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
+           (op->downcall.trailer_size != 0)) {
+               gossip_err("%s: %x operation with trailer.",
+                          __func__,
+                          op->downcall.type);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+       /* READDIR operations should always have trailers. */
+       if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
+           (op->downcall.trailer_size == 0)) {
+               gossip_err("%s: %x operation with no trailer.",
+                          __func__,
+                          op->downcall.type);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+       if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
+               goto wakeup;
+
+       op->downcall.trailer_buf =
+               vmalloc(op->downcall.trailer_size);
+       if (op->downcall.trailer_buf == NULL) {
+               gossip_err("%s: failed trailer vmalloc.\n",
+                          __func__);
+               ret = -ENOMEM;
+               goto Broken;
+       }
+       memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
+       n = copy_from_iter(op->downcall.trailer_buf,
+                          op->downcall.trailer_size,
+                          iter);
+       if (n != op->downcall.trailer_size) {
+               gossip_err("%s: failed to copy trailer.\n", __func__);
+               vfree(op->downcall.trailer_buf);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+wakeup:
+       /*
+        * tell the vfs op waiting on a waitqueue
+        * that this op is done
+        */
+       spin_lock(&op->lock);
+       if (unlikely(op_state_given_up(op))) {
+               spin_unlock(&op->lock);
+               goto out;
+       }
+       set_op_state_serviced(op);
+       spin_unlock(&op->lock);
+
+       /*
+        * If this operation is an I/O operation we need to wait
+        * for all data to be copied before we can return to avoid
+        * buffer corruption and races that can pull the buffers
+        * out from under us.
+        *
+        * Essentially we're synchronizing with other parts of the
+        * vfs implicitly by not allowing the user space
+        * application reading/writing this device to return until
+        * the buffers are done being used.
+        */
+       if (op->downcall.type == ORANGEFS_VFS_OP_FILE_IO) {
+               long n = wait_for_completion_interruptible_timeout(&op->done,
+                                                       op_timeout_secs * HZ);
+               if (unlikely(n < 0)) {
+                       gossip_debug(GOSSIP_DEV_DEBUG,
+                               "%s: signal on I/O wait, aborting\n",
+                               __func__);
+               } else if (unlikely(n == 0)) {
+                       gossip_debug(GOSSIP_DEV_DEBUG,
+                               "%s: timed out.\n",
+                               __func__);
+               }
+       }
+out:
+       op_release(op);
+       return ret;
+
+Broken:
+       spin_lock(&op->lock);
+       if (!op_state_given_up(op)) {
+               op->downcall.status = ret;
+               set_op_state_serviced(op);
+       }
+       spin_unlock(&op->lock);
+       goto out;
+}
+
+/* Marks every mounted file system as pending a remount; returns 1 if none are mounted */
+static int mark_all_pending_mounts(void)
+{
+       int unmounted = 1;
+       struct orangefs_sb_info_s *orangefs_sb = NULL;
+
+       spin_lock(&orangefs_superblocks_lock);
+       list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
+               /* All of these file systems require a remount */
+               orangefs_sb->mount_pending = 1;
+               unmounted = 0;
+       }
+       spin_unlock(&orangefs_superblocks_lock);
+       return unmounted;
+}
+
+/*
+ * Determine if a given file system needs to be remounted or not
+ *  Returns -1 on error
+ *           0 if already mounted
+ *           1 if needs remount
+ */
+int fs_mount_pending(__s32 fsid)
+{
+       int mount_pending = -1;
+       struct orangefs_sb_info_s *orangefs_sb = NULL;
+
+       spin_lock(&orangefs_superblocks_lock);
+       list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
+               if (orangefs_sb->fs_id == fsid) {
+                       mount_pending = orangefs_sb->mount_pending;
+                       break;
+               }
+       }
+       spin_unlock(&orangefs_superblocks_lock);
+       return mount_pending;
+}
+
+/*
+ * NOTE: gets called when the last reference to this device is dropped.
+ * Using the open_access_count variable, we enforce a reference count
+ * on this file so that it can be opened by only one process at a time.
+ * The devreq_mutex is used to make sure all I/O has completed
+ * before we call orangefs_bufmap_finalize, and to handle similar
+ * ordering concerns.
+ */
+static int orangefs_devreq_release(struct inode *inode, struct file *file)
+{
+       int unmounted = 0;
+
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "%s:pvfs2-client-core: exiting, closing device\n",
+                    __func__);
+
+       mutex_lock(&devreq_mutex);
+       if (orangefs_get_bufmap_init())
+               orangefs_bufmap_finalize();
+
+       open_access_count = -1;
+
+       unmounted = mark_all_pending_mounts();
+       gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
+                    (unmounted ? "UNMOUNTED" : "MOUNTED"));
+
+       /*
+        * Walk through the list of ops in the request list, mark them
+        * as purged and wake them up.
+        */
+       purge_waiting_ops();
+       /*
+        * Walk through the hash table of in progress operations; mark
+        * them as purged and wake them up
+        */
+       purge_inprogress_ops();
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "pvfs2-client-core: device close complete\n");
+       open_access_count = 0;
+       mutex_unlock(&devreq_mutex);
+       return 0;
+}
+
+int is_daemon_in_service(void)
+{
+       int in_service;
+
+       /*
+        * Check whether the client-core is alive, based on the access
+        * count we maintain on the device.
+        */
+       mutex_lock(&devreq_mutex);
+       in_service = open_access_count == 1 ? 0 : -EIO;
+       mutex_unlock(&devreq_mutex);
+       return in_service;
+}
+
+static inline long check_ioctl_command(unsigned int command)
+{
+       /* Check for valid ioctl codes */
+       if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
+               gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
+                       command,
+                       _IOC_TYPE(command),
+                       ORANGEFS_DEV_MAGIC);
+               return -EINVAL;
+       }
+       /* and valid ioctl commands */
+       if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
+               gossip_err("Invalid ioctl command number [%d >= %d]\n",
+                          _IOC_NR(command), ORANGEFS_DEV_MAXNR);
+               return -ENOIOCTLCMD;
+       }
+       return 0;
+}
+
+static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
+{
+       static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
+       static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
+       static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
+       struct ORANGEFS_dev_map_desc user_desc;
+       int ret = 0;
+       struct dev_mask_info_s mask_info = { 0 };
+       struct dev_mask2_info_s mask2_info = { 0, 0 };
+       int upstream_kmod = 1;
+       struct list_head *tmp = NULL;
+       struct orangefs_sb_info_s *orangefs_sb = NULL;
+
+       /* mtmoore: add locking here */
+
+       switch (command) {
+       case ORANGEFS_DEV_GET_MAGIC:
+               return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
+                       -EIO :
+                       0);
+       case ORANGEFS_DEV_GET_MAX_UPSIZE:
+               return ((put_user(max_up_size,
+                                 (__s32 __user *) arg) == -EFAULT) ?
+                                       -EIO :
+                                       0);
+       case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
+               return ((put_user(max_down_size,
+                                 (__s32 __user *) arg) == -EFAULT) ?
+                                       -EIO :
+                                       0);
+       case ORANGEFS_DEV_MAP:
+               ret = copy_from_user(&user_desc,
+                                    (struct ORANGEFS_dev_map_desc __user *)
+                                    arg,
+                                    sizeof(struct ORANGEFS_dev_map_desc));
+               if (orangefs_get_bufmap_init()) {
+                       return -EINVAL;
+               } else {
+                       return ret ?
+                              -EIO :
+                              orangefs_bufmap_initialize(&user_desc);
+               }
+       case ORANGEFS_DEV_REMOUNT_ALL:
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                            "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
+                            __func__);
+
+               /*
+                * remount all mounted orangefs volumes to regain the lost
+                * dynamic mount tables (if any) -- NOTE: this is done
+                * without keeping the superblock list locked due to the
+                * upcall/downcall waiting.  also, the request semaphore is
+                * used to ensure that no operations will be serviced until
+                * all of the remounts are serviced (to avoid ops between
+                * mounts to fail)
+                */
+               ret = mutex_lock_interruptible(&request_mutex);
+               if (ret < 0)
+                       return ret;
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                            "%s: priority remount in progress\n",
+                            __func__);
+               list_for_each(tmp, &orangefs_superblocks) {
+                       orangefs_sb =
+                               list_entry(tmp,
+                                          struct orangefs_sb_info_s,
+                                          list);
+                       if (orangefs_sb && (orangefs_sb->sb)) {
+                               gossip_debug(GOSSIP_DEV_DEBUG,
+                                            "%s: Remounting SB %p\n",
+                                            __func__,
+                                            orangefs_sb);
+
+                               ret = orangefs_remount(orangefs_sb->sb);
+                               if (ret) {
+                                       gossip_debug(GOSSIP_DEV_DEBUG,
+                                                    "SB %p remount failed\n",
+                                                    orangefs_sb);
+                                       break;
+                               }
+                       }
+               }
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                            "%s: priority remount complete\n",
+                            __func__);
+               mutex_unlock(&request_mutex);
+               return ret;
+
+       case ORANGEFS_DEV_UPSTREAM:
+               ret = copy_to_user((void __user *)arg,
+                                   &upstream_kmod,
+                                   sizeof(upstream_kmod));
+
+               if (ret != 0)
+                       return -EIO;
+               else
+                       return ret;
+
+       case ORANGEFS_DEV_CLIENT_MASK:
+               ret = copy_from_user(&mask2_info,
+                                    (void __user *)arg,
+                                    sizeof(struct dev_mask2_info_s));
+
+               if (ret != 0)
+                       return -EIO;
+
+               client_debug_mask.mask1 = mask2_info.mask1_value;
+               client_debug_mask.mask2 = mask2_info.mask2_value;
+
+               pr_info("%s: client debug mask has been received "
+                       ":%llx: :%llx:\n",
+                       __func__,
+                       (unsigned long long)client_debug_mask.mask1,
+                       (unsigned long long)client_debug_mask.mask2);
+
+               return ret;
+
+       case ORANGEFS_DEV_CLIENT_STRING:
+               ret = copy_from_user(&client_debug_array_string,
+                                    (void __user *)arg,
+                                    ORANGEFS_MAX_DEBUG_STRING_LEN);
+               if (ret != 0) {
+                       pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+                               __func__);
+                       return -EIO;
+               }
+
+               pr_info("%s: client debug array string has been received.\n",
+                       __func__);
+
+               if (!help_string_initialized) {
+
+                       /* Free the "we don't know yet" default string... */
+                       kfree(debug_help_string);
+
+                       /* build a proper debug help string */
+                       if (orangefs_prepare_debugfs_help_string(0)) {
+                               gossip_err("%s: no debug help string \n",
+                                          __func__);
+                               return -EIO;
+                       }
+
+                       /* Replace the boilerplate boot-time debug-help file. */
+                       debugfs_remove(help_file_dentry);
+
+                       help_file_dentry =
+                               debugfs_create_file(
+                                       ORANGEFS_KMOD_DEBUG_HELP_FILE,
+                                       0444,
+                                       debug_dir,
+                                       debug_help_string,
+                                       &debug_help_fops);
+
+                       if (!help_file_dentry) {
+                               gossip_err("%s: debugfs_create_file failed for"
+                                          " :%s:!\n",
+                                          __func__,
+                                          ORANGEFS_KMOD_DEBUG_HELP_FILE);
+                               return -EIO;
+                       }
+               }
+
+               debug_mask_to_string(&client_debug_mask, 1);
+
+               debugfs_remove(client_debug_dentry);
+
+               orangefs_client_debug_init();
+
+               help_string_initialized++;
+
+               return ret;
+
+       case ORANGEFS_DEV_DEBUG:
+               ret = copy_from_user(&mask_info,
+                                    (void __user *)arg,
+                                    sizeof(mask_info));
+
+               if (ret != 0)
+                       return -EIO;
+
+               if (mask_info.mask_type == KERNEL_MASK) {
+                       if ((mask_info.mask_value == 0)
+                           && (kernel_mask_set_mod_init)) {
+                               /*
+                                * the kernel debug mask was set when the
+                                * kernel module was loaded; don't override
+                                * it if the client-core was started without
+                                * a value for ORANGEFS_KMODMASK.
+                                */
+                               return 0;
+                       }
+                       debug_mask_to_string(&mask_info.mask_value,
+                                            mask_info.mask_type);
+                       gossip_debug_mask = mask_info.mask_value;
+                       pr_info("%s: kernel debug mask has been modified to "
+                               ":%s: :%llx:\n",
+                               __func__,
+                               kernel_debug_string,
+                               (unsigned long long)gossip_debug_mask);
+               } else if (mask_info.mask_type == CLIENT_MASK) {
+                       debug_mask_to_string(&mask_info.mask_value,
+                                            mask_info.mask_type);
+                       pr_info("%s: client debug mask has been modified to "
+                               ":%s: :%llx:\n",
+                               __func__,
+                               client_debug_string,
+                               llu(mask_info.mask_value));
+               } else {
+                       gossip_lerr("Invalid mask type....\n");
+                       return -EINVAL;
+               }
+
+               return ret;
+
+       default:
+               return -ENOIOCTLCMD;
+       }
+       return -ENOIOCTLCMD;
+}
+
+static long orangefs_devreq_ioctl(struct file *file,
+                              unsigned int command, unsigned long arg)
+{
+       long ret;
+
+       /* Check for properly constructed commands */
+       ret = check_ioctl_command(command);
+       if (ret < 0)
+               return (int)ret;
+
+       return (int)dispatch_ioctl_command(command, arg);
+}
+
+#ifdef CONFIG_COMPAT           /* CONFIG_COMPAT is in .config */
+
+/*  Compat structure for the ORANGEFS_DEV_MAP ioctl */
+struct ORANGEFS_dev_map_desc32 {
+       compat_uptr_t ptr;
+       __s32 total_size;
+       __s32 size;
+       __s32 count;
+};
+
+static unsigned long translate_dev_map26(unsigned long args, long *error)
+{
+       struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
+       /*
+        * Depending on the architecture, allocate some space on the
+        * user-call-stack based on our expected layout.
+        */
+       struct ORANGEFS_dev_map_desc __user *p =
+           compat_alloc_user_space(sizeof(*p));
+       compat_uptr_t addr;
+
+       *error = 0;
+       /* get the ptr from the 32 bit user-space */
+       if (get_user(addr, &p32->ptr))
+               goto err;
+       /* try to put that into a 64-bit layout */
+       if (put_user(compat_ptr(addr), &p->ptr))
+               goto err;
+       /* copy the remaining fields */
+       if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
+               goto err;
+       if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
+               goto err;
+       if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
+               goto err;
+       return (unsigned long)p;
+err:
+       *error = -EFAULT;
+       return 0;
+}
+
+/*
+ * ioctl handler for 32-bit user-space apps when the kernel module
+ * is compiled as a 64-bit one
+ */
+static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
+                                     unsigned long args)
+{
+       long ret;
+       unsigned long arg = args;
+
+       /* Check for properly constructed commands */
+       ret = check_ioctl_command(cmd);
+       if (ret < 0)
+               return ret;
+       if (cmd == ORANGEFS_DEV_MAP) {
+               /*
+                * convert the arguments to what we expect internally
+                * in kernel space
+                */
+               arg = translate_dev_map26(args, &ret);
+               if (ret < 0) {
+                       gossip_err("Could not translate dev map\n");
+                       return ret;
+               }
+       }
+       /* no other ioctl requires translation */
+       return dispatch_ioctl_command(cmd, arg);
+}
+
+#endif /* CONFIG_COMPAT is in .config */
+
+/* the assigned character device major number */
+static int orangefs_dev_major;
+
+/*
+ * Initialize orangefs device specific state:
+ * Must be called at module load time only
+ */
+int orangefs_dev_init(void)
+{
+       /* register orangefs-req device  */
+       orangefs_dev_major = register_chrdev(0,
+                                         ORANGEFS_REQDEVICE_NAME,
+                                         &orangefs_devreq_file_operations);
+       if (orangefs_dev_major < 0) {
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                            "Failed to register /dev/%s (error %d)\n",
+                            ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
+               return orangefs_dev_major;
+       }
+
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "*** /dev/%s character device registered ***\n",
+                    ORANGEFS_REQDEVICE_NAME);
+       gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
+                    ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
+       return 0;
+}
+
+void orangefs_dev_cleanup(void)
+{
+       unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "*** /dev/%s character device unregistered ***\n",
+                    ORANGEFS_REQDEVICE_NAME);
+}
+
+static unsigned int orangefs_devreq_poll(struct file *file,
+                                     struct poll_table_struct *poll_table)
+{
+       int poll_revent_mask = 0;
+
+       poll_wait(file, &orangefs_request_list_waitq, poll_table);
+
+       if (!list_empty(&orangefs_request_list))
+               poll_revent_mask |= POLL_IN;
+       return poll_revent_mask;
+}
+
+const struct file_operations orangefs_devreq_file_operations = {
+       .owner = THIS_MODULE,
+       .read = orangefs_devreq_read,
+       .write_iter = orangefs_devreq_write_iter,
+       .open = orangefs_devreq_open,
+       .release = orangefs_devreq_release,
+       .unlocked_ioctl = orangefs_devreq_ioctl,
+
+#ifdef CONFIG_COMPAT           /* CONFIG_COMPAT is in .config */
+       .compat_ioctl = orangefs_devreq_compat_ioctl,
+#endif
+       .poll = orangefs_devreq_poll
+};
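
For reference, the protocol implemented above is symmetric: orangefs_devreq_read() hands the client-core a fixed-size record of protocol version, magic, tag and the upcall struct, and orangefs_devreq_write_iter() expects the reply laid out exactly as its comment describes, with a trailer only for READDIR. A hedged userspace-side sketch of composing such a reply with writev(); the version, magic and payload sizes are placeholders, since the real client-core shares those definitions with the kernel headers and queries buffer sizes via the ORANGEFS_DEV_GET_MAX_UPSIZE/ORANGEFS_DEV_GET_MAX_DOWNSIZE ioctls:

#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>

#define DEMO_VERSION	1		/* placeholder; must meet ORANGEFS_MINIMUM_USERSPACE_VERSION */
#define DEMO_MAGIC	0x0badcafe	/* placeholder for ORANGEFS_DEVREQ_MAGIC */

/* Mirrors the head that orangefs_devreq_write_iter() copies off the iter. */
struct demo_head {
	uint32_t version;
	uint32_t magic;
	uint64_t tag;		/* tag taken from the matching upcall */
};

static ssize_t demo_send_reply(int devfd, uint64_t tag,
			       const void *downcall, size_t downcall_size,
			       const void *trailer, size_t trailer_size)
{
	struct demo_head head = {
		.version = DEMO_VERSION,
		.magic	 = DEMO_MAGIC,
		.tag	 = tag,
	};
	struct iovec iov[3] = {
		{ .iov_base = &head,            .iov_len = sizeof(head) },
		{ .iov_base = (void *)downcall, .iov_len = downcall_size },
		{ .iov_base = (void *)trailer,  .iov_len = trailer_size },
	};

	/* a trailer is only present for READDIR replies */
	return writev(devfd, iov, trailer_size ? 3 : 2);
}
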
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
new file mode 100644 (file)
index 0000000..6f5836d
--- /dev/null
@@ -0,0 +1,446 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+struct readdir_handle_s {
+       int buffer_index;
+       struct orangefs_readdir_response_s readdir_response;
+       void *dents_buf;
+};
+
+/*
+ * decode routine used by kmod to deal with the blob sent from
+ * userspace for readdirs. The blob contains zero or more of these
+ * sub-blobs:
+ *   __u32 - represents length of the character string that follows.
+ *   string - between 1 and ORANGEFS_NAME_MAX bytes long.
+ *   padding - (if needed) to cause the __u32 plus the string to be
+ *             eight byte aligned.
+ *   khandle - sizeof(khandle) bytes.
+ */
+static long decode_dirents(char *ptr, size_t size,
+                           struct orangefs_readdir_response_s *readdir)
+{
+       int i;
+       struct orangefs_readdir_response_s *rd =
+               (struct orangefs_readdir_response_s *) ptr;
+       char *buf = ptr;
+       int khandle_size = sizeof(struct orangefs_khandle);
+       size_t offset = offsetof(struct orangefs_readdir_response_s,
+                               dirent_array);
+       /* 8 reflects eight byte alignment */
+       int smallest_blob = khandle_size + 8;
+       __u32 len;
+       int aligned_len;
+       int sizeof_u32 = sizeof(__u32);
+       long ret;
+
+       gossip_debug(GOSSIP_DIR_DEBUG, "%s: size:%zu:\n", __func__, size);
+
+       /* size == offset on empty dirs, size > offset on non-empty dirs... */
+       if (size < offset) {
+               gossip_err("%s: size:%zu: offset:%zu:\n",
+                          __func__,
+                          size,
+                          offset);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if ((size == offset) && (readdir->orangefs_dirent_outcount != 0)) {
+               gossip_err("%s: size:%zu: dirent_outcount:%d:\n",
+                          __func__,
+                          size,
+                          readdir->orangefs_dirent_outcount);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       readdir->token = rd->token;
+       readdir->orangefs_dirent_outcount = rd->orangefs_dirent_outcount;
+       readdir->dirent_array = kcalloc(readdir->orangefs_dirent_outcount,
+                                       sizeof(*readdir->dirent_array),
+                                       GFP_KERNEL);
+       if (readdir->dirent_array == NULL) {
+               gossip_err("%s: kcalloc failed.\n", __func__);
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       buf += offset;
+       size -= offset;
+
+       for (i = 0; i < readdir->orangefs_dirent_outcount; i++) {
+               if (size < smallest_blob) {
+                       gossip_err("%s: size:%zu: smallest_blob:%d:\n",
+                                  __func__,
+                                  size,
+                                  smallest_blob);
+                       ret = -EINVAL;
+                       goto free;
+               }
+
+               len = *(__u32 *)buf;
+               if ((len < 1) || (len > ORANGEFS_NAME_MAX)) {
+                       gossip_err("%s: len:%d:\n", __func__, len);
+                       ret = -EINVAL;
+                       goto free;
+               }
+
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "%s: size:%zu: len:%d:\n",
+                            __func__,
+                            size,
+                            len);
+
+               readdir->dirent_array[i].d_name = buf + sizeof_u32;
+               readdir->dirent_array[i].d_length = len;
+
+               /*
+                * Calculate "aligned" length of this string and its
+                * associated __u32 descriptor.
+                */
+               aligned_len = ((sizeof_u32 + len + 1) + 7) & ~7;
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "%s: aligned_len:%d:\n",
+                            __func__,
+                            aligned_len);
+
+               /*
+                * The end of the blob should coincide with the end
+                * of the last sub-blob.
+                */
+               if (size < aligned_len + khandle_size) {
+                       gossip_err("%s: ran off the end of the blob.\n",
+                                  __func__);
+                       ret = -EINVAL;
+                       goto free;
+               }
+               size -= aligned_len + khandle_size;
+
+               buf += aligned_len;
+
+               readdir->dirent_array[i].khandle =
+                       *(struct orangefs_khandle *) buf;
+               buf += khandle_size;
+       }
+       ret = buf - ptr;
+       gossip_debug(GOSSIP_DIR_DEBUG, "%s: returning:%ld:\n", __func__, ret);
+       goto out;
+
+free:
+       kfree(readdir->dirent_array);
+       readdir->dirent_array = NULL;
+
+out:
+       return ret;
+}
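For reference, a minimal sketch of the trailer layout that decode_dirents() walks, with the 8-byte alignment arithmetic worked through for a 5-byte name. This is illustrative only and not part of the patch; example_aligned_len is a hypothetical helper, not anything defined in fs/orangefs.

/*
 * Sketch (illustrative, not part of the patch): each entry in the
 * readdir trailer blob is laid out as
 *
 *     __u32 len | name bytes | '\0' | pad to an 8-byte boundary | khandle
 *
 * For a 5-byte name such as "hello":
 *     aligned_len = ((sizeof(__u32) + 5 + 1) + 7) & ~7
 *                 = (4 + 5 + 1 + 7) & ~7
 *                 = 17 & ~7
 *                 = 16
 * i.e. 4 bytes of length descriptor, 6 bytes of string (including the
 * NUL) and 6 bytes of padding, followed by the khandle for that entry.
 */
static inline int example_aligned_len(__u32 name_len)
{
	/* same arithmetic as the aligned_len computation above */
	return ((sizeof(__u32) + name_len + 1) + 7) & ~7;
}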
+
+static long readdir_handle_ctor(struct readdir_handle_s *rhandle, void *buf,
+                               size_t size, int buffer_index)
+{
+       long ret;
+
+       if (buf == NULL) {
+               gossip_err
+                   ("Invalid NULL buffer specified in readdir_handle_ctor\n");
+               return -ENOMEM;
+       }
+       if (buffer_index < 0) {
+               gossip_err
+                   ("Invalid buffer index specified in readdir_handle_ctor\n");
+               return -EINVAL;
+       }
+       rhandle->buffer_index = buffer_index;
+       rhandle->dents_buf = buf;
+       ret = decode_dirents(buf, size, &rhandle->readdir_response);
+       if (ret < 0) {
+               gossip_err("Could not decode readdir from buffer %ld\n", ret);
+               rhandle->buffer_index = -1;
+               gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", buf);
+               vfree(buf);
+               rhandle->dents_buf = NULL;
+       }
+       return ret;
+}
+
+static void readdir_handle_dtor(struct orangefs_bufmap *bufmap,
+               struct readdir_handle_s *rhandle)
+{
+       if (rhandle == NULL)
+               return;
+
+       /* kfree(NULL) is safe */
+       kfree(rhandle->readdir_response.dirent_array);
+       rhandle->readdir_response.dirent_array = NULL;
+
+       if (rhandle->buffer_index >= 0) {
+               orangefs_readdir_index_put(bufmap, rhandle->buffer_index);
+               rhandle->buffer_index = -1;
+       }
+       if (rhandle->dents_buf) {
+               gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n",
+                            rhandle->dents_buf);
+               vfree(rhandle->dents_buf);
+               rhandle->dents_buf = NULL;
+       }
+}
+
+/*
+ * Read directory entries from an instance of an open directory.
+ */
+static int orangefs_readdir(struct file *file, struct dir_context *ctx)
+{
+       struct orangefs_bufmap *bufmap = NULL;
+       int ret = 0;
+       int buffer_index;
+       /*
+        * ptoken supports Orangefs' distributed directory logic, added
+        * in 2.9.2.
+        */
+       __u64 *ptoken = file->private_data;
+       __u64 pos = 0;
+       ino_t ino = 0;
+       struct dentry *dentry = file->f_path.dentry;
+       struct orangefs_kernel_op_s *new_op = NULL;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode);
+       int buffer_full = 0;
+       struct readdir_handle_s rhandle;
+       int i = 0;
+       int len = 0;
+       ino_t current_ino = 0;
+       char *current_entry = NULL;
+       long bytes_decoded;
+
+       gossip_debug(GOSSIP_DIR_DEBUG,
+                    "%s: ctx->pos:%lld, ptoken = %llu\n",
+                    __func__,
+                    lld(ctx->pos),
+                    llu(*ptoken));
+
+       pos = (__u64) ctx->pos;
+
+       /* are we done? */
+       if (pos == ORANGEFS_READDIR_END) {
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "Skipping to termination path\n");
+               return 0;
+       }
+
+       gossip_debug(GOSSIP_DIR_DEBUG,
+                    "orangefs_readdir called on %s (pos=%llu)\n",
+                    dentry->d_name.name, llu(pos));
+
+       rhandle.buffer_index = -1;
+       rhandle.dents_buf = NULL;
+       memset(&rhandle.readdir_response, 0, sizeof(rhandle.readdir_response));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_READDIR);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->uses_shared_memory = 1;
+       new_op->upcall.req.readdir.refn = orangefs_inode->refn;
+       new_op->upcall.req.readdir.max_dirent_count =
+           ORANGEFS_MAX_DIRENT_COUNT_READDIR;
+
+       gossip_debug(GOSSIP_DIR_DEBUG,
+                    "%s: upcall.req.readdir.refn.khandle: %pU\n",
+                    __func__,
+                    &new_op->upcall.req.readdir.refn.khandle);
+
+       new_op->upcall.req.readdir.token = *ptoken;
+
+get_new_buffer_index:
+       ret = orangefs_readdir_index_get(&bufmap, &buffer_index);
+       if (ret < 0) {
+               gossip_lerr("orangefs_readdir: orangefs_readdir_index_get() failure (%d)\n",
+                           ret);
+               goto out_free_op;
+       }
+       new_op->upcall.req.readdir.buf_index = buffer_index;
+
+       ret = service_operation(new_op,
+                               "orangefs_readdir",
+                               get_interruptible_flag(dentry->d_inode));
+
+       gossip_debug(GOSSIP_DIR_DEBUG,
+                    "Readdir downcall status is %d.  ret:%d\n",
+                    new_op->downcall.status,
+                    ret);
+
+       if (ret == -EAGAIN && op_state_purged(new_op)) {
+               /*
+                * readdir shared memory area has been wiped due to
+                * pvfs2-client-core restarting, so we must get a new
+                * index into the shared memory.
+                */
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                       "%s: Getting new buffer_index for retry of readdir..\n",
+                        __func__);
+               orangefs_readdir_index_put(bufmap, buffer_index);
+               goto get_new_buffer_index;
+       }
+
+       if (ret == -EIO && op_state_purged(new_op)) {
+               gossip_err("%s: Client is down. Aborting readdir call.\n",
+                       __func__);
+               orangefs_readdir_index_put(bufmap, buffer_index);
+               goto out_free_op;
+       }
+
+       if (ret < 0 || new_op->downcall.status != 0) {
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "Readdir request failed.  Status:%d\n",
+                            new_op->downcall.status);
+               orangefs_readdir_index_put(bufmap, buffer_index);
+               if (ret >= 0)
+                       ret = new_op->downcall.status;
+               goto out_free_op;
+       }
+
+       bytes_decoded =
+               readdir_handle_ctor(&rhandle,
+                                   new_op->downcall.trailer_buf,
+                                   new_op->downcall.trailer_size,
+                                   buffer_index);
+       if (bytes_decoded < 0) {
+               gossip_err("orangefs_readdir: Could not decode trailer buffer into a readdir response %d\n",
+                       ret);
+               ret = bytes_decoded;
+               orangefs_readdir_index_put(bufmap, buffer_index);
+               goto out_free_op;
+       }
+
+       if (bytes_decoded != new_op->downcall.trailer_size) {
+               gossip_err("orangefs_readdir: # bytes decoded (%ld) "
+                          "!= trailer size (%ld)\n",
+                          bytes_decoded,
+                          (long)new_op->downcall.trailer_size);
+               ret = -EINVAL;
+               goto out_destroy_handle;
+       }
+
+       /*
+        *  orangefs doesn't actually store dot and dot-dot, but
+        *  we need to have them represented.
+        */
+       if (pos == 0) {
+               ino = get_ino_from_khandle(dentry->d_inode);
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "%s: calling dir_emit of \".\" with pos = %llu\n",
+                            __func__,
+                            llu(pos));
+               ret = dir_emit(ctx, ".", 1, ino, DT_DIR);
+               pos += 1;
+       }
+
+       if (pos == 1) {
+               ino = get_parent_ino_from_dentry(dentry);
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "%s: calling dir_emit of \"..\" with pos = %llu\n",
+                            __func__,
+                            llu(pos));
+               ret = dir_emit(ctx, "..", 2, ino, DT_DIR);
+               pos += 1;
+       }
+
+       /*
+        * we stored ORANGEFS_ITERATE_NEXT in ctx->pos last time around
+        * to prevent "finding" dot and dot-dot on any iteration
+        * other than the first.
+        */
+       if (ctx->pos == ORANGEFS_ITERATE_NEXT)
+               ctx->pos = 0;
+
+       for (i = ctx->pos;
+            i < rhandle.readdir_response.orangefs_dirent_outcount;
+            i++) {
+               len = rhandle.readdir_response.dirent_array[i].d_length;
+               current_entry = rhandle.readdir_response.dirent_array[i].d_name;
+               current_ino = orangefs_khandle_to_ino(
+                       &(rhandle.readdir_response.dirent_array[i].khandle));
+
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "calling dir_emit for %s with len %d"
+                            ", ctx->pos %ld\n",
+                            current_entry,
+                            len,
+                            (unsigned long)ctx->pos);
+               /*
+                * type is unknown. We don't return object type
+                * in the dirent_array. This leaves getdents
+                * clueless about type.
+                */
+               ret =
+                   dir_emit(ctx, current_entry, len, current_ino, DT_UNKNOWN);
+               if (!ret)
+                       break;
+               ctx->pos++;
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                             "%s: ctx->pos:%lld\n",
+                             __func__,
+                             lld(ctx->pos));
+
+       }
+
+       /*
+        * we ran all the way through the last batch, set up for
+        * getting another batch...
+        */
+       if (ret) {
+               *ptoken = rhandle.readdir_response.token;
+               ctx->pos = ORANGEFS_ITERATE_NEXT;
+       }
+
+       /*
+        * Did we hit the end of the directory?
+        */
+       if (rhandle.readdir_response.token == ORANGEFS_READDIR_END &&
+           !buffer_full) {
+               gossip_debug(GOSSIP_DIR_DEBUG,
+               "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n");
+               ctx->pos = ORANGEFS_READDIR_END;
+       }
+
+out_destroy_handle:
+       readdir_handle_dtor(bufmap, &rhandle);
+out_free_op:
+       op_release(new_op);
+       gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret);
+       return ret;
+}
+
+static int orangefs_dir_open(struct inode *inode, struct file *file)
+{
+       __u64 *ptoken;
+
+       file->private_data = kmalloc(sizeof(__u64), GFP_KERNEL);
+       if (!file->private_data)
+               return -ENOMEM;
+
+       ptoken = file->private_data;
+       *ptoken = ORANGEFS_READDIR_START;
+       return 0;
+}
+
+static int orangefs_dir_release(struct inode *inode, struct file *file)
+{
+       orangefs_flush_inode(inode);
+       kfree(file->private_data);
+       return 0;
+}
+
+/** ORANGEFS implementation of VFS directory operations */
+const struct file_operations orangefs_dir_operations = {
+       .read = generic_read_dir,
+       .iterate = orangefs_readdir,
+       .open = orangefs_dir_open,
+       .release = orangefs_dir_release,
+};
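A short trace of the state that orangefs_readdir() carries between calls may help when reading the function above. It describes only the existing logic in dir.c (no new behavior), using the constants already referenced there.

/*
 * Sketch of one directory listing (descriptive only):
 *
 *   open:        *ptoken = ORANGEFS_READDIR_START, ctx->pos = 0
 *   1st call:    pos 0 -> dir_emit("."), pos 1 -> dir_emit(".."),
 *                then one batch of server entries is emitted; the
 *                server's continuation token is saved in *ptoken and
 *                ctx->pos is set to ORANGEFS_ITERATE_NEXT
 *   later calls: ORANGEFS_ITERATE_NEXT is reset to 0 so "." and ".."
 *                are not emitted again, and the next batch is requested
 *                with the saved *ptoken
 *   end:         when the server returns ORANGEFS_READDIR_END, ctx->pos
 *                is set to ORANGEFS_READDIR_END and subsequent calls
 *                return 0 immediately
 */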
diff --git a/fs/orangefs/downcall.h b/fs/orangefs/downcall.h
new file mode 100644 (file)
index 0000000..72d4cac
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Definitions of downcalls used in Linux kernel module.
+ */
+
+#ifndef __DOWNCALL_H
+#define __DOWNCALL_H
+
+/*
+ * Sanitized the device-client core interaction
+ * for clean 32-64 bit usage
+ */
+struct orangefs_io_response {
+       __s64 amt_complete;
+};
+
+struct orangefs_lookup_response {
+       struct orangefs_object_kref refn;
+};
+
+struct orangefs_create_response {
+       struct orangefs_object_kref refn;
+};
+
+struct orangefs_symlink_response {
+       struct orangefs_object_kref refn;
+};
+
+struct orangefs_getattr_response {
+       struct ORANGEFS_sys_attr_s attributes;
+       char link_target[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_mkdir_response {
+       struct orangefs_object_kref refn;
+};
+
+/*
+ * duplication of some system interface structures so that I don't have
+ * to allocate extra memory
+ */
+struct orangefs_dirent {
+       char *d_name;
+       int d_length;
+       struct orangefs_khandle khandle;
+};
+
+struct orangefs_statfs_response {
+       __s64 block_size;
+       __s64 blocks_total;
+       __s64 blocks_avail;
+       __s64 files_total;
+       __s64 files_avail;
+};
+
+struct orangefs_fs_mount_response {
+       __s32 fs_id;
+       __s32 id;
+       struct orangefs_khandle root_khandle;
+};
+
+/* the getxattr response is the attribute value */
+struct orangefs_getxattr_response {
+       __s32 val_sz;
+       __s32 __pad1;
+       char val[ORANGEFS_MAX_XATTR_VALUELEN];
+};
+
+/* the listxattr response is an array of attribute names */
+struct orangefs_listxattr_response {
+       __s32 returned_count;
+       __s32 __pad1;
+       __u64 token;
+       char key[ORANGEFS_MAX_XATTR_LISTLEN * ORANGEFS_MAX_XATTR_NAMELEN];
+       __s32 keylen;
+       __s32 __pad2;
+       __s32 lengths[ORANGEFS_MAX_XATTR_LISTLEN];
+};
+
+struct orangefs_param_response {
+       __s64 value;
+};
+
+#define PERF_COUNT_BUF_SIZE 4096
+struct orangefs_perf_count_response {
+       char buffer[PERF_COUNT_BUF_SIZE];
+};
+
+#define FS_KEY_BUF_SIZE 4096
+struct orangefs_fs_key_response {
+       __s32 fs_keylen;
+       __s32 __pad1;
+       char fs_key[FS_KEY_BUF_SIZE];
+};
+
+struct orangefs_downcall_s {
+       __s32 type;
+       __s32 status;
+       /* currently trailer is used only by readdir */
+       __s64 trailer_size;
+       char *trailer_buf;
+
+       union {
+               struct orangefs_io_response io;
+               struct orangefs_lookup_response lookup;
+               struct orangefs_create_response create;
+               struct orangefs_symlink_response sym;
+               struct orangefs_getattr_response getattr;
+               struct orangefs_mkdir_response mkdir;
+               struct orangefs_statfs_response statfs;
+               struct orangefs_fs_mount_response fs_mount;
+               struct orangefs_getxattr_response getxattr;
+               struct orangefs_listxattr_response listxattr;
+               struct orangefs_param_response param;
+               struct orangefs_perf_count_response perf_count;
+               struct orangefs_fs_key_response fs_key;
+       } resp;
+};
+
+struct orangefs_readdir_response_s {
+       __u64 token;
+       __u64 directory_version;
+       __u32 __pad2;
+       __u32 orangefs_dirent_outcount;
+       struct orangefs_dirent *dirent_array;
+};
+
+#endif /* __DOWNCALL_H */
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
new file mode 100644 (file)
index 0000000..d865b58
--- /dev/null
@@ -0,0 +1,738 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Linux VFS file operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+/*
+ * Copy to client-core's address space from the buffers specified
+ * by the iovec up to total_size bytes.
+ * NOTE: the iovec can contain either addresses (which may in turn be
+ *       kernel-space or user-space addresses) or pointers to
+ *       struct pages.
+ */
+static int precopy_buffers(struct orangefs_bufmap *bufmap,
+                          int buffer_index,
+                          struct iov_iter *iter,
+                          size_t total_size)
+{
+       int ret = 0;
+       /*
+        * copy data from application/kernel by pulling it out
+        * of the iovec.
+        */
+
+
+       if (total_size) {
+               ret = orangefs_bufmap_copy_from_iovec(bufmap,
+                                                     iter,
+                                                     buffer_index,
+                                                     total_size);
+               if (ret < 0)
+                       gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
+                                  __func__,
+                                  (long)ret);
+       }
+
+       return ret;
+}
+
+/*
+ * Copy from client-core's address space to the buffers specified
+ * by the iovec up to total_size bytes.
+ * NOTE: the iovec can contain either addresses (which may in turn be
+ *       kernel-space or user-space addresses) or pointers to
+ *       struct pages.
+ */
+static int postcopy_buffers(struct orangefs_bufmap *bufmap,
+                           int buffer_index,
+                           struct iov_iter *iter,
+                           size_t total_size)
+{
+       int ret = 0;
+       /*
+        * copy data to application/kernel by pushing it out to
+        * the iovec. NOTE: target buffers can be addresses or
+        * struct page pointers.
+        */
+       if (total_size) {
+               ret = orangefs_bufmap_copy_to_iovec(bufmap,
+                                                   iter,
+                                                   buffer_index,
+                                                   total_size);
+               if (ret < 0)
+                       gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
+                               __func__,
+                               (long)ret);
+       }
+       return ret;
+}
+
+/*
+ * handles two possible error cases, depending on context.
+ *
+ * by design, our vfs i/o errors need to be handled in one of two ways,
+ * depending on where the error occurred.
+ *
+ * if the error happens in the waitqueue code because we either timed
+ * out or a signal was raised while waiting, we need to cancel the
+ * userspace i/o operation and free the op manually.  this is done to
+ * avoid having the device start writing application data to our shared
+ * bufmap pages without us expecting it.
+ *
+ * FIXME: POSSIBLE OPTIMIZATION:
+ * However, if we timed out or if we got a signal AND our upcall was never
+ * picked off the queue (i.e. we were in OP_VFS_STATE_WAITING), then we don't
+ * need to send a cancellation upcall. The way we could handle this is to
+ * set error_exit to 2 in such cases (and to 1 whenever a cancellation has
+ * to be sent) and have handle_error
+ * take care of this situation as well.
+ *
+ * if an orangefs sysint-level error occurred and i/o has been completed,
+ * there is no need to cancel the operation, as the user has finished
+ * using the bufmap page and so there is no danger in this case.  in
+ * this case, we wake up the device normally so that it may free the
+ * op, as normal.
+ *
+ * note the only reason this is a macro is because both read and write
+ * cases need the exact same handling code.
+ */
+#define handle_io_error()                                      \
+do {                                                           \
+       if (!op_state_serviced(new_op)) {                       \
+               orangefs_cancel_op_in_progress(new_op->tag);    \
+       } else {                                                \
+               complete(&new_op->done);                        \
+       }                                                       \
+       orangefs_bufmap_put(bufmap, buffer_index);              \
+       buffer_index = -1;                                      \
+} while (0)
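Because handle_io_error() is a macro, it silently depends on the caller's locals; the expansion below is shown only for clarity and describes the macro above as used inside wait_for_direct_io() later in this file.

/*
 * handle_io_error() expands in place and relies on the caller's locals
 * new_op, bufmap and buffer_index; inside wait_for_direct_io() it is
 * equivalent to:
 *
 *	if (!op_state_serviced(new_op))
 *		orangefs_cancel_op_in_progress(new_op->tag);
 *	else
 *		complete(&new_op->done);
 *	orangefs_bufmap_put(bufmap, buffer_index);
 *	buffer_index = -1;
 */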
+
+/*
+ * Post and wait for the I/O upcall to finish
+ */
+static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
+               loff_t *offset, struct iov_iter *iter,
+               size_t total_size, loff_t readahead_size)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
+       struct orangefs_bufmap *bufmap = NULL;
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int buffer_index = -1;
+       ssize_t ret;
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
+       if (!new_op)
+               return -ENOMEM;
+
+       /* synchronous I/O */
+       new_op->upcall.req.io.async_vfs_io = ORANGEFS_VFS_SYNC_IO;
+       new_op->upcall.req.io.readahead_size = readahead_size;
+       new_op->upcall.req.io.io_type = type;
+       new_op->upcall.req.io.refn = orangefs_inode->refn;
+
+populate_shared_memory:
+       /* get a shared buffer index */
+       ret = orangefs_bufmap_get(&bufmap, &buffer_index);
+       if (ret < 0) {
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s: orangefs_bufmap_get failure (%ld)\n",
+                            __func__, (long)ret);
+               goto out;
+       }
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): GET op %p -> buffer_index %d\n",
+                    __func__,
+                    handle,
+                    new_op,
+                    buffer_index);
+
+       new_op->uses_shared_memory = 1;
+       new_op->upcall.req.io.buf_index = buffer_index;
+       new_op->upcall.req.io.count = total_size;
+       new_op->upcall.req.io.offset = *offset;
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): offset: %llu total_size: %zd\n",
+                    __func__,
+                    handle,
+                    llu(*offset),
+                    total_size);
+       /*
+        * Stage 1: copy the buffers into client-core's address space
+        * precopy_buffers only pertains to writes.
+        */
+       if (type == ORANGEFS_IO_WRITE) {
+               ret = precopy_buffers(bufmap,
+                                     buffer_index,
+                                     iter,
+                                     total_size);
+               if (ret < 0)
+                       goto out;
+       }
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): Calling post_io_request with tag (%llu)\n",
+                    __func__,
+                    handle,
+                    llu(new_op->tag));
+
+       /* Stage 2: Service the I/O operation */
+       ret = service_operation(new_op,
+                               type == ORANGEFS_IO_WRITE ?
+                                       "file_write" :
+                                       "file_read",
+                               get_interruptible_flag(inode));
+
+       /*
+        * If service_operation() returns -EAGAIN #and# the operation was
+        * purged from orangefs_request_list or htable_ops_in_progress, then
+        * we know that the client was restarted, causing the shared memory
+        * area to be wiped clean.  To restart a write operation in this
+        * case, we must re-copy the data from the user's iovec to a NEW
+        * shared memory location. To restart a read operation, we must get
+        * a new shared memory location.
+        */
+       if (ret == -EAGAIN && op_state_purged(new_op)) {
+               orangefs_bufmap_put(bufmap, buffer_index);
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s:going to repopulate_shared_memory.\n",
+                            __func__);
+               goto populate_shared_memory;
+       }
+
+       if (ret < 0) {
+               handle_io_error();
+               /*
+                * don't write an error to syslog on signaled operation
+                * termination unless we've got debugging turned on, as
+                * this can happen regularly (i.e. ctrl-c)
+                */
+               if (ret == -EINTR)
+                       gossip_debug(GOSSIP_FILE_DEBUG,
+                                    "%s: returning error %ld\n", __func__,
+                                    (long)ret);
+               else
+                       gossip_err("%s: error in %s handle %pU, returning %zd\n",
+                               __func__,
+                               type == ORANGEFS_IO_READ ?
+                                       "read from" : "write to",
+                               handle, ret);
+               goto out;
+       }
+
+       /*
+        * Stage 3: Post copy buffers from client-core's address space
+        * postcopy_buffers only pertains to reads.
+        */
+       if (type == ORANGEFS_IO_READ) {
+               ret = postcopy_buffers(bufmap,
+                                      buffer_index,
+                                      iter,
+                                      new_op->downcall.resp.io.amt_complete);
+               if (ret < 0) {
+                       /*
+                        * put error codes in downcall so that handle_io_error()
+                        * preserves it properly
+                        */
+                       WARN_ON(!op_state_serviced(new_op));
+                       new_op->downcall.status = ret;
+                       handle_io_error();
+                       goto out;
+               }
+       }
+       gossip_debug(GOSSIP_FILE_DEBUG,
+           "%s(%pU): Amount written as returned by the sys-io call:%d\n",
+           __func__,
+           handle,
+           (int)new_op->downcall.resp.io.amt_complete);
+
+       ret = new_op->downcall.resp.io.amt_complete;
+
+       /*
+        * tell the device file owner waiting on I/O that this read has
+        * completed and it can return now.
+        */
+       complete(&new_op->done);
+
+out:
+       if (buffer_index >= 0) {
+               orangefs_bufmap_put(bufmap, buffer_index);
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): PUT buffer_index %d\n",
+                            __func__, handle, buffer_index);
+               buffer_index = -1;
+       }
+       op_release(new_op);
+       return ret;
+}
+
+/*
+ * Common entry point for read/write/readv/writev
+ * This function will dispatch it to either the direct I/O
+ * or buffered I/O path depending on the mount options and/or
+ * augmented/extended metadata attached to the file.
+ * Note: File extended attributes override any mount options.
+ */
+static ssize_t do_readv_writev(enum ORANGEFS_io_type type, struct file *file,
+               loff_t *offset, struct iov_iter *iter)
+{
+       struct inode *inode = file->f_mapping->host;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
+       size_t count = iov_iter_count(iter);
+       ssize_t total_count = 0;
+       ssize_t ret = -EINVAL;
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+               "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
+               __func__,
+               handle,
+               (int)count);
+
+       if (type == ORANGEFS_IO_WRITE) {
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): proceeding with offset : %llu, "
+                            "size %d\n",
+                            __func__,
+                            handle,
+                            llu(*offset),
+                            (int)count);
+       }
+
+       if (count == 0) {
+               ret = 0;
+               goto out;
+       }
+
+       while (iov_iter_count(iter)) {
+               size_t each_count = iov_iter_count(iter);
+               size_t amt_complete;
+
+               /* how much to transfer in this loop iteration */
+               if (each_count > orangefs_bufmap_size_query())
+                       each_count = orangefs_bufmap_size_query();
+
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): size of each_count(%d)\n",
+                            __func__,
+                            handle,
+                            (int)each_count);
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): BEFORE wait_for_io: offset is %d\n",
+                            __func__,
+                            handle,
+                            (int)*offset);
+
+               ret = wait_for_direct_io(type, inode, offset, iter,
+                               each_count, 0);
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): return from wait_for_io:%d\n",
+                            __func__,
+                            handle,
+                            (int)ret);
+
+               if (ret < 0)
+                       goto out;
+
+               *offset += ret;
+               total_count += ret;
+               amt_complete = ret;
+
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): AFTER wait_for_io: offset is %d\n",
+                            __func__,
+                            handle,
+                            (int)*offset);
+
+               /*
+                * if we got a short I/O operation,
+                * fall out and return what we got so far
+                */
+               if (amt_complete < each_count)
+                       break;
+       } /* end while */
+
+       if (total_count > 0)
+               ret = total_count;
+out:
+       if (ret > 0) {
+               if (type == ORANGEFS_IO_READ) {
+                       file_accessed(file);
+               } else {
+                       SetMtimeFlag(orangefs_inode);
+                       inode->i_mtime = CURRENT_TIME;
+                       mark_inode_dirty_sync(inode);
+               }
+       }
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): Value(%d) returned.\n",
+                    __func__,
+                    handle,
+                    (int)ret);
+
+       return ret;
+}
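The loop above bounds each transfer by the shared-buffer size. A small worked example follows, assuming for illustration that orangefs_bufmap_size_query() reports 4 MB (the real value comes from the client-core bufmap); example_chunk is a hypothetical helper, not part of the patch.

/*
 * Example (assumed 4 MB shared buffer): a 10 MB write is serviced as
 * three wait_for_direct_io() calls of 4 MB, 4 MB and 2 MB, with *offset
 * advanced by the amount completed after each call.  A short transfer
 * (amt_complete < each_count) breaks out of the loop and whatever was
 * moved so far is returned to the caller.
 */
static inline size_t example_chunk(size_t remaining, size_t bufmap_size)
{
	/* mirrors the each_count clamp in the loop above */
	return remaining > bufmap_size ? bufmap_size : remaining;
}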
+
+/*
+ * Read data from a specified offset in a file (referenced by inode).
+ * Data may be placed either in a user or kernel buffer.
+ */
+ssize_t orangefs_inode_read(struct inode *inode,
+                           struct iov_iter *iter,
+                           loff_t *offset,
+                           loff_t readahead_size)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       size_t count = iov_iter_count(iter);
+       size_t bufmap_size;
+       ssize_t ret = -EINVAL;
+
+       g_orangefs_stats.reads++;
+
+       bufmap_size = orangefs_bufmap_size_query();
+       if (count > bufmap_size) {
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s: count is too large (%zd/%zd)!\n",
+                            __func__, count, bufmap_size);
+               return -EINVAL;
+       }
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU) %zd@%llu\n",
+                    __func__,
+                    &orangefs_inode->refn.khandle,
+                    count,
+                    llu(*offset));
+
+       ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, offset, iter,
+                       count, readahead_size);
+       if (ret > 0)
+               *offset += ret;
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): Value(%zd) returned.\n",
+                    __func__,
+                    &orangefs_inode->refn.khandle,
+                    ret);
+
+       return ret;
+}
+
+static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       loff_t pos = *(&iocb->ki_pos);
+       ssize_t rc = 0;
+
+       BUG_ON(iocb->private);
+
+       gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");
+
+       g_orangefs_stats.reads++;
+
+       rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter);
+       iocb->ki_pos = pos;
+
+       return rc;
+}
+
+static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       loff_t pos;
+       ssize_t rc;
+
+       BUG_ON(iocb->private);
+
+       gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");
+
+       mutex_lock(&file->f_mapping->host->i_mutex);
+
+       /* Make sure generic_write_checks sees an up to date inode size. */
+       if (file->f_flags & O_APPEND) {
+               rc = orangefs_inode_getattr(file->f_mapping->host,
+                                        ORANGEFS_ATTR_SYS_SIZE, 0);
+               if (rc) {
+                       gossip_err("%s: orangefs_inode_getattr failed, rc:%zd:.\n",
+                                  __func__, rc);
+                       goto out;
+               }
+       }
+
+       if (file->f_pos > i_size_read(file->f_mapping->host))
+               orangefs_i_size_write(file->f_mapping->host, file->f_pos);
+
+       rc = generic_write_checks(iocb, iter);
+
+       if (rc <= 0) {
+               gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
+                          __func__, rc);
+               goto out;
+       }
+
+       /*
+        * if we are appending, generic_write_checks would have updated
+        * pos to the end of the file, so we will wait till now to set
+        * pos...
+        */
+       pos = *(&iocb->ki_pos);
+
+       rc = do_readv_writev(ORANGEFS_IO_WRITE,
+                            file,
+                            &pos,
+                            iter);
+       if (rc < 0) {
+               gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
+                          __func__, rc);
+               goto out;
+       }
+
+       iocb->ki_pos = pos;
+       g_orangefs_stats.writes++;
+
+out:
+
+       mutex_unlock(&file->f_mapping->host->i_mutex);
+       return rc;
+}
+
+/*
+ * Perform a miscellaneous operation on a file.
+ */
+static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       int ret = -ENOTTY;
+       __u64 val = 0;
+       unsigned long uval;
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_ioctl: called with cmd %d\n",
+                    cmd);
+
+       /*
+        * we understand some general ioctls on files, such as the immutable
+        * and append flags
+        */
+       if (cmd == FS_IOC_GETFLAGS) {
+               val = 0;
+               ret = orangefs_inode_getxattr(file_inode(file),
+                                             ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+                                             "user.pvfs2.meta_hint",
+                                             &val, sizeof(val));
+               if (ret < 0 && ret != -ENODATA)
+                       return ret;
+               else if (ret == -ENODATA)
+                       val = 0;
+               uval = val;
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n",
+                            (unsigned long long)uval);
+               return put_user(uval, (int __user *)arg);
+       } else if (cmd == FS_IOC_SETFLAGS) {
+               ret = 0;
+               if (get_user(uval, (int __user *)arg))
+                       return -EFAULT;
+               /*
+                * ORANGEFS_MIRROR_FL is set internally when the mirroring mode
+                * is turned on for a file. The user is not allowed to turn
+                * on this bit, but the bit is present if the user first gets
+                * the flags and then updates the flags with some new
+                * settings. So, we ignore it in the following edit. bligon.
+                */
+               if ((uval & ~ORANGEFS_MIRROR_FL) &
+                   (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
+                       gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
+                       return -EINVAL;
+               }
+               val = uval;
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n",
+                            (unsigned long long)val);
+               ret = orangefs_inode_setxattr(file_inode(file),
+                                             ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+                                             "user.pvfs2.meta_hint",
+                                             &val, sizeof(val), 0);
+       }
+
+       return ret;
+}
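For context, this is the path chattr(1)-style tools hit. The user-space sketch below uses only the standard FS_IOC_GETFLAGS/FS_IOC_SETFLAGS definitions from <linux/fs.h>; it is illustrative and not part of the patch.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Minimal sketch: mark a file on an orangefs mount append-only through
 * the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS handlers above. */
int example_make_append_only(const char *path)
{
	int fd = open(path, O_RDONLY);
	int flags;

	if (fd < 0)
		return -1;
	/* read the current flags (backed by the meta_hint xattr) */
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		goto err;
	/* orangefs accepts only IMMUTABLE, APPEND and NOATIME here */
	flags |= FS_APPEND_FL;
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}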
+
+/*
+ * Memory map a region of a file.
+ */
+static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_file_mmap: called on %s\n",
+                    (file ?
+                       (char *)file->f_path.dentry->d_name.name :
+                       (char *)"Unknown"));
+
+       /* set the sequential readahead hint */
+       vma->vm_flags |= VM_SEQ_READ;
+       vma->vm_flags &= ~VM_RAND_READ;
+
+       /* Use readonly mmap since we cannot support writable maps. */
+       return generic_file_readonly_mmap(file, vma);
+}
+
+#define mapping_nrpages(idata) ((idata)->nrpages)
+
+/*
+ * Called to notify the module that there are no more references to
+ * this file (i.e. no processes have it open).
+ *
+ * \note Not called when each file is closed.
+ */
+static int orangefs_file_release(struct inode *inode, struct file *file)
+{
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_file_release: called on %s\n",
+                    file->f_path.dentry->d_name.name);
+
+       orangefs_flush_inode(inode);
+
+       /*
+        * remove all associated inode pages from the page cache and mmap
+        * readahead cache (if any); this forces an expensive refresh of
+        * data for the next caller of mmap (or 'get_block' accesses)
+        */
+       if (file->f_path.dentry->d_inode &&
+           file->f_path.dentry->d_inode->i_mapping &&
+           mapping_nrpages(&file->f_path.dentry->d_inode->i_data))
+               truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
+                                    0);
+       return 0;
+}
+
+/*
+ * Push all data for a specific file onto permanent storage.
+ */
+static int orangefs_fsync(struct file *file,
+                      loff_t start,
+                      loff_t end,
+                      int datasync)
+{
+       int ret = -EINVAL;
+       struct orangefs_inode_s *orangefs_inode =
+               ORANGEFS_I(file->f_path.dentry->d_inode);
+       struct orangefs_kernel_op_s *new_op = NULL;
+
+       /* required call */
+       filemap_write_and_wait_range(file->f_mapping, start, end);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
+       if (!new_op)
+               return -ENOMEM;
+       new_op->upcall.req.fsync.refn = orangefs_inode->refn;
+
+       ret = service_operation(new_op,
+                       "orangefs_fsync",
+                       get_interruptible_flag(file->f_path.dentry->d_inode));
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_fsync got return value of %d\n",
+                    ret);
+
+       op_release(new_op);
+
+       orangefs_flush_inode(file->f_path.dentry->d_inode);
+       return ret;
+}
+
+/*
+ * Change the file pointer position for an instance of an open file.
+ *
+ * \note If .llseek is overridden, we must acquire lock as described in
+ *       Documentation/filesystems/Locking.
+ *
+ * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
+ * require many changes to the FS.
+ */
+static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
+{
+       int ret = -EINVAL;
+       struct inode *inode = file->f_path.dentry->d_inode;
+
+       if (!inode) {
+               gossip_err("orangefs_file_llseek: invalid inode (NULL)\n");
+               return ret;
+       }
+
+       if (origin == ORANGEFS_SEEK_END) {
+               /*
+                * revalidate the inode's file size.
+                * NOTE: We are only interested in file size here,
+                * so we set mask accordingly.
+                */
+               ret = orangefs_inode_getattr(inode, ORANGEFS_ATTR_SYS_SIZE, 0);
+               if (ret) {
+                       gossip_debug(GOSSIP_FILE_DEBUG,
+                                    "%s:%s:%d calling make bad inode\n",
+                                    __FILE__,
+                                    __func__,
+                                    __LINE__);
+                       orangefs_make_bad_inode(inode);
+                       return ret;
+               }
+       }
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_file_llseek: offset is %ld | origin is %d"
+                    " | inode size is %lu\n",
+                    (long)offset,
+                    origin,
+                    (unsigned long)file->f_path.dentry->d_inode->i_size);
+
+       return generic_file_llseek(file, offset, origin);
+}
+
+/*
+ * Support local locks (locks that only this kernel knows about)
+ * if Orangefs was mounted -o local_lock.
+ */
+static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+       int rc = -EINVAL;
+
+       if (ORANGEFS_SB(filp->f_inode->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
+               if (cmd == F_GETLK) {
+                       rc = 0;
+                       posix_test_lock(filp, fl);
+               } else {
+                       rc = posix_lock_file(filp, fl, NULL);
+               }
+       }
+
+       return rc;
+}
+
+/** ORANGEFS implementation of VFS file operations */
+const struct file_operations orangefs_file_operations = {
+       .llseek         = orangefs_file_llseek,
+       .read_iter      = orangefs_file_read_iter,
+       .write_iter     = orangefs_file_write_iter,
+       .lock           = orangefs_lock,
+       .unlocked_ioctl = orangefs_ioctl,
+       .mmap           = orangefs_file_mmap,
+       .open           = generic_file_open,
+       .release        = orangefs_file_release,
+       .fsync          = orangefs_fsync,
+};
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
new file mode 100644 (file)
index 0000000..d2923dc
--- /dev/null
@@ -0,0 +1,487 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Linux VFS inode operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+static int read_one_page(struct page *page)
+{
+       int ret;
+       int max_block;
+       ssize_t bytes_read = 0;
+       struct inode *inode = page->mapping->host;
+       const __u32 blocksize = PAGE_CACHE_SIZE;        /* inode->i_blksize */
+       const __u32 blockbits = PAGE_CACHE_SHIFT;       /* inode->i_blkbits */
+       struct iov_iter to;
+       struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
+
+       iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE);
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                   "orangefs_readpage called with page %p\n",
+                    page);
+
+       max_block = ((inode->i_size / blocksize) + 1);
+
+       if (page->index < max_block) {
+               loff_t blockptr_offset = (((loff_t) page->index) << blockbits);
+
+               bytes_read = orangefs_inode_read(inode,
+                                                &to,
+                                                &blockptr_offset,
+                                                inode->i_size);
+       }
+       /* this will only zero remaining unread portions of the page data */
+       iov_iter_zero(~0U, &to);
+       /* takes care of potential aliasing */
+       flush_dcache_page(page);
+       if (bytes_read < 0) {
+               ret = bytes_read;
+               SetPageError(page);
+       } else {
+               SetPageUptodate(page);
+               if (PageError(page))
+                       ClearPageError(page);
+               ret = 0;
+       }
+       /* unlock the page after the ->readpage() routine completes */
+       unlock_page(page);
+       return ret;
+}
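A worked instance of the offset arithmetic in read_one_page() above, assuming 4 KB pages; descriptive only, not part of the patch.

/*
 * Example (4 KB pages): for page->index == 3, blockptr_offset is
 * 3 << PAGE_CACHE_SHIFT = 12288, and up to PAGE_SIZE bytes are read into
 * the single bio_vec wrapped by iov_iter_bvec(); iov_iter_zero() then
 * clears whatever part of the page a short read (e.g. at EOF) left
 * untouched before the page is marked up to date.
 */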
+
+static int orangefs_readpage(struct file *file, struct page *page)
+{
+       return read_one_page(page);
+}
+
+static int orangefs_readpages(struct file *file,
+                          struct address_space *mapping,
+                          struct list_head *pages,
+                          unsigned nr_pages)
+{
+       int page_idx;
+       int ret;
+
+       gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_readpages called\n");
+
+       for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+               struct page *page;
+
+               page = list_entry(pages->prev, struct page, lru);
+               list_del(&page->lru);
+               if (!add_to_page_cache(page,
+                                      mapping,
+                                      page->index,
+                                      GFP_KERNEL)) {
+                       ret = read_one_page(page);
+                       gossip_debug(GOSSIP_INODE_DEBUG,
+                               "page added to cache, read_one_page returned: %d\n",
+                               ret);
+               } else {
+                       page_cache_release(page);
+               }
+       }
+       BUG_ON(!list_empty(pages));
+       return 0;
+}
+
+static void orangefs_invalidatepage(struct page *page,
+                                unsigned int offset,
+                                unsigned int length)
+{
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_invalidatepage called on page %p "
+                    "(offset is %u)\n",
+                    page,
+                    offset);
+
+       ClearPageUptodate(page);
+       ClearPageMappedToDisk(page);
+}
+
+static int orangefs_releasepage(struct page *page, gfp_t foo)
+{
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_releasepage called on page %p\n",
+                    page);
+       return 0;
+}
+
+/*
+ * Having a direct_IO entry point in the address_space_operations
+ * struct causes the kernel to allow us to use O_DIRECT on
+ * open. Nothing will ever call this thing, but in the future we
+ * will need to be able to use O_DIRECT on open in order to support
+ * AIO. Modeled after NFS, which does this too.
+ */
+/*
+ * static ssize_t orangefs_direct_IO(int rw,
+ *                     struct kiocb *iocb,
+ *                     struct iov_iter *iter,
+ *                     loff_t offset)
+ *{
+ *     gossip_debug(GOSSIP_INODE_DEBUG,
+ *                  "orangefs_direct_IO: %s\n",
+ *                  iocb->ki_filp->f_path.dentry->d_name.name);
+ *
+ *     return -EINVAL;
+ *}
+ */
+
+struct backing_dev_info orangefs_backing_dev_info = {
+       .name = "orangefs",
+       .ra_pages = 0,
+       .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+};
+
+/** ORANGEFS2 implementation of address space operations */
+const struct address_space_operations orangefs_address_operations = {
+       .readpage = orangefs_readpage,
+       .readpages = orangefs_readpages,
+       .invalidatepage = orangefs_invalidatepage,
+       .releasepage = orangefs_releasepage,
+/*     .direct_IO = orangefs_direct_IO */
+};
+
+static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       loff_t orig_size = i_size_read(inode);
+       int ret = -EINVAL;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n",
+                    __func__,
+                    get_khandle_from_ino(inode),
+                    &orangefs_inode->refn.khandle,
+                    orangefs_inode->refn.fs_id,
+                    iattr->ia_size);
+
+       truncate_setsize(inode, iattr->ia_size);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.truncate.refn = orangefs_inode->refn;
+       new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;
+
+       ret = service_operation(new_op, __func__,
+                               get_interruptible_flag(inode));
+
+       /*
+        * the truncate has no downcall members to retrieve, but
+        * the status value tells us if it went through ok or not
+        */
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs: orangefs_truncate got return value of %d\n",
+                    ret);
+
+       op_release(new_op);
+
+       if (ret != 0)
+               return ret;
+
+       /*
+        * Only change the c/mtime if we are changing the size or we are
+        * explicitly asked to change it.  This handles the semantic difference
+        * between truncate() and ftruncate() as implemented in the VFS.
+        *
+        * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
+        * special case where we need to update the times despite not having
+        * these flags set.  For all other operations the VFS sets these flags
+        * explicitly if it wants a timestamp update.
+        */
+       if (orig_size != i_size_read(inode) &&
+           !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
+               iattr->ia_ctime = iattr->ia_mtime =
+                       current_fs_time(inode->i_sb);
+               iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+       }
+
+       return ret;
+}
+
+/*
+ * Change attributes of an object referenced by dentry.
+ */
+int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+       int ret = -EINVAL;
+       struct inode *inode = dentry->d_inode;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_setattr: called on %s\n",
+                    dentry->d_name.name);
+
+       ret = inode_change_ok(inode, iattr);
+       if (ret)
+               goto out;
+
+       if ((iattr->ia_valid & ATTR_SIZE) &&
+           iattr->ia_size != i_size_read(inode)) {
+               ret = orangefs_setattr_size(inode, iattr);
+               if (ret)
+                       goto out;
+       }
+
+       setattr_copy(inode, iattr);
+       mark_inode_dirty(inode);
+
+       ret = orangefs_inode_setattr(inode, iattr);
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_setattr: inode_setattr returned %d\n",
+                    ret);
+
+       if (!ret && (iattr->ia_valid & ATTR_MODE))
+               /* change mode on a file that has ACLs */
+               ret = posix_acl_chmod(inode, inode->i_mode);
+
+out:
+       gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n", ret);
+       return ret;
+}
+
+/*
+ * Obtain attributes of an object given a dentry
+ */
+int orangefs_getattr(struct vfsmount *mnt,
+                 struct dentry *dentry,
+                 struct kstat *kstat)
+{
+       int ret = -ENOENT;
+       struct inode *inode = dentry->d_inode;
+       struct orangefs_inode_s *orangefs_inode = NULL;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_getattr: called on %s\n",
+                    dentry->d_name.name);
+
+       /*
+        * A getattr expects that all fields/attributes of the inode are
+        * refreshed, so we don't have much of a choice but to refresh
+        * all of the attributes.
+        */
+       ret = orangefs_inode_getattr(inode, ORANGEFS_ATTR_SYS_ALL_NOHINT, 0);
+       if (ret == 0) {
+               generic_fillattr(inode, kstat);
+               /* override block size reported to stat */
+               orangefs_inode = ORANGEFS_I(inode);
+               kstat->blksize = orangefs_inode->blksize;
+       } else {
+               /* assume an I/O error and flag inode as bad */
+               gossip_debug(GOSSIP_INODE_DEBUG,
+                            "%s:%s:%d calling make bad inode\n",
+                            __FILE__,
+                            __func__,
+                            __LINE__);
+               orangefs_make_bad_inode(inode);
+       }
+       return ret;
+}
+
+int orangefs_permission(struct inode *inode, int mask)
+{
+       int ret;
+
+       if (mask & MAY_NOT_BLOCK)
+               return -ECHILD;
+
+       gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);
+
+       /* Make sure the permission (and other common attrs) are up to date. */
+       ret = orangefs_inode_getattr(inode,
+           ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE, 0);
+       if (ret < 0)
+               return ret;
+
+       return generic_permission(inode, mask);
+}
+
+/* ORANGEFS2 implementation of VFS inode operations for files */
+struct inode_operations orangefs_file_inode_operations = {
+       .get_acl = orangefs_get_acl,
+       .set_acl = orangefs_set_acl,
+       .setattr = orangefs_setattr,
+       .getattr = orangefs_getattr,
+       .setxattr = generic_setxattr,
+       .getxattr = generic_getxattr,
+       .listxattr = orangefs_listxattr,
+       .removexattr = generic_removexattr,
+       .permission = orangefs_permission,
+};
+
+static int orangefs_init_iops(struct inode *inode)
+{
+       inode->i_mapping->a_ops = &orangefs_address_operations;
+
+       switch (inode->i_mode & S_IFMT) {
+       case S_IFREG:
+               inode->i_op = &orangefs_file_inode_operations;
+               inode->i_fop = &orangefs_file_operations;
+               inode->i_blkbits = PAGE_CACHE_SHIFT;
+               break;
+       case S_IFLNK:
+               inode->i_op = &orangefs_symlink_inode_operations;
+               break;
+       case S_IFDIR:
+               inode->i_op = &orangefs_dir_inode_operations;
+               inode->i_fop = &orangefs_dir_operations;
+               break;
+       default:
+               gossip_debug(GOSSIP_INODE_DEBUG,
+                            "%s: unsupported mode\n",
+                            __func__);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Given an ORANGEFS object identifier (fsid, handle), convert it into an ino_t type
+ * that will be used as a hash-index from where the handle will
+ * be searched for in the VFS hash table of inodes.
+ */
+static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
+{
+       if (!ref)
+               return 0;
+       return orangefs_khandle_to_ino(&(ref->khandle));
+}
+
+/*
+ * Called to set up an inode from iget5_locked.
+ */
+static int orangefs_set_inode(struct inode *inode, void *data)
+{
+       struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
+       struct orangefs_inode_s *orangefs_inode = NULL;
+
+       /* Make sure that we have sane parameters */
+       if (!data || !inode)
+               return 0;
+       orangefs_inode = ORANGEFS_I(inode);
+       if (!orangefs_inode)
+               return 0;
+       orangefs_inode->refn.fs_id = ref->fs_id;
+       orangefs_inode->refn.khandle = ref->khandle;
+       return 0;
+}
+
+/*
+ * Called to determine if handles match.
+ */
+static int orangefs_test_inode(struct inode *inode, void *data)
+{
+       struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
+       struct orangefs_inode_s *orangefs_inode = NULL;
+
+       orangefs_inode = ORANGEFS_I(inode);
+       return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle), &(ref->khandle))
+               && orangefs_inode->refn.fs_id == ref->fs_id);
+}
+
+/*
+ * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS
+ * file handle.
+ *
+ * @sb: the file system super block instance.
+ * @ref: The ORANGEFS object for which we are trying to locate an inode structure.
+ */
+struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref *ref)
+{
+       struct inode *inode = NULL;
+       unsigned long hash;
+       int error;
+
+       hash = orangefs_handle_hash(ref);
+       inode = iget5_locked(sb, hash, orangefs_test_inode, orangefs_set_inode, ref);
+       if (!inode || !(inode->i_state & I_NEW))
+               return inode;
+
+       error = orangefs_inode_getattr(inode,
+           ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE, 0);
+       if (error) {
+               iget_failed(inode);
+               return ERR_PTR(error);
+       }
+
+       inode->i_ino = hash;    /* needed for stat etc */
+       orangefs_init_iops(inode);
+       unlock_new_inode(inode);
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "iget handle %pU, fsid %d hash %ld i_ino %lu\n",
+                    &ref->khandle,
+                    ref->fs_id,
+                    hash,
+                    inode->i_ino);
+
+       return inode;
+}
+
+/*
+ * Allocate an inode for a newly created object and insert it into the
+ * inode hash.
+ */
+struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
+               int mode, dev_t dev, struct orangefs_object_kref *ref)
+{
+       unsigned long hash = orangefs_handle_hash(ref);
+       struct inode *inode;
+       int error;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_new_inode: called\n"
+                    "(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n",
+                    sb,
+                    MAJOR(dev),
+                    MINOR(dev),
+                    mode);
+
+       inode = new_inode(sb);
+       if (!inode)
+               return NULL;
+
+       orangefs_set_inode(inode, ref);
+       inode->i_ino = hash;    /* needed for stat etc */
+
+       error = orangefs_inode_getattr(inode,
+           ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE, 0);
+       if (error)
+               goto out_iput;
+
+       orangefs_init_iops(inode);
+
+       inode->i_mode = mode;
+       inode->i_uid = current_fsuid();
+       inode->i_gid = current_fsgid();
+       inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       inode->i_size = PAGE_CACHE_SIZE;
+       inode->i_rdev = dev;
+
+       error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
+       if (error < 0)
+               goto out_iput;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "Initializing ACL's for inode %pU\n",
+                    get_khandle_from_ino(inode));
+       orangefs_init_acl(inode, dir);
+       return inode;
+
+out_iput:
+       iput(inode);
+       return ERR_PTR(error);
+}
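
For reference, the lookup-or-create flow in orangefs_iget() above is the standard iget5_locked() pattern. A minimal, generic sketch of that pattern follows; the my_* names are hypothetical illustrations and are not part of this patch.

#include <linux/err.h>
#include <linux/fs.h>

struct my_key {
	unsigned long id;
};

/* test callback: does this cached inode match the key? */
static int my_test(struct inode *inode, void *data)
{
	return inode->i_ino == ((struct my_key *)data)->id;
}

/* set callback: initialize a freshly allocated inode from the key */
static int my_set(struct inode *inode, void *data)
{
	inode->i_ino = ((struct my_key *)data)->id;
	return 0;
}

/* hypothetical: would fetch attributes from the backing store */
static int my_fill(struct inode *inode)
{
	return 0;
}

static struct inode *my_iget(struct super_block *sb, struct my_key *key)
{
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, key->id, my_test, my_set, key);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;			/* found in the inode cache */

	err = my_fill(inode);
	if (err) {
		iget_failed(inode);		/* unhash and drop the new inode */
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);		/* clear I_NEW, wake any waiters */
	return inode;
}
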
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
new file mode 100644 (file)
index 0000000..8fc55c6
--- /dev/null
@@ -0,0 +1,447 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Linux VFS namei operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+/*
+ * Get a newly allocated inode to go with a negative dentry.
+ */
+static int orangefs_create(struct inode *dir,
+                       struct dentry *dentry,
+                       umode_t mode,
+                       bool exclusive)
+{
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       struct inode *inode;
+       int ret;
+
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s: called\n", __func__);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_CREATE);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.create.parent_refn = parent->refn;
+
+       fill_default_sys_attrs(new_op->upcall.req.create.attributes,
+                              ORANGEFS_TYPE_METAFILE, mode);
+
+       strncpy(new_op->upcall.req.create.d_name,
+               dentry->d_name.name, ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Create Got ORANGEFS handle %pU on fsid %d (ret=%d)\n",
+                    &new_op->downcall.resp.create.refn.khandle,
+                    new_op->downcall.resp.create.refn.fs_id, ret);
+
+       if (ret < 0) {
+               gossip_debug(GOSSIP_NAME_DEBUG,
+                            "%s: failed with error code %d\n",
+                            __func__, ret);
+               goto out;
+       }
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0,
+                               &new_op->downcall.resp.create.refn);
+       if (IS_ERR(inode)) {
+               gossip_err("*** Failed to allocate orangefs file inode\n");
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Assigned file inode new number of %pU\n",
+                    get_khandle_from_ino(inode));
+
+       d_instantiate(dentry, inode);
+       unlock_new_inode(inode);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Inode (Regular File) %pU -> %s\n",
+                    get_khandle_from_ino(inode),
+                    dentry->d_name.name);
+
+       SetMtimeFlag(parent);
+       dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+       mark_inode_dirty_sync(dir);
+       ret = 0;
+out:
+       op_release(new_op);
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s: returning %d\n", __func__, ret);
+       return ret;
+}
+
+/*
+ * Attempt to resolve an object name (dentry->d_name), parent handle, and
+ * fsid into a handle for the object.
+ */
+static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
+                                  unsigned int flags)
+{
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       struct inode *inode;
+       struct dentry *res;
+       int ret = -EINVAL;
+
+       /*
+        * in theory we could skip a lookup here (if the intent is to
+        * create) in order to avoid a potentially failed lookup, but
+        * leaving it in can skip a valid lookup and try to create a file
+        * that already exists (e.g. the vfs already handles checking for
+        * -EEXIST on O_EXCL opens, which is broken if we skip this lookup
+        * in the create path)
+        */
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s called on %s\n",
+                    __func__, dentry->d_name.name);
+
+       if (dentry->d_name.len > (ORANGEFS_NAME_LEN - 1))
+               return ERR_PTR(-ENAMETOOLONG);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP);
+       if (!new_op)
+               return ERR_PTR(-ENOMEM);
+
+       new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;
+
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s:%s:%d using parent %pU\n",
+                    __FILE__,
+                    __func__,
+                    __LINE__,
+                    &parent->refn.khandle);
+       new_op->upcall.req.lookup.parent_refn = parent->refn;
+
+       strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "%s: doing lookup on %s under %pU,%d (follow=%s)\n",
+                    __func__,
+                    new_op->upcall.req.lookup.d_name,
+                    &new_op->upcall.req.lookup.parent_refn.khandle,
+                    new_op->upcall.req.lookup.parent_refn.fs_id,
+                    ((new_op->upcall.req.lookup.sym_follow ==
+                      ORANGEFS_LOOKUP_LINK_FOLLOW) ? "yes" : "no"));
+
+       ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Lookup Got %pU, fsid %d (ret=%d)\n",
+                    &new_op->downcall.resp.lookup.refn.khandle,
+                    new_op->downcall.resp.lookup.refn.fs_id,
+                    ret);
+
+       if (ret < 0) {
+               if (ret == -ENOENT) {
+                       /*
+                        * if no inode was found, add a negative dentry to
+                        * dcache anyway; if we don't, we don't hold expected
+                        * lookup semantics and we most noticeably break
+                        * during directory renames.
+                        *
+                        * however, if the operation failed or exited, do not
+                        * add the dentry (e.g. in the case that a touch is
+                        * issued on a file that already exists that was
+                        * interrupted during this lookup -- no need to add
+                        * another negative dentry for an existing file)
+                        */
+
+                       gossip_debug(GOSSIP_NAME_DEBUG,
+                                    "orangefs_lookup: Adding *negative* dentry "
+                                    "%p for %s\n",
+                                    dentry,
+                                    dentry->d_name.name);
+
+                       d_add(dentry, NULL);
+                       res = NULL;
+                       goto out;
+               }
+
+               /* must be a non-recoverable error */
+               res = ERR_PTR(ret);
+               goto out;
+       }
+
+       inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
+       if (IS_ERR(inode)) {
+               gossip_debug(GOSSIP_NAME_DEBUG,
+                       "error %ld from iget\n", PTR_ERR(inode));
+               res = ERR_CAST(inode);
+               goto out;
+       }
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "%s:%s:%d "
+                    "Found good inode [%lu] with count [%d]\n",
+                    __FILE__,
+                    __func__,
+                    __LINE__,
+                    inode->i_ino,
+                    (int)atomic_read(&inode->i_count));
+
+       /* update dentry/inode pair into dcache */
+       res = d_splice_alias(inode, dentry);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Lookup success (inode ct = %d)\n",
+                    (int)atomic_read(&inode->i_count));
+out:
+       op_release(new_op);
+       return res;
+}
+
+/* return 0 on success; non-zero otherwise */
+static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
+{
+       struct inode *inode = dentry->d_inode;
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       int ret;
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "%s: called on %s\n"
+                    "  (inode %pU): Parent is %pU | fs_id %d\n",
+                    __func__,
+                    dentry->d_name.name,
+                    get_khandle_from_ino(inode),
+                    &parent->refn.khandle,
+                    parent->refn.fs_id);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_REMOVE);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.remove.parent_refn = parent->refn;
+       strncpy(new_op->upcall.req.remove.d_name, dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op, "orangefs_unlink",
+                               get_interruptible_flag(inode));
+
+       /* when request is serviced properly, free req op struct */
+       op_release(new_op);
+
+       if (!ret) {
+               drop_nlink(inode);
+
+               SetMtimeFlag(parent);
+               dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+               mark_inode_dirty_sync(dir);
+       }
+       return ret;
+}
+
+static int orangefs_symlink(struct inode *dir,
+                        struct dentry *dentry,
+                        const char *symname)
+{
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       struct inode *inode;
+       int mode = 0755;
+       int ret;
+
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s: called\n", __func__);
+
+       if (!symname)
+               return -EINVAL;
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_SYMLINK);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.sym.parent_refn = parent->refn;
+
+       fill_default_sys_attrs(new_op->upcall.req.sym.attributes,
+                              ORANGEFS_TYPE_SYMLINK,
+                              mode);
+
+       strncpy(new_op->upcall.req.sym.entry_name,
+               dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+       strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Symlink Got ORANGEFS handle %pU on fsid %d (ret=%d)\n",
+                    &new_op->downcall.resp.sym.refn.khandle,
+                    new_op->downcall.resp.sym.refn.fs_id, ret);
+
+       if (ret < 0) {
+               gossip_debug(GOSSIP_NAME_DEBUG,
+                           "%s: failed with error code %d\n",
+                           __func__, ret);
+               goto out;
+       }
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0,
+                               &new_op->downcall.resp.sym.refn);
+       if (IS_ERR(inode)) {
+               gossip_err("*** Failed to allocate orangefs symlink inode\n");
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Assigned symlink inode new number of %pU\n",
+                    get_khandle_from_ino(inode));
+
+       d_instantiate(dentry, inode);
+       unlock_new_inode(inode);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Inode (Symlink) %pU -> %s\n",
+                    get_khandle_from_ino(inode),
+                    dentry->d_name.name);
+
+       SetMtimeFlag(parent);
+       dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+       mark_inode_dirty_sync(dir);
+       ret = 0;
+out:
+       op_release(new_op);
+       return ret;
+}
+
+static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       struct inode *inode;
+       int ret;
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.mkdir.parent_refn = parent->refn;
+
+       fill_default_sys_attrs(new_op->upcall.req.mkdir.attributes,
+                             ORANGEFS_TYPE_DIRECTORY, mode);
+
+       strncpy(new_op->upcall.req.mkdir.d_name,
+               dentry->d_name.name, ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Mkdir Got ORANGEFS handle %pU on fsid %d\n",
+                    &new_op->downcall.resp.mkdir.refn.khandle,
+                    new_op->downcall.resp.mkdir.refn.fs_id);
+
+       if (ret < 0) {
+               gossip_debug(GOSSIP_NAME_DEBUG,
+                            "%s: failed with error code %d\n",
+                            __func__, ret);
+               goto out;
+       }
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0,
+                               &new_op->downcall.resp.mkdir.refn);
+       if (IS_ERR(inode)) {
+               gossip_err("*** Failed to allocate orangefs dir inode\n");
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Assigned dir inode new number of %pU\n",
+                    get_khandle_from_ino(inode));
+
+       d_instantiate(dentry, inode);
+       unlock_new_inode(inode);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Inode (Directory) %pU -> %s\n",
+                    get_khandle_from_ino(inode),
+                    dentry->d_name.name);
+
+       /*
+        * NOTE: we have no good way to keep nlink consistent for directories
+        * across clients; keep constant at 1.
+        */
+       SetMtimeFlag(parent);
+       dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+       mark_inode_dirty_sync(dir);
+out:
+       op_release(new_op);
+       return ret;
+}
+
+static int orangefs_rename(struct inode *old_dir,
+                       struct dentry *old_dentry,
+                       struct inode *new_dir,
+                       struct dentry *new_dentry)
+{
+       struct orangefs_kernel_op_s *new_op;
+       int ret;
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "orangefs_rename: called (%s/%s => %s/%s) ct=%d\n",
+                    old_dentry->d_parent->d_name.name,
+                    old_dentry->d_name.name,
+                    new_dentry->d_parent->d_name.name,
+                    new_dentry->d_name.name,
+                    d_count(new_dentry));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_RENAME);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.rename.old_parent_refn = ORANGEFS_I(old_dir)->refn;
+       new_op->upcall.req.rename.new_parent_refn = ORANGEFS_I(new_dir)->refn;
+
+       strncpy(new_op->upcall.req.rename.d_old_name,
+               old_dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+       strncpy(new_op->upcall.req.rename.d_new_name,
+               new_dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op,
+                               "orangefs_rename",
+                               get_interruptible_flag(old_dentry->d_inode));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "orangefs_rename: got downcall status %d\n",
+                    ret);
+
+       if (new_dentry->d_inode)
+               new_dentry->d_inode->i_ctime = CURRENT_TIME;
+
+       op_release(new_op);
+       return ret;
+}
+
+/* ORANGEFS implementation of VFS inode operations for directories */
+struct inode_operations orangefs_dir_inode_operations = {
+       .lookup = orangefs_lookup,
+       .get_acl = orangefs_get_acl,
+       .set_acl = orangefs_set_acl,
+       .create = orangefs_create,
+       .unlink = orangefs_unlink,
+       .symlink = orangefs_symlink,
+       .mkdir = orangefs_mkdir,
+       .rmdir = orangefs_unlink,
+       .rename = orangefs_rename,
+       .setattr = orangefs_setattr,
+       .getattr = orangefs_getattr,
+       .setxattr = generic_setxattr,
+       .getxattr = generic_getxattr,
+       .removexattr = generic_removexattr,
+       .listxattr = orangefs_listxattr,
+       .permission = orangefs_permission,
+};
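
The error handling in orangefs_lookup() above follows the generic ->lookup contract: a backend ENOENT still instantiates a negative dentry with d_add(), while a found inode is attached via d_splice_alias(). A stripped-down sketch, with a hypothetical my_backend_lookup() standing in for the upcall machinery:

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

/* hypothetical stand-in for the real name-resolution upcall */
static int my_backend_lookup(struct inode *dir, const struct qstr *name,
			     struct inode **inode)
{
	return -ENOENT;
}

static struct dentry *my_lookup(struct inode *dir, struct dentry *dentry,
				unsigned int flags)
{
	struct inode *inode;
	int err;

	err = my_backend_lookup(dir, &dentry->d_name, &inode);
	if (err == -ENOENT) {
		d_add(dentry, NULL);	/* cache a negative dentry */
		return NULL;
	}
	if (err)
		return ERR_PTR(err);	/* hard error: cache nothing */

	/* attach the dentry, or reuse an existing alias for this inode */
	return d_splice_alias(inode, dentry);
}
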
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
new file mode 100644 (file)
index 0000000..c60019d
--- /dev/null
@@ -0,0 +1,577 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+DECLARE_WAIT_QUEUE_HEAD(orangefs_bufmap_init_waitq);
+
+/* used to describe mapped buffers */
+struct orangefs_bufmap_desc {
+       void *uaddr;                    /* user space address pointer */
+       struct page **page_array;       /* array of mapped pages */
+       int array_count;                /* size of above arrays */
+       struct list_head list_link;
+};
+
+static struct orangefs_bufmap {
+       atomic_t refcnt;
+
+       int desc_size;
+       int desc_shift;
+       int desc_count;
+       int total_size;
+       int page_count;
+
+       struct page **page_array;
+       struct orangefs_bufmap_desc *desc_array;
+
+       /* array to track usage of buffer descriptors */
+       int *buffer_index_array;
+       spinlock_t buffer_index_lock;
+
+       /* array to track usage of buffer descriptors for readdir */
+       int readdir_index_array[ORANGEFS_READDIR_DEFAULT_DESC_COUNT];
+       spinlock_t readdir_index_lock;
+} *__orangefs_bufmap;
+
+static DEFINE_SPINLOCK(orangefs_bufmap_lock);
+
+static void
+orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
+{
+       int i;
+
+       for (i = 0; i < bufmap->page_count; i++)
+               page_cache_release(bufmap->page_array[i]);
+}
+
+static void
+orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
+{
+       kfree(bufmap->page_array);
+       kfree(bufmap->desc_array);
+       kfree(bufmap->buffer_index_array);
+       kfree(bufmap);
+}
+
+static struct orangefs_bufmap *orangefs_bufmap_ref(void)
+{
+       struct orangefs_bufmap *bufmap = NULL;
+
+       spin_lock(&orangefs_bufmap_lock);
+       if (__orangefs_bufmap) {
+               bufmap = __orangefs_bufmap;
+               atomic_inc(&bufmap->refcnt);
+       }
+       spin_unlock(&orangefs_bufmap_lock);
+       return bufmap;
+}
+
+static void orangefs_bufmap_unref(struct orangefs_bufmap *bufmap)
+{
+       if (atomic_dec_and_lock(&bufmap->refcnt, &orangefs_bufmap_lock)) {
+               __orangefs_bufmap = NULL;
+               spin_unlock(&orangefs_bufmap_lock);
+
+               orangefs_bufmap_unmap(bufmap);
+               orangefs_bufmap_free(bufmap);
+       }
+}
+
+/*
+ * XXX: Can the size and shift change while the caller gives up the
+ * XXX: lock between calling this and doing something useful?
+ */
+
+int orangefs_bufmap_size_query(void)
+{
+       struct orangefs_bufmap *bufmap;
+       int size = 0;
+
+       bufmap = orangefs_bufmap_ref();
+       if (bufmap) {
+               size = bufmap->desc_size;
+               orangefs_bufmap_unref(bufmap);
+       }
+       return size;
+}
+
+int orangefs_bufmap_shift_query(void)
+{
+       struct orangefs_bufmap *bufmap;
+       int shift = 0;
+
+       bufmap = orangefs_bufmap_ref();
+       if (bufmap) {
+               shift = bufmap->desc_shift;
+               orangefs_bufmap_unref(bufmap);
+       }
+       return shift;
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
+static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);
+
+/*
+ * orangefs_get_bufmap_init
+ *
+ * If the shared memory system, including the buffer_index_array,
+ * is available, this returns 1; otherwise it returns 0.
+ */
+int orangefs_get_bufmap_init(void)
+{
+       return __orangefs_bufmap ? 1 : 0;
+}
+
+static struct orangefs_bufmap *
+orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
+{
+       struct orangefs_bufmap *bufmap;
+
+       bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
+       if (!bufmap)
+               goto out;
+
+       atomic_set(&bufmap->refcnt, 1);
+       bufmap->total_size = user_desc->total_size;
+       bufmap->desc_count = user_desc->count;
+       bufmap->desc_size = user_desc->size;
+       bufmap->desc_shift = ilog2(bufmap->desc_size);
+
+       spin_lock_init(&bufmap->buffer_index_lock);
+       bufmap->buffer_index_array =
+               kcalloc(bufmap->desc_count, sizeof(int), GFP_KERNEL);
+       if (!bufmap->buffer_index_array) {
+               gossip_err("orangefs: could not allocate %d buffer indices\n",
+                               bufmap->desc_count);
+               goto out_free_bufmap;
+       }
+       spin_lock_init(&bufmap->readdir_index_lock);
+
+       bufmap->desc_array =
+               kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
+                       GFP_KERNEL);
+       if (!bufmap->desc_array) {
+               gossip_err("orangefs: could not allocate %d descriptors\n",
+                               bufmap->desc_count);
+               goto out_free_index_array;
+       }
+
+       bufmap->page_count = bufmap->total_size / PAGE_SIZE;
+
+       /* allocate storage to track our page mappings */
+       bufmap->page_array =
+               kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
+       if (!bufmap->page_array)
+               goto out_free_desc_array;
+
+       return bufmap;
+
+out_free_desc_array:
+       kfree(bufmap->desc_array);
+out_free_index_array:
+       kfree(bufmap->buffer_index_array);
+out_free_bufmap:
+       kfree(bufmap);
+out:
+       return NULL;
+}
+
+static int
+orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
+               struct ORANGEFS_dev_map_desc *user_desc)
+{
+       int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
+       int offset = 0, ret, i;
+
+       /* map the pages */
+       ret = get_user_pages_fast((unsigned long)user_desc->ptr,
+                            bufmap->page_count, 1, bufmap->page_array);
+
+       if (ret < 0)
+               return ret;
+
+       if (ret != bufmap->page_count) {
+               gossip_err("orangefs error: asked for %d pages, only got %d.\n",
+                               bufmap->page_count, ret);
+
+               for (i = 0; i < ret; i++) {
+                       SetPageError(bufmap->page_array[i]);
+                       page_cache_release(bufmap->page_array[i]);
+               }
+               return -ENOMEM;
+       }
+
+       /*
+        * ideally we want to get kernel space pointers for each page, but
+        * we can't kmap that many pages at once if highmem is being used.
+        * so instead, we just kmap/kunmap the page address each time the
+        * kaddr is needed.
+        */
+       for (i = 0; i < bufmap->page_count; i++)
+               flush_dcache_page(bufmap->page_array[i]);
+
+       /* build a list of available descriptors */
+       for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
+               bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
+               bufmap->desc_array[i].array_count = pages_per_desc;
+               bufmap->desc_array[i].uaddr =
+                   (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
+               offset += pages_per_desc;
+       }
+
+       return 0;
+}
+
+/*
+ * orangefs_bufmap_initialize()
+ *
+ * initializes the mapped buffer interface
+ *
+ * returns 0 on success, -errno on failure
+ */
+int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
+{
+       struct orangefs_bufmap *bufmap;
+       int ret = -EINVAL;
+
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "orangefs_bufmap_initialize: called (ptr ("
+                    "%p) sz (%d) cnt(%d).\n",
+                    user_desc->ptr,
+                    user_desc->size,
+                    user_desc->count);
+
+       /*
+        * sanity check alignment and size of buffer that caller wants to
+        * work with
+        */
+       if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
+           (unsigned long)user_desc->ptr) {
+               gossip_err("orangefs error: memory alignment (front). %p\n",
+                          user_desc->ptr);
+               goto out;
+       }
+
+       if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
+           != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
+               gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
+                          user_desc->ptr,
+                          user_desc->total_size);
+               goto out;
+       }
+
+       if (user_desc->total_size != (user_desc->size * user_desc->count)) {
+               gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
+                          user_desc->total_size,
+                          user_desc->size,
+                          user_desc->count);
+               goto out;
+       }
+
+       if ((user_desc->size % PAGE_SIZE) != 0) {
+               gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
+                          user_desc->size);
+               goto out;
+       }
+
+       ret = -ENOMEM;
+       bufmap = orangefs_bufmap_alloc(user_desc);
+       if (!bufmap)
+               goto out;
+
+       ret = orangefs_bufmap_map(bufmap, user_desc);
+       if (ret)
+               goto out_free_bufmap;
+
+       spin_lock(&orangefs_bufmap_lock);
+       if (__orangefs_bufmap) {
+               spin_unlock(&orangefs_bufmap_lock);
+               gossip_err("orangefs: error: bufmap already initialized.\n");
+               ret = -EALREADY;
+               goto out_unmap_bufmap;
+       }
+       __orangefs_bufmap = bufmap;
+       spin_unlock(&orangefs_bufmap_lock);
+
+       /*
+        * If there are operations in orangefs_bufmap_init_waitq, wake them up.
+        * This scenario occurs when the client-core is restarted and I/O
+        * requests in the in-progress or waiting tables are restarted.  I/O
+        * requests cannot be restarted until the shared memory system is
+        * completely re-initialized, so we put the I/O requests in this
+        * waitq until initialization has completed.  NOTE:  the I/O requests
+        * are also on a timer, so they don't wait forever just in case the
+        * client-core doesn't come back up.
+        */
+       wake_up_interruptible(&orangefs_bufmap_init_waitq);
+
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "orangefs_bufmap_initialize: exiting normally\n");
+       return 0;
+
+out_unmap_bufmap:
+       orangefs_bufmap_unmap(bufmap);
+out_free_bufmap:
+       orangefs_bufmap_free(bufmap);
+out:
+       return ret;
+}
+
+/*
+ * orangefs_bufmap_finalize()
+ *
+ * shuts down the mapped buffer interface and releases any resources
+ * associated with it
+ *
+ * no return value
+ */
+void orangefs_bufmap_finalize(void)
+{
+       gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
+       BUG_ON(!__orangefs_bufmap);
+       orangefs_bufmap_unref(__orangefs_bufmap);
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "orangefs_bufmap_finalize: exiting normally\n");
+}
+
+struct slot_args {
+       int slot_count;
+       int *slot_array;
+       spinlock_t *slot_lock;
+       wait_queue_head_t *slot_wq;
+};
+
+static int wait_for_a_slot(struct slot_args *slargs, int *buffer_index)
+{
+       int ret = -1;
+       int i = 0;
+       DEFINE_WAIT(wait_entry);
+
+       while (1) {
+               /*
+                * check for available desc, slot_lock is the appropriate
+                * index_lock
+                */
+               spin_lock(slargs->slot_lock);
+               prepare_to_wait_exclusive(slargs->slot_wq,
+                                         &wait_entry,
+                                         TASK_INTERRUPTIBLE);
+               for (i = 0; i < slargs->slot_count; i++)
+                       if (slargs->slot_array[i] == 0) {
+                               slargs->slot_array[i] = 1;
+                               *buffer_index = i;
+                               ret = 0;
+                               break;
+                       }
+               spin_unlock(slargs->slot_lock);
+
+               /* if we acquired a buffer, then break out of while */
+               if (ret == 0)
+                       break;
+
+               if (!signal_pending(current)) {
+                       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                                    "[BUFMAP]: waiting %d "
+                                    "seconds for a slot\n",
+                                    slot_timeout_secs);
+                       if (!schedule_timeout(slot_timeout_secs * HZ)) {
+                               gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                                            "*** wait_for_a_slot timed out\n");
+                               ret = -ETIMEDOUT;
+                               break;
+                       }
+                       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                         "[BUFMAP]: woken up by a slot becoming available.\n");
+                       continue;
+               }
+
+               gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs: %s interrupted.\n",
+                            __func__);
+               ret = -EINTR;
+               break;
+       }
+
+       spin_lock(slargs->slot_lock);
+       finish_wait(slargs->slot_wq, &wait_entry);
+       spin_unlock(slargs->slot_lock);
+       return ret;
+}
+
+static void put_back_slot(struct slot_args *slargs, int buffer_index)
+{
+       /* slot_lock is the appropriate index_lock */
+       spin_lock(slargs->slot_lock);
+       if (buffer_index < 0 || buffer_index >= slargs->slot_count) {
+               spin_unlock(slargs->slot_lock);
+               return;
+       }
+
+       /* put the desc back on the queue */
+       slargs->slot_array[buffer_index] = 0;
+       spin_unlock(slargs->slot_lock);
+
+       /* wake up anyone who may be sleeping on the queue */
+       wake_up_interruptible(slargs->slot_wq);
+}
+
+/*
+ * orangefs_bufmap_get()
+ *
+ * gets a free mapped buffer descriptor, will sleep until one becomes
+ * available if necessary
+ *
+ * returns 0 on success, -errno on failure
+ */
+int orangefs_bufmap_get(struct orangefs_bufmap **mapp, int *buffer_index)
+{
+       struct orangefs_bufmap *bufmap = orangefs_bufmap_ref();
+       struct slot_args slargs;
+       int ret;
+
+       if (!bufmap) {
+               gossip_err("orangefs: please confirm that the pvfs2-client daemon is running.\n");
+               return -EIO;
+       }
+
+       slargs.slot_count = bufmap->desc_count;
+       slargs.slot_array = bufmap->buffer_index_array;
+       slargs.slot_lock = &bufmap->buffer_index_lock;
+       slargs.slot_wq = &bufmap_waitq;
+       ret = wait_for_a_slot(&slargs, buffer_index);
+       if (ret)
+               orangefs_bufmap_unref(bufmap);
+       *mapp = bufmap;
+       return ret;
+}
+
+/*
+ * orangefs_bufmap_put()
+ *
+ * returns a mapped buffer descriptor to the collection
+ *
+ * no return value
+ */
+void orangefs_bufmap_put(struct orangefs_bufmap *bufmap, int buffer_index)
+{
+       struct slot_args slargs;
+
+       slargs.slot_count = bufmap->desc_count;
+       slargs.slot_array = bufmap->buffer_index_array;
+       slargs.slot_lock = &bufmap->buffer_index_lock;
+       slargs.slot_wq = &bufmap_waitq;
+       put_back_slot(&slargs, buffer_index);
+       orangefs_bufmap_unref(bufmap);
+}
+
+/*
+ * orangefs_readdir_index_get()
+ *
+ * gets a free descriptor, will sleep until one becomes
+ * available if necessary.
+ * Although the readdir buffers are not mapped into kernel space,
+ * we could do that at a later point in time. Regardless, these
+ * indices are used by the client-core.
+ *
+ * returns 0 on success, -errno on failure
+ */
+int orangefs_readdir_index_get(struct orangefs_bufmap **mapp, int *buffer_index)
+{
+       struct orangefs_bufmap *bufmap = orangefs_bufmap_ref();
+       struct slot_args slargs;
+       int ret;
+
+       if (!bufmap) {
+               gossip_err("orangefs: please confirm that the pvfs2-client daemon is running.\n");
+               return -EIO;
+       }
+
+       slargs.slot_count = ORANGEFS_READDIR_DEFAULT_DESC_COUNT;
+       slargs.slot_array = bufmap->readdir_index_array;
+       slargs.slot_lock = &bufmap->readdir_index_lock;
+       slargs.slot_wq = &readdir_waitq;
+       ret = wait_for_a_slot(&slargs, buffer_index);
+       if (ret)
+               orangefs_bufmap_unref(bufmap);
+       *mapp = bufmap;
+       return ret;
+}
+
+void orangefs_readdir_index_put(struct orangefs_bufmap *bufmap, int buffer_index)
+{
+       struct slot_args slargs;
+
+       slargs.slot_count = ORANGEFS_READDIR_DEFAULT_DESC_COUNT;
+       slargs.slot_array = bufmap->readdir_index_array;
+       slargs.slot_lock = &bufmap->readdir_index_lock;
+       slargs.slot_wq = &readdir_waitq;
+       put_back_slot(&slargs, buffer_index);
+       orangefs_bufmap_unref(bufmap);
+}
+
+/*
+ * We've been handed an iovec; copy it into the shared memory
+ * descriptor at "buffer_index".
+ */
+int orangefs_bufmap_copy_from_iovec(struct orangefs_bufmap *bufmap,
+                               struct iov_iter *iter,
+                               int buffer_index,
+                               size_t size)
+{
+       struct orangefs_bufmap_desc *to = &bufmap->desc_array[buffer_index];
+       int i;
+
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "%s: buffer_index:%d: size:%zu:\n",
+                    __func__, buffer_index, size);
+
+       for (i = 0; size; i++) {
+               struct page *page = to->page_array[i];
+               size_t n = size;
+               if (n > PAGE_SIZE)
+                       n = PAGE_SIZE;
+               n = copy_page_from_iter(page, 0, n, iter);
+               if (!n)
+                       return -EFAULT;
+               size -= n;
+       }
+       return 0;
+}
+
+/*
+ * We've been handed an iovec; fill it from the shared memory
+ * descriptor at "buffer_index".
+ */
+int orangefs_bufmap_copy_to_iovec(struct orangefs_bufmap *bufmap,
+                                   struct iov_iter *iter,
+                                   int buffer_index,
+                                   size_t size)
+{
+       struct orangefs_bufmap_desc *from = &bufmap->desc_array[buffer_index];
+       int i;
+
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "%s: buffer_index:%d: size:%zu:\n",
+                    __func__, buffer_index, size);
+
+       for (i = 0; size; i++) {
+               struct page *page = from->page_array[i];
+               size_t n = size;
+               if (n > PAGE_SIZE)
+                       n = PAGE_SIZE;
+               n = copy_page_to_iter(page, 0, n, iter);
+               if (!n)
+                       return -EFAULT;
+               size -= n;
+       }
+       return 0;
+}
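
Taken together, the slot helpers above give I/O paths a simple bracket around each transfer. A hedged sketch of the expected calling sequence (the real callers live in fs/orangefs/file.c, elsewhere in this patch, and do more error handling):

static int example_write_path(struct iov_iter *iter, size_t size)
{
	struct orangefs_bufmap *bufmap;
	int buffer_index;
	int ret;

	/* may sleep until a shared-memory descriptor slot is free */
	ret = orangefs_bufmap_get(&bufmap, &buffer_index);
	if (ret)
		return ret;

	/* stage the caller's data into the slot for the client-core */
	ret = orangefs_bufmap_copy_from_iovec(bufmap, iter, buffer_index, size);

	/* ... hand buffer_index to the client-core via an upcall ... */

	orangefs_bufmap_put(bufmap, buffer_index);
	return ret;
}
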
diff --git a/fs/orangefs/orangefs-bufmap.h b/fs/orangefs/orangefs-bufmap.h
new file mode 100644 (file)
index 0000000..dff55e2
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#ifndef __ORANGEFS_BUFMAP_H
+#define __ORANGEFS_BUFMAP_H
+
+struct orangefs_bufmap;
+
+int orangefs_bufmap_size_query(void);
+
+int orangefs_bufmap_shift_query(void);
+
+int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc);
+
+int orangefs_get_bufmap_init(void);
+
+void orangefs_bufmap_finalize(void);
+
+int orangefs_bufmap_get(struct orangefs_bufmap **mapp, int *buffer_index);
+
+void orangefs_bufmap_put(struct orangefs_bufmap *bufmap, int buffer_index);
+
+int orangefs_readdir_index_get(struct orangefs_bufmap **mapp, int *buffer_index);
+
+void orangefs_readdir_index_put(struct orangefs_bufmap *bufmap, int buffer_index);
+
+int orangefs_bufmap_copy_from_iovec(struct orangefs_bufmap *bufmap,
+                               struct iov_iter *iter,
+                               int buffer_index,
+                               size_t size);
+
+int orangefs_bufmap_copy_to_iovec(struct orangefs_bufmap *bufmap,
+                             struct iov_iter *iter,
+                             int buffer_index,
+                             size_t size);
+
+size_t orangefs_bufmap_copy_to_user_task_iovec(struct task_struct *tsk,
+                                          struct iovec *iovec,
+                                          unsigned long nr_segs,
+                                          struct orangefs_bufmap *bufmap,
+                                          int buffer_index,
+                                          size_t bytes_to_be_copied);
+
+#endif /* __ORANGEFS_BUFMAP_H */
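
orangefs_bufmap_initialize() above rejects descriptors that are not page aligned or internally consistent. Below is a userspace-side sketch of a descriptor that satisfies those checks; the struct layout and the mechanism by which the descriptor reaches the kernel (the client-core's device ioctl) are outside this patch, so the example_* names are purely illustrative.

#include <stdlib.h>
#include <unistd.h>

struct example_map_desc {
	void *ptr;
	int total_size;
	int size;
	int count;
};

static int example_build_desc(struct example_map_desc *desc)
{
	long page = sysconf(_SC_PAGESIZE);

	desc->count = 5;
	desc->size = 4 * page;			/* a multiple of the page size */
	desc->total_size = desc->size * desc->count;

	/* page-aligned base address, as the front/back alignment checks require */
	if (posix_memalign(&desc->ptr, page, desc->total_size))
		return -1;
	return 0;
}
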
diff --git a/fs/orangefs/orangefs-cache.c b/fs/orangefs/orangefs-cache.c
new file mode 100644 (file)
index 0000000..3b3de91
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+/* tags assigned to kernel upcall operations */
+static __u64 next_tag_value;
+static DEFINE_SPINLOCK(next_tag_value_lock);
+
+/* the orangefs memory caches */
+
+/* a cache for orangefs upcall/downcall operations */
+static struct kmem_cache *op_cache;
+
+int op_cache_initialize(void)
+{
+       op_cache = kmem_cache_create("orangefs_op_cache",
+                                    sizeof(struct orangefs_kernel_op_s),
+                                    0,
+                                    ORANGEFS_CACHE_CREATE_FLAGS,
+                                    NULL);
+
+       if (!op_cache) {
+               gossip_err("Cannot create orangefs_op_cache\n");
+               return -ENOMEM;
+       }
+
+       /* initialize our atomic tag counter */
+       spin_lock(&next_tag_value_lock);
+       next_tag_value = 100;
+       spin_unlock(&next_tag_value_lock);
+       return 0;
+}
+
+int op_cache_finalize(void)
+{
+       kmem_cache_destroy(op_cache);
+       return 0;
+}
+
+char *get_opname_string(struct orangefs_kernel_op_s *new_op)
+{
+       if (new_op) {
+               __s32 type = new_op->upcall.type;
+
+               if (type == ORANGEFS_VFS_OP_FILE_IO)
+                       return "OP_FILE_IO";
+               else if (type == ORANGEFS_VFS_OP_LOOKUP)
+                       return "OP_LOOKUP";
+               else if (type == ORANGEFS_VFS_OP_CREATE)
+                       return "OP_CREATE";
+               else if (type == ORANGEFS_VFS_OP_GETATTR)
+                       return "OP_GETATTR";
+               else if (type == ORANGEFS_VFS_OP_REMOVE)
+                       return "OP_REMOVE";
+               else if (type == ORANGEFS_VFS_OP_MKDIR)
+                       return "OP_MKDIR";
+               else if (type == ORANGEFS_VFS_OP_READDIR)
+                       return "OP_READDIR";
+               else if (type == ORANGEFS_VFS_OP_READDIRPLUS)
+                       return "OP_READDIRPLUS";
+               else if (type == ORANGEFS_VFS_OP_SETATTR)
+                       return "OP_SETATTR";
+               else if (type == ORANGEFS_VFS_OP_SYMLINK)
+                       return "OP_SYMLINK";
+               else if (type == ORANGEFS_VFS_OP_RENAME)
+                       return "OP_RENAME";
+               else if (type == ORANGEFS_VFS_OP_STATFS)
+                       return "OP_STATFS";
+               else if (type == ORANGEFS_VFS_OP_TRUNCATE)
+                       return "OP_TRUNCATE";
+               else if (type == ORANGEFS_VFS_OP_MMAP_RA_FLUSH)
+                       return "OP_MMAP_RA_FLUSH";
+               else if (type == ORANGEFS_VFS_OP_FS_MOUNT)
+                       return "OP_FS_MOUNT";
+               else if (type == ORANGEFS_VFS_OP_FS_UMOUNT)
+                       return "OP_FS_UMOUNT";
+               else if (type == ORANGEFS_VFS_OP_GETXATTR)
+                       return "OP_GETXATTR";
+               else if (type == ORANGEFS_VFS_OP_SETXATTR)
+                       return "OP_SETXATTR";
+               else if (type == ORANGEFS_VFS_OP_LISTXATTR)
+                       return "OP_LISTXATTR";
+               else if (type == ORANGEFS_VFS_OP_REMOVEXATTR)
+                       return "OP_REMOVEXATTR";
+               else if (type == ORANGEFS_VFS_OP_PARAM)
+                       return "OP_PARAM";
+               else if (type == ORANGEFS_VFS_OP_PERF_COUNT)
+                       return "OP_PERF_COUNT";
+               else if (type == ORANGEFS_VFS_OP_CANCEL)
+                       return "OP_CANCEL";
+               else if (type == ORANGEFS_VFS_OP_FSYNC)
+                       return "OP_FSYNC";
+               else if (type == ORANGEFS_VFS_OP_FSKEY)
+                       return "OP_FSKEY";
+       }
+       return "OP_UNKNOWN?";
+}
+
+struct orangefs_kernel_op_s *op_alloc(__s32 type)
+{
+       struct orangefs_kernel_op_s *new_op = NULL;
+
+       new_op = kmem_cache_zalloc(op_cache, GFP_KERNEL);
+       if (new_op) {
+               INIT_LIST_HEAD(&new_op->list);
+               spin_lock_init(&new_op->lock);
+               init_waitqueue_head(&new_op->waitq);
+
+               atomic_set(&new_op->ref_count, 1);
+
+               init_completion(&new_op->done);
+
+               new_op->upcall.type = ORANGEFS_VFS_OP_INVALID;
+               new_op->downcall.type = ORANGEFS_VFS_OP_INVALID;
+               new_op->downcall.status = -1;
+
+               new_op->op_state = OP_VFS_STATE_UNKNOWN;
+               new_op->tag = 0;
+
+               /* initialize the op specific tag and upcall credentials */
+               spin_lock(&next_tag_value_lock);
+               new_op->tag = next_tag_value++;
+               if (next_tag_value == 0)
+                       next_tag_value = 100;
+               spin_unlock(&next_tag_value_lock);
+               new_op->upcall.type = type;
+               new_op->attempts = 0;
+               gossip_debug(GOSSIP_CACHE_DEBUG,
+                            "Alloced OP (%p: %llu %s)\n",
+                            new_op,
+                            llu(new_op->tag),
+                            get_opname_string(new_op));
+
+               new_op->upcall.uid = from_kuid(current_user_ns(),
+                                              current_fsuid());
+
+               new_op->upcall.gid = from_kgid(current_user_ns(),
+                                              current_fsgid());
+       } else {
+               gossip_err("op_alloc: kmem_cache_zalloc failed!\n");
+       }
+       return new_op;
+}
+
+void __op_release(struct orangefs_kernel_op_s *orangefs_op)
+{
+       if (orangefs_op) {
+               gossip_debug(GOSSIP_CACHE_DEBUG,
+                            "Releasing OP (%p: %llu)\n",
+                            orangefs_op,
+                            llu(orangefs_op->tag));
+               kmem_cache_free(op_cache, orangefs_op);
+       } else {
+               gossip_err("NULL pointer in op_release\n");
+       }
+}
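
The cache above backs a single allocate / service / release cycle per VFS request, the same pattern namei.c uses throughout. A condensed sketch, assuming op_cache_initialize() has already run at module init; the GETATTR choice and the example_* name are illustrative only:

static int example_op_round_trip(struct inode *inode)
{
	struct orangefs_kernel_op_s *new_op;
	int ret;

	new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
	if (!new_op)
		return -ENOMEM;

	/* ... fill new_op->upcall.req.* with the request parameters ... */

	ret = service_operation(new_op, __func__,
				get_interruptible_flag(inode));

	/* ... on success, consume new_op->downcall.resp.* ... */

	op_release(new_op);
	return ret;
}
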
diff --git a/fs/orangefs/orangefs-debug.h b/fs/orangefs/orangefs-debug.h
new file mode 100644 (file)
index 0000000..387db17
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/* This file just defines debugging masks to be used with the gossip
+ * logging utility.  All debugging masks for ORANGEFS are kept here to make
+ * sure we don't have collisions.
+ */
+
+#ifndef __ORANGEFS_DEBUG_H
+#define __ORANGEFS_DEBUG_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+#define GOSSIP_NO_DEBUG                ((__u64)0)
+
+#define GOSSIP_SUPER_DEBUG             ((__u64)1 << 0)
+#define GOSSIP_INODE_DEBUG             ((__u64)1 << 1)
+#define GOSSIP_FILE_DEBUG              ((__u64)1 << 2)
+#define GOSSIP_DIR_DEBUG               ((__u64)1 << 3)
+#define GOSSIP_UTILS_DEBUG             ((__u64)1 << 4)
+#define GOSSIP_WAIT_DEBUG              ((__u64)1 << 5)
+#define GOSSIP_ACL_DEBUG               ((__u64)1 << 6)
+#define GOSSIP_DCACHE_DEBUG            ((__u64)1 << 7)
+#define GOSSIP_DEV_DEBUG               ((__u64)1 << 8)
+#define GOSSIP_NAME_DEBUG              ((__u64)1 << 9)
+#define GOSSIP_BUFMAP_DEBUG            ((__u64)1 << 10)
+#define GOSSIP_CACHE_DEBUG             ((__u64)1 << 11)
+#define GOSSIP_DEBUGFS_DEBUG           ((__u64)1 << 12)
+#define GOSSIP_XATTR_DEBUG             ((__u64)1 << 13)
+#define GOSSIP_INIT_DEBUG              ((__u64)1 << 14)
+#define GOSSIP_SYSFS_DEBUG             ((__u64)1 << 15)
+
+#define GOSSIP_MAX_NR                 16
+#define GOSSIP_MAX_DEBUG              (((__u64)1 << GOSSIP_MAX_NR) - 1)
+
+/* function prototypes */
+__u64 ORANGEFS_kmod_eventlog_to_mask(const char *event_logging);
+__u64 ORANGEFS_debug_eventlog_to_mask(const char *event_logging);
+char *ORANGEFS_debug_mask_to_eventlog(__u64 mask);
+char *ORANGEFS_kmod_mask_to_eventlog(__u64 mask);
+
+/* a private internal type */
+struct __keyword_mask_s {
+       const char *keyword;
+       __u64 mask_val;
+};
+
+/*
+ * Map all kmod keywords to kmod debug masks here. Keep this
+ * structure "packed":
+ *
+ *   "all" is always last...
+ *
+ *   keyword     mask_val     index
+ *     foo          1           0
+ *     bar          2           1
+ *     baz          4           2
+ *     qux          8           3
+ *      .           .           .
+ */
+static struct __keyword_mask_s s_kmod_keyword_mask_map[] = {
+       {"super", GOSSIP_SUPER_DEBUG},
+       {"inode", GOSSIP_INODE_DEBUG},
+       {"file", GOSSIP_FILE_DEBUG},
+       {"dir", GOSSIP_DIR_DEBUG},
+       {"utils", GOSSIP_UTILS_DEBUG},
+       {"wait", GOSSIP_WAIT_DEBUG},
+       {"acl", GOSSIP_ACL_DEBUG},
+       {"dcache", GOSSIP_DCACHE_DEBUG},
+       {"dev", GOSSIP_DEV_DEBUG},
+       {"name", GOSSIP_NAME_DEBUG},
+       {"bufmap", GOSSIP_BUFMAP_DEBUG},
+       {"cache", GOSSIP_CACHE_DEBUG},
+       {"debugfs", GOSSIP_DEBUGFS_DEBUG},
+       {"xattr", GOSSIP_XATTR_DEBUG},
+       {"init", GOSSIP_INIT_DEBUG},
+       {"sysfs", GOSSIP_SYSFS_DEBUG},
+       {"none", GOSSIP_NO_DEBUG},
+       {"all", GOSSIP_MAX_DEBUG}
+};
+
+static const int num_kmod_keyword_mask_map = (int)
+       (sizeof(s_kmod_keyword_mask_map) / sizeof(struct __keyword_mask_s));
+
+#endif /* __ORANGEFS_DEBUG_H */
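
s_kmod_keyword_mask_map is meant to be scanned when a keyword string is written to the debugfs files. A minimal illustration of that mapping follows; it is not the real debug_string_to_mask() referenced in orangefs-debugfs.c (which also handles the client-side mask), and the substring match here is a simplification:

#include <linux/string.h>

static __u64 example_keywords_to_mask(const char *keywords)
{
	__u64 mask = GOSSIP_NO_DEBUG;
	int i;

	for (i = 0; i < num_kmod_keyword_mask_map; i++)
		if (strstr(keywords, s_kmod_keyword_mask_map[i].keyword))
			mask |= s_kmod_keyword_mask_map[i].mask_val;

	return mask;
}
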
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
new file mode 100644 (file)
index 0000000..9eb7972
--- /dev/null
@@ -0,0 +1,457 @@
+/*
+ * What:               /sys/kernel/debug/orangefs/debug-help
+ * Date:               June 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     List of client and kernel debug keywords.
+ *
+ *
+ * What:               /sys/kernel/debug/orangefs/client-debug
+ * Date:               June 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Debug setting for "the client", the userspace
+ *                     helper for the kernel module.
+ *
+ *
+ * What:               /sys/kernel/debug/orangefs/kernel-debug
+ * Date:               June 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Debug setting for the orangefs kernel module.
+ *
+ *                     Any of the keywords, or comma-separated lists
+ *                     of keywords, from debug-help can be catted to
+ *                     client-debug or kernel-debug.
+ *
+ *                     "none", "all" and "verbose" are special keywords
+ *                     for client-debug. Setting client-debug to "all"
+ *                     is kind of like trying to drink water from a
+ *                     fire hose; "verbose" triggers most of the same
+ *                     output, minus the constant flow of output from
+ *                     the main wait loop.
+ *
+ *                     "none" and "all" are similar settings for
+ *                     kernel-debug; there is no need for a "verbose".
+ */
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+
+#include "orangefs-debugfs.h"
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+static int orangefs_debug_disabled = 1;
+
+static int orangefs_debug_help_open(struct inode *, struct file *);
+
+const struct file_operations debug_help_fops = {
+       .open           = orangefs_debug_help_open,
+       .read           = seq_read,
+       .release        = seq_release,
+       .llseek         = seq_lseek,
+};
+
+static void *help_start(struct seq_file *, loff_t *);
+static void *help_next(struct seq_file *, void *, loff_t *);
+static void help_stop(struct seq_file *, void *);
+static int help_show(struct seq_file *, void *);
+
+static const struct seq_operations help_debug_ops = {
+       .start  = help_start,
+       .next   = help_next,
+       .stop   = help_stop,
+       .show   = help_show,
+};
+
+/*
+ * Used to protect data in ORANGEFS_KMOD_DEBUG_FILE and
+ * ORANGEFS_CLIENT_DEBUG_FILE.
+ */
+static DEFINE_MUTEX(orangefs_debug_lock);
+
+int orangefs_debug_open(struct inode *, struct file *);
+
+static ssize_t orangefs_debug_read(struct file *,
+                                char __user *,
+                                size_t,
+                                loff_t *);
+
+static ssize_t orangefs_debug_write(struct file *,
+                                 const char __user *,
+                                 size_t,
+                                 loff_t *);
+
+static const struct file_operations kernel_debug_fops = {
+       .open           = orangefs_debug_open,
+       .read           = orangefs_debug_read,
+       .write          = orangefs_debug_write,
+       .llseek         = generic_file_llseek,
+};
+
+/*
+ * initialize kmod debug operations, create orangefs debugfs dir and
+ * ORANGEFS_KMOD_DEBUG_HELP_FILE.
+ */
+int orangefs_debugfs_init(void)
+{
+       int rc = -ENOMEM;
+
+       debug_dir = debugfs_create_dir("orangefs", NULL);
+       if (!debug_dir)
+               goto out;
+
+       help_file_dentry = debugfs_create_file(ORANGEFS_KMOD_DEBUG_HELP_FILE,
+                                 0444,
+                                 debug_dir,
+                                 debug_help_string,
+                                 &debug_help_fops);
+       if (!help_file_dentry)
+               goto out;
+
+       orangefs_debug_disabled = 0;
+       rc = 0;
+
+out:
+       if (rc)
+               orangefs_debugfs_cleanup();
+
+       return rc;
+}
+
+void orangefs_debugfs_cleanup(void)
+{
+       debugfs_remove_recursive(debug_dir);
+}
+
+/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
+static int orangefs_debug_help_open(struct inode *inode, struct file *file)
+{
+       int rc = -ENODEV;
+       int ret;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_help_open: start\n");
+
+       if (orangefs_debug_disabled)
+               goto out;
+
+       ret = seq_open(file, &help_debug_ops);
+       if (ret)
+               goto out;
+
+       ((struct seq_file *)(file->private_data))->private = inode->i_private;
+
+       rc = 0;
+
+out:
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_help_open: rc:%d:\n",
+                    rc);
+       return rc;
+}
+
+/*
+ * I think start always gets called again after stop. Start
+ * needs to return NULL when it is done. The whole "payload"
+ * in this case is a single (long) string, so by the second
+ * time we get to start (pos = 1), we're done.
+ */
+static void *help_start(struct seq_file *m, loff_t *pos)
+{
+       void *payload = NULL;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n");
+
+       if (*pos == 0)
+               payload = m->private;
+
+       return payload;
+}
+
+static void *help_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n");
+
+       return NULL;
+}
+
+static void help_stop(struct seq_file *m, void *p)
+{
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n");
+}
+
+static int help_show(struct seq_file *m, void *v)
+{
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_show: start\n");
+
+       seq_puts(m, v);
+
+       return 0;
+}
+
+/*
+ * initialize the kernel-debug file.
+ */
+int orangefs_kernel_debug_init(void)
+{
+       int rc = -ENOMEM;
+       struct dentry *ret;
+       char *k_buffer = NULL;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+       k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+       if (!k_buffer)
+               goto out;
+
+       if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+               strcpy(k_buffer, kernel_debug_string);
+               strcat(k_buffer, "\n");
+       } else {
+               strcpy(k_buffer, "none\n");
+               pr_info("%s: overflow 1!\n", __func__);
+       }
+
+       ret = debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE,
+                                 0444,
+                                 debug_dir,
+                                 k_buffer,
+                                 &kernel_debug_fops);
+       if (!ret) {
+               pr_info("%s: failed to create %s.\n",
+                       __func__,
+                       ORANGEFS_KMOD_DEBUG_FILE);
+               goto out;
+       }
+
+       rc = 0;
+
+out:
+       if (rc)
+               orangefs_debugfs_cleanup();
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+       return rc;
+}
+
+/*
+ * initialize the client-debug file.
+ */
+int orangefs_client_debug_init(void)
+{
+       int rc = -ENOMEM;
+       char *c_buffer = NULL;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+       c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+       if (!c_buffer)
+               goto out;
+
+       if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+               strcpy(c_buffer, client_debug_string);
+               strcat(c_buffer, "\n");
+       } else {
+               strcpy(c_buffer, "none\n");
+               pr_info("%s: overflow! 2\n", __func__);
+       }
+
+       client_debug_dentry = debugfs_create_file(ORANGEFS_CLIENT_DEBUG_FILE,
+                                                 0444,
+                                                 debug_dir,
+                                                 c_buffer,
+                                                 &kernel_debug_fops);
+       if (!client_debug_dentry) {
+               pr_info("%s: failed to create %s.\n",
+                       __func__,
+                       ORANGEFS_CLIENT_DEBUG_FILE);
+               goto out;
+       }
+
+       rc = 0;
+
+out:
+       if (rc)
+               orangefs_debugfs_cleanup();
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+       return rc;
+}
+
+/* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/
+int orangefs_debug_open(struct inode *inode, struct file *file)
+{
+       int rc = -ENODEV;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "%s: orangefs_debug_disabled: %d\n",
+                    __func__,
+                    orangefs_debug_disabled);
+
+       if (orangefs_debug_disabled)
+               goto out;
+
+       rc = 0;
+       mutex_lock(&orangefs_debug_lock);
+       file->private_data = inode->i_private;
+       mutex_unlock(&orangefs_debug_lock);
+
+out:
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_open: rc: %d\n",
+                    rc);
+       return rc;
+}
+
+static ssize_t orangefs_debug_read(struct file *file,
+                                char __user *ubuf,
+                                size_t count,
+                                loff_t *ppos)
+{
+       char *buf;
+       int sprintf_ret;
+       ssize_t read_ret = -ENOMEM;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_read: start\n");
+
+       buf = kmalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+       if (!buf)
+               goto out;
+
+       mutex_lock(&orangefs_debug_lock);
+       sprintf_ret = sprintf(buf, "%s", (char *)file->private_data);
+       mutex_unlock(&orangefs_debug_lock);
+
+       read_ret = simple_read_from_buffer(ubuf, count, ppos, buf, sprintf_ret);
+
+       kfree(buf);
+
+out:
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_read: ret: %zd\n",
+                    read_ret);
+
+       return read_ret;
+}
+
+static ssize_t orangefs_debug_write(struct file *file,
+                                 const char __user *ubuf,
+                                 size_t count,
+                                 loff_t *ppos)
+{
+       char *buf;
+       int rc = -EFAULT;
+       size_t silly = 0;
+       char *debug_string;
+       struct orangefs_kernel_op_s *new_op = NULL;
+       struct client_debug_mask c_mask = { NULL, 0, 0 };
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+               "orangefs_debug_write: %s\n",
+               file->f_path.dentry->d_name.name);
+
+       /*
+        * Thwart users who try to jam a ridiculous number
+        * of bytes into the debug file...
+        */
+       if (count > ORANGEFS_MAX_DEBUG_STRING_LEN) {
+               silly = count;
+               count = ORANGEFS_MAX_DEBUG_STRING_LEN;
+       }
+
+       buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+       if (!buf)
+               goto out;
+
+       if (copy_from_user(buf, ubuf, count - 1)) {
+               gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                            "%s: copy_from_user failed!\n",
+                            __func__);
+               goto out;
+       }
+
+       /*
+        * Map the keyword string from userspace into a valid debug mask.
+        * The mapping process involves mapping the human-inputted string
+        * into a valid mask, and then rebuilding the string from the
+        * verified valid mask.
+        *
+        * A service operation is required to set a new client-side
+        * debug mask.
+        */
+       if (!strcmp(file->f_path.dentry->d_name.name,
+                   ORANGEFS_KMOD_DEBUG_FILE)) {
+               debug_string_to_mask(buf, &gossip_debug_mask, 0);
+               debug_mask_to_string(&gossip_debug_mask, 0);
+               debug_string = kernel_debug_string;
+               gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                            "New kernel debug string is %s\n",
+                            kernel_debug_string);
+       } else {
+               /* Can't reset client debug mask if client is not running. */
+               if (is_daemon_in_service()) {
+                       pr_info("%s: Client not running :%d:\n",
+                               __func__,
+                               is_daemon_in_service());
+                       goto out;
+               }
+
+               debug_string_to_mask(buf, &c_mask, 1);
+               debug_mask_to_string(&c_mask, 1);
+               debug_string = client_debug_string;
+
+               new_op = op_alloc(ORANGEFS_VFS_OP_PARAM);
+               if (!new_op) {
+                       pr_info("%s: op_alloc failed!\n", __func__);
+                       goto out;
+               }
+
+               new_op->upcall.req.param.op =
+                       ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES;
+               new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET;
+               memset(new_op->upcall.req.param.s_value,
+                      0,
+                      ORANGEFS_MAX_DEBUG_STRING_LEN);
+               sprintf(new_op->upcall.req.param.s_value,
+                       "%llx %llx\n",
+                       c_mask.mask1,
+                       c_mask.mask2);
+
+               /* service_operation returns 0 on success... */
+               rc = service_operation(new_op,
+                                      "orangefs_param",
+                                       ORANGEFS_OP_INTERRUPTIBLE);
+
+               if (rc)
+                       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                                    "%s: service_operation failed! rc:%d:\n",
+                                    __func__,
+                                    rc);
+
+               op_release(new_op);
+       }
+
+       mutex_lock(&orangefs_debug_lock);
+       memset(file->f_inode->i_private, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
+       sprintf((char *)file->f_inode->i_private, "%s\n", debug_string);
+       mutex_unlock(&orangefs_debug_lock);
+
+       *ppos += count;
+       if (silly)
+               rc = silly;
+       else
+               rc = count;
+
+out:
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_write: rc: %d\n",
+                    rc);
+       kfree(buf);
+       return rc;
+}
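orangefs_debug_write() validates whatever is echoed into either debug file by mapping the keyword string to a mask with debug_string_to_mask() and then regenerating the canonical keyword string from that mask with debug_mask_to_string() (both declared in orangefs-kernel.h further down in this patch; the third argument selects the kernel table with 0 and the client table with 1). A minimal sketch of the kernel-side round trip; the keyword names here are assumptions, not taken from the patch:

static void example_kernel_keyword_round_trip(void)
{
        __u64 mask = 0;
        char keywords[ORANGEFS_MAX_DEBUG_STRING_LEN] = "super,inode";

        /* map the typed keywords onto kernel mask bits (table 0) */
        debug_string_to_mask(keywords, &mask, 0);

        /* rebuild the canonical string; the result lands in kernel_debug_string */
        debug_mask_to_string(&mask, 0);
}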
diff --git a/fs/orangefs/orangefs-debugfs.h b/fs/orangefs/orangefs-debugfs.h
new file mode 100644 (file)
index 0000000..e4828c0
--- /dev/null
@@ -0,0 +1,3 @@
+int orangefs_debugfs_init(void);
+int orangefs_kernel_debug_init(void);
+void orangefs_debugfs_cleanup(void);
diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
new file mode 100644 (file)
index 0000000..5a8725a
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#ifndef _ORANGEFS_DEV_PROTO_H
+#define _ORANGEFS_DEV_PROTO_H
+
+/*
+ * types and constants shared between user space and kernel space for
+ * device interaction using a common protocol
+ */
+
+/*
+ * valid orangefs kernel operation types
+ */
+#define ORANGEFS_VFS_OP_INVALID        0xFF000000
+#define ORANGEFS_VFS_OP_FILE_IO        0xFF000001
+#define ORANGEFS_VFS_OP_LOOKUP         0xFF000002
+#define ORANGEFS_VFS_OP_CREATE         0xFF000003
+#define ORANGEFS_VFS_OP_GETATTR        0xFF000004
+#define ORANGEFS_VFS_OP_REMOVE         0xFF000005
+#define ORANGEFS_VFS_OP_MKDIR          0xFF000006
+#define ORANGEFS_VFS_OP_READDIR        0xFF000007
+#define ORANGEFS_VFS_OP_SETATTR        0xFF000008
+#define ORANGEFS_VFS_OP_SYMLINK        0xFF000009
+#define ORANGEFS_VFS_OP_RENAME         0xFF00000A
+#define ORANGEFS_VFS_OP_STATFS         0xFF00000B
+#define ORANGEFS_VFS_OP_TRUNCATE       0xFF00000C
+#define ORANGEFS_VFS_OP_MMAP_RA_FLUSH  0xFF00000D
+#define ORANGEFS_VFS_OP_FS_MOUNT       0xFF00000E
+#define ORANGEFS_VFS_OP_FS_UMOUNT      0xFF00000F
+#define ORANGEFS_VFS_OP_GETXATTR       0xFF000010
+#define ORANGEFS_VFS_OP_SETXATTR       0xFF000011
+#define ORANGEFS_VFS_OP_LISTXATTR      0xFF000012
+#define ORANGEFS_VFS_OP_REMOVEXATTR    0xFF000013
+#define ORANGEFS_VFS_OP_PARAM          0xFF000014
+#define ORANGEFS_VFS_OP_PERF_COUNT     0xFF000015
+#define ORANGEFS_VFS_OP_CANCEL         0xFF00EE00
+#define ORANGEFS_VFS_OP_FSYNC          0xFF00EE01
+#define ORANGEFS_VFS_OP_FSKEY          0xFF00EE02
+#define ORANGEFS_VFS_OP_READDIRPLUS    0xFF00EE03
+
+/*
+ * Misc constants. Please retain them as multiples of 8!
+ * Otherwise 32-64 bit interactions will be messed up :)
+ */
+#define ORANGEFS_NAME_LEN              0x00000100
+#define ORANGEFS_MAX_DEBUG_STRING_LEN  0x00000400
+#define ORANGEFS_MAX_DEBUG_ARRAY_LEN   0x00000800
+
+/*
+ * The maximum number of directory entries in a single request is 96.
+ * XXX: Why can this not be higher? The client-side code can handle up to 512.
+ * XXX: What happens if we expect more than the client can return?
+ */
+#define ORANGEFS_MAX_DIRENT_COUNT_READDIR 96
+
+#include "upcall.h"
+#include "downcall.h"
+
+#endif
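The comment above asks that these constants stay multiples of 8 so that 32-bit and 64-bit peers agree on structure layout. That invariant can be enforced at compile time; a minimal sketch using the kernel's BUILD_BUG_ON() (illustrative, not something this patch adds):

#include <linux/bug.h>

static inline void orangefs_proto_constant_checks(void)
{
        BUILD_BUG_ON(ORANGEFS_NAME_LEN % 8);
        BUILD_BUG_ON(ORANGEFS_MAX_DEBUG_STRING_LEN % 8);
        BUILD_BUG_ON(ORANGEFS_MAX_DEBUG_ARRAY_LEN % 8);
}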
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
new file mode 100644 (file)
index 0000000..a8cde90
--- /dev/null
@@ -0,0 +1,689 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  The ORANGEFS Linux kernel support allows ORANGEFS volumes to be mounted and
+ *  accessed through the Linux VFS (i.e. using standard I/O system calls).
+ *  This support is only needed on clients that wish to mount the file system.
+ *
+ */
+
+/*
+ *  Declarations and macros for the ORANGEFS Linux kernel support.
+ */
+
+#ifndef __ORANGEFSKERNEL_H
+#define __ORANGEFSKERNEL_H
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/statfs.h>
+#include <linux/backing-dev.h>
+#include <linux/device.h>
+#include <linux/mpage.h>
+#include <linux/namei.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+
+#include <linux/aio.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/compat.h>
+#include <linux/mount.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <linux/uio.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/wait.h>
+#include <linux/dcache.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/xattr.h>
+#include <linux/exportfs.h>
+
+#include <asm/unaligned.h>
+
+#include "orangefs-dev-proto.h"
+
+#ifdef ORANGEFS_KERNEL_DEBUG
+#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS       10
+#else
+#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS       20
+#endif
+
+#define ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS   30
+
+#define ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS     900     /* 15 minutes */
+
+#define ORANGEFS_REQDEVICE_NAME          "pvfs2-req"
+
+#define ORANGEFS_DEVREQ_MAGIC             0x20030529
+#define ORANGEFS_LINK_MAX                 0x000000FF
+#define ORANGEFS_PURGE_RETRY_COUNT     0x00000005
+#define ORANGEFS_SEEK_END              0x00000002
+#define ORANGEFS_MAX_NUM_OPTIONS          0x00000004
+#define ORANGEFS_MAX_MOUNT_OPT_LEN        0x00000080
+#define ORANGEFS_MAX_FSKEY_LEN            64
+
+#define MAX_DEV_REQ_UPSIZE (2 * sizeof(__s32) +   \
+                           sizeof(__u64) + sizeof(struct orangefs_upcall_s))
+#define MAX_DEV_REQ_DOWNSIZE (2 * sizeof(__s32) + \
+                             sizeof(__u64) + sizeof(struct orangefs_downcall_s))
+
+/*
+ * valid orangefs kernel operation states
+ *
+ * unknown  - op was just initialized
+ * waiting  - op is on request_list (upward bound)
+ * inprogr  - op is in progress (waiting for downcall)
+ * serviced - op has matching downcall; ok
+ * purged   - op has to start a timer since client-core
+ *            exited uncleanly before servicing op
+ * given up - submitter has given up waiting for it
+ */
+enum orangefs_vfs_op_states {
+       OP_VFS_STATE_UNKNOWN = 0,
+       OP_VFS_STATE_WAITING = 1,
+       OP_VFS_STATE_INPROGR = 2,
+       OP_VFS_STATE_SERVICED = 4,
+       OP_VFS_STATE_PURGED = 8,
+       OP_VFS_STATE_GIVEN_UP = 16,
+};
+
+/*
+ * Defines for controlling whether I/O upcalls are for async or sync operations
+ */
+enum ORANGEFS_async_io_type {
+       ORANGEFS_VFS_SYNC_IO = 0,
+       ORANGEFS_VFS_ASYNC_IO = 1,
+};
+
+/*
+ * An array of client_debug_mask will be built to hold debug keyword/mask
+ * values fetched from userspace.
+ */
+struct client_debug_mask {
+       char *keyword;
+       __u64 mask1;
+       __u64 mask2;
+};
+
+/*
+ * orangefs kernel memory related flags
+ */
+
+#if ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB))
+#define ORANGEFS_CACHE_CREATE_FLAGS SLAB_RED_ZONE
+#else
+#define ORANGEFS_CACHE_CREATE_FLAGS 0
+#endif /* ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) */
+
+#define ORANGEFS_GFP_FLAGS (GFP_KERNEL)
+#define ORANGEFS_BUFMAP_GFP_FLAGS (GFP_KERNEL)
+
+/* orangefs xattr and acl related defines */
+#define ORANGEFS_XATTR_INDEX_POSIX_ACL_ACCESS  1
+#define ORANGEFS_XATTR_INDEX_POSIX_ACL_DEFAULT 2
+#define ORANGEFS_XATTR_INDEX_TRUSTED           3
+#define ORANGEFS_XATTR_INDEX_DEFAULT           4
+
+#define ORANGEFS_XATTR_NAME_ACL_ACCESS XATTR_NAME_POSIX_ACL_ACCESS
+#define ORANGEFS_XATTR_NAME_ACL_DEFAULT XATTR_NAME_POSIX_ACL_DEFAULT
+#define ORANGEFS_XATTR_NAME_TRUSTED_PREFIX "trusted."
+#define ORANGEFS_XATTR_NAME_DEFAULT_PREFIX ""
+
+/* these functions are defined in orangefs-utils.c */
+int orangefs_prepare_cdm_array(char *debug_array_string);
+int orangefs_prepare_debugfs_help_string(int);
+
+/* defined in orangefs-debugfs.c */
+int orangefs_client_debug_init(void);
+
+void debug_string_to_mask(char *, void *, int);
+void do_c_mask(int, char *, struct client_debug_mask **);
+void do_k_mask(int, char *, __u64 **);
+
+void debug_mask_to_string(void *, int);
+void do_k_string(void *, int);
+void do_c_string(void *, int);
+int check_amalgam_keyword(void *, int);
+int keyword_is_amalgam(char *);
+
+/* these variables are defined in orangefs-mod.c */
+extern char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+extern char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+extern char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+extern unsigned int kernel_mask_set_mod_init;
+
+extern int orangefs_init_acl(struct inode *inode, struct inode *dir);
+extern const struct xattr_handler *orangefs_xattr_handlers[];
+
+extern struct posix_acl *orangefs_get_acl(struct inode *inode, int type);
+extern int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+
+/*
+ * Redefine the xtvec structure so that we can move helper functions out of
+ * the define.
+ */
+struct xtvec {
+       __kernel_off_t xtv_off;         /* must be off_t */
+       __kernel_size_t xtv_len;        /* must be size_t */
+};
+
+/*
+ * orangefs data structures
+ */
+struct orangefs_kernel_op_s {
+       enum orangefs_vfs_op_states op_state;
+       __u64 tag;
+
+       /*
+        * Set uses_shared_memory to 1 if this operation uses shared memory.
+        * If true, then a retry on the op must also get a new shared memory
+        * buffer and re-populate it.
+        */
+       int uses_shared_memory;
+
+       struct orangefs_upcall_s upcall;
+       struct orangefs_downcall_s downcall;
+
+       wait_queue_head_t waitq;
+       spinlock_t lock;
+
+       struct completion done;
+
+       atomic_t ref_count;
+
+       /* VFS aio fields */
+
+       int attempts;
+
+       struct list_head list;
+};
+
+#define set_op_state_waiting(op)     ((op)->op_state = OP_VFS_STATE_WAITING)
+#define set_op_state_inprogress(op)  ((op)->op_state = OP_VFS_STATE_INPROGR)
+#define set_op_state_given_up(op)  ((op)->op_state = OP_VFS_STATE_GIVEN_UP)
+static inline void set_op_state_serviced(struct orangefs_kernel_op_s *op)
+{
+       op->op_state = OP_VFS_STATE_SERVICED;
+       wake_up_interruptible(&op->waitq);
+}
+static inline void set_op_state_purged(struct orangefs_kernel_op_s *op)
+{
+       op->op_state |= OP_VFS_STATE_PURGED;
+       wake_up_interruptible(&op->waitq);
+}
+
+#define op_state_waiting(op)     ((op)->op_state & OP_VFS_STATE_WAITING)
+#define op_state_in_progress(op) ((op)->op_state & OP_VFS_STATE_INPROGR)
+#define op_state_serviced(op)    ((op)->op_state & OP_VFS_STATE_SERVICED)
+#define op_state_purged(op)      ((op)->op_state & OP_VFS_STATE_PURGED)
+#define op_state_given_up(op)    ((op)->op_state & OP_VFS_STATE_GIVEN_UP)
+
+static inline void get_op(struct orangefs_kernel_op_s *op)
+{
+       atomic_inc(&op->ref_count);
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                       "(get) Alloced OP (%p:%llu)\n", op, llu(op->tag));
+}
+
+void __op_release(struct orangefs_kernel_op_s *op);
+
+static inline void op_release(struct orangefs_kernel_op_s *op)
+{
+       if (atomic_dec_and_test(&op->ref_count)) {
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                       "(put) Releasing OP (%p:%llu)\n", op, llu((op)->tag));
+               __op_release(op);
+       }
+}
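get_op() and op_release() maintain a plain atomic reference count on an op: each extra get_op() must be paired with an op_release(), and the final op_release() frees the op through __op_release(). A minimal sketch of the expected pairing, assuming op_alloc() returns an op holding one reference (the op_alloc()/op_release() pairing in orangefs_debug_write() earlier in this patch suggests exactly that):

static int example_op_refcounting(void)
{
        struct orangefs_kernel_op_s *op;

        op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
        if (!op)
                return -ENOMEM;

        get_op(op);             /* a second user takes its own reference */
        /* ... hand op to that second user ... */
        op_release(op);         /* the second user is done with it */

        op_release(op);         /* drop the op_alloc() reference; last put frees */
        return 0;
}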
+
+/* per inode private orangefs info */
+struct orangefs_inode_s {
+       struct orangefs_object_kref refn;
+       char link_target[ORANGEFS_NAME_MAX];
+       __s64 blksize;
+       /*
+        * Reading or writing extended attributes requires holding the
+        * appropriate reader/writer semaphore on the orangefs_inode_s structure.
+        */
+       struct rw_semaphore xattr_sem;
+
+       struct inode vfs_inode;
+       sector_t last_failed_block_index_read;
+
+       /*
+        * State of in-memory attributes not yet flushed to disk associated
+        * with this object
+        */
+       unsigned long pinode_flags;
+};
+
+#define P_ATIME_FLAG 0
+#define P_MTIME_FLAG 1
+#define P_CTIME_FLAG 2
+#define P_MODE_FLAG  3
+
+#define ClearAtimeFlag(pinode) clear_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
+#define SetAtimeFlag(pinode)   set_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
+#define AtimeFlag(pinode)      test_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
+
+#define ClearMtimeFlag(pinode) clear_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
+#define SetMtimeFlag(pinode)   set_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
+#define MtimeFlag(pinode)      test_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
+
+#define ClearCtimeFlag(pinode) clear_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
+#define SetCtimeFlag(pinode)   set_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
+#define CtimeFlag(pinode)      test_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
+
+#define ClearModeFlag(pinode) clear_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
+#define SetModeFlag(pinode)   set_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
+#define ModeFlag(pinode)      test_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
+
+/* per superblock private orangefs info */
+struct orangefs_sb_info_s {
+       struct orangefs_khandle root_khandle;
+       __s32 fs_id;
+       int id;
+       int flags;
+#define ORANGEFS_OPT_INTR      0x01
+#define ORANGEFS_OPT_LOCAL_LOCK        0x02
+       char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
+       struct super_block *sb;
+       int mount_pending;
+       struct list_head list;
+};
+
+/*
+ * structure that holds the state of any async I/O operation issued
+ * through the VFS. Needed especially to handle cancellation requests
+ * or even completion notification so that the VFS client-side daemon
+ * can free up its vfs_request slots.
+ */
+struct orangefs_kiocb_s {
+       /* the pointer to the task that initiated the AIO */
+       struct task_struct *tsk;
+
+       /* pointer to the kiocb that kicked this operation */
+       struct kiocb *kiocb;
+
+       /* buffer index that was used for the I/O */
+       struct orangefs_bufmap *bufmap;
+       int buffer_index;
+
+       /* orangefs kernel operation type */
+       struct orangefs_kernel_op_s *op;
+
+       /* The user space buffers from/to which I/O is being staged */
+       struct iovec *iov;
+
+       /* number of elements in the iovector */
+       unsigned long nr_segs;
+
+       /* set to indicate the type of the operation */
+       int rw;
+
+       /* file offset */
+       loff_t offset;
+
+       /* and the count in bytes */
+       size_t bytes_to_be_copied;
+
+       ssize_t bytes_copied;
+       int needs_cleanup;
+};
+
+struct orangefs_stats {
+       unsigned long cache_hits;
+       unsigned long cache_misses;
+       unsigned long reads;
+       unsigned long writes;
+};
+
+extern struct orangefs_stats g_orangefs_stats;
+
+/*
+ * NOTE: See Documentation/filesystems/porting for information
+ * on implementing FOO_I and properly accessing fs private data
+ */
+static inline struct orangefs_inode_s *ORANGEFS_I(struct inode *inode)
+{
+       return container_of(inode, struct orangefs_inode_s, vfs_inode);
+}
+
+static inline struct orangefs_sb_info_s *ORANGEFS_SB(struct super_block *sb)
+{
+       return (struct orangefs_sb_info_s *) sb->s_fs_info;
+}
+
+/* ino_t descends from "unsigned long", 8 bytes, 64 bits. */
+static inline ino_t orangefs_khandle_to_ino(struct orangefs_khandle *khandle)
+{
+       union {
+               unsigned char u[8];
+               __u64 ino;
+       } ihandle;
+
+       ihandle.u[0] = khandle->u[0] ^ khandle->u[4];
+       ihandle.u[1] = khandle->u[1] ^ khandle->u[5];
+       ihandle.u[2] = khandle->u[2] ^ khandle->u[6];
+       ihandle.u[3] = khandle->u[3] ^ khandle->u[7];
+       ihandle.u[4] = khandle->u[12] ^ khandle->u[8];
+       ihandle.u[5] = khandle->u[13] ^ khandle->u[9];
+       ihandle.u[6] = khandle->u[14] ^ khandle->u[10];
+       ihandle.u[7] = khandle->u[15] ^ khandle->u[11];
+
+       return ihandle.ino;
+}
+
+static inline struct orangefs_khandle *get_khandle_from_ino(struct inode *inode)
+{
+       return &(ORANGEFS_I(inode)->refn.khandle);
+}
+
+static inline __s32 get_fsid_from_ino(struct inode *inode)
+{
+       return ORANGEFS_I(inode)->refn.fs_id;
+}
+
+static inline ino_t get_ino_from_khandle(struct inode *inode)
+{
+       struct orangefs_khandle *khandle;
+       ino_t ino;
+
+       khandle = get_khandle_from_ino(inode);
+       ino = orangefs_khandle_to_ino(khandle);
+       return ino;
+}
+
+static inline ino_t get_parent_ino_from_dentry(struct dentry *dentry)
+{
+       return get_ino_from_khandle(dentry->d_parent->d_inode);
+}
+
+static inline int is_root_handle(struct inode *inode)
+{
+       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                    "%s: root handle: %pU, this handle: %pU:\n",
+                    __func__,
+                    &ORANGEFS_SB(inode->i_sb)->root_khandle,
+                    get_khandle_from_ino(inode));
+
+       if (ORANGEFS_khandle_cmp(&(ORANGEFS_SB(inode->i_sb)->root_khandle),
+                            get_khandle_from_ino(inode)))
+               return 0;
+       else
+               return 1;
+}
+
+static inline int match_handle(struct orangefs_khandle resp_handle,
+                              struct inode *inode)
+{
+       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                    "%s: one handle: %pU, another handle:%pU:\n",
+                    __func__,
+                    &resp_handle,
+                    get_khandle_from_ino(inode));
+
+       if (ORANGEFS_khandle_cmp(&resp_handle, get_khandle_from_ino(inode)))
+               return 0;
+       else
+               return 1;
+}
+
+/*
+ * defined in orangefs-cache.c
+ */
+int op_cache_initialize(void);
+int op_cache_finalize(void);
+struct orangefs_kernel_op_s *op_alloc(__s32 type);
+char *get_opname_string(struct orangefs_kernel_op_s *new_op);
+
+int orangefs_inode_cache_initialize(void);
+int orangefs_inode_cache_finalize(void);
+
+/*
+ * defined in orangefs-mod.c
+ */
+void purge_inprogress_ops(void);
+
+/*
+ * defined in waitqueue.c
+ */
+void purge_waiting_ops(void);
+
+/*
+ * defined in super.c
+ */
+struct dentry *orangefs_mount(struct file_system_type *fst,
+                          int flags,
+                          const char *devname,
+                          void *data);
+
+void orangefs_kill_sb(struct super_block *sb);
+int orangefs_remount(struct super_block *sb);
+
+int fsid_key_table_initialize(void);
+void fsid_key_table_finalize(void);
+
+/*
+ * defined in inode.c
+ */
+__u32 convert_to_orangefs_mask(unsigned long lite_mask);
+struct inode *orangefs_new_inode(struct super_block *sb,
+                             struct inode *dir,
+                             int mode,
+                             dev_t dev,
+                             struct orangefs_object_kref *ref);
+
+int orangefs_setattr(struct dentry *dentry, struct iattr *iattr);
+
+int orangefs_getattr(struct vfsmount *mnt,
+                 struct dentry *dentry,
+                 struct kstat *kstat);
+
+int orangefs_permission(struct inode *inode, int mask);
+
+/*
+ * defined in xattr.c
+ */
+int orangefs_setxattr(struct dentry *dentry,
+                  const char *name,
+                  const void *value,
+                  size_t size,
+                  int flags);
+
+ssize_t orangefs_getxattr(struct dentry *dentry,
+                      const char *name,
+                      void *buffer,
+                      size_t size);
+
+ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+
+/*
+ * defined in namei.c
+ */
+struct inode *orangefs_iget(struct super_block *sb,
+                        struct orangefs_object_kref *ref);
+
+ssize_t orangefs_inode_read(struct inode *inode,
+                           struct iov_iter *iter,
+                           loff_t *offset,
+                           loff_t readahead_size);
+
+/*
+ * defined in devorangefs-req.c
+ */
+int orangefs_dev_init(void);
+void orangefs_dev_cleanup(void);
+int is_daemon_in_service(void);
+int fs_mount_pending(__s32 fsid);
+
+/*
+ * defined in orangefs-utils.c
+ */
+__s32 fsid_of_op(struct orangefs_kernel_op_s *op);
+
+int orangefs_flush_inode(struct inode *inode);
+
+ssize_t orangefs_inode_getxattr(struct inode *inode,
+                            const char *prefix,
+                            const char *name,
+                            void *buffer,
+                            size_t size);
+
+int orangefs_inode_setxattr(struct inode *inode,
+                        const char *prefix,
+                        const char *name,
+                        const void *value,
+                        size_t size,
+                        int flags);
+
+int orangefs_inode_getattr(struct inode *inode, __u32 mask, int check);
+
+int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr);
+
+void orangefs_make_bad_inode(struct inode *inode);
+
+void orangefs_block_signals(sigset_t *);
+
+void orangefs_set_signals(sigset_t *);
+
+int orangefs_unmount_sb(struct super_block *sb);
+
+int orangefs_cancel_op_in_progress(__u64 tag);
+
+static inline __u64 orangefs_convert_time_field(const struct timespec *ts)
+{
+       return (__u64)ts->tv_sec;
+}
+
+int orangefs_normalize_to_errno(__s32 error_code);
+
+extern struct mutex devreq_mutex;
+extern struct mutex request_mutex;
+extern int debug;
+extern int op_timeout_secs;
+extern int slot_timeout_secs;
+extern struct list_head orangefs_superblocks;
+extern spinlock_t orangefs_superblocks_lock;
+extern struct list_head orangefs_request_list;
+extern spinlock_t orangefs_request_list_lock;
+extern wait_queue_head_t orangefs_request_list_waitq;
+extern struct list_head *htable_ops_in_progress;
+extern spinlock_t htable_ops_in_progress_lock;
+extern int hash_table_size;
+
+extern const struct address_space_operations orangefs_address_operations;
+extern struct backing_dev_info orangefs_backing_dev_info;
+extern struct inode_operations orangefs_file_inode_operations;
+extern const struct file_operations orangefs_file_operations;
+extern struct inode_operations orangefs_symlink_inode_operations;
+extern struct inode_operations orangefs_dir_inode_operations;
+extern const struct file_operations orangefs_dir_operations;
+extern const struct dentry_operations orangefs_dentry_operations;
+extern const struct file_operations orangefs_devreq_file_operations;
+
+extern wait_queue_head_t orangefs_bufmap_init_waitq;
+
+/*
+ * misc convenience macros
+ */
+
+#define ORANGEFS_OP_INTERRUPTIBLE 1   /* service_operation() is interruptible */
+#define ORANGEFS_OP_PRIORITY      2   /* service_operation() is high priority */
+#define ORANGEFS_OP_CANCELLATION  4   /* this is a cancellation */
+#define ORANGEFS_OP_NO_SEMAPHORE  8   /* don't acquire semaphore */
+#define ORANGEFS_OP_ASYNC         16  /* Queue it, but don't wait */
+
+int service_operation(struct orangefs_kernel_op_s *op,
+                     const char *op_name,
+                     int flags);
+
+#define get_interruptible_flag(inode) \
+       ((ORANGEFS_SB(inode->i_sb)->flags & ORANGEFS_OPT_INTR) ? \
+               ORANGEFS_OP_INTERRUPTIBLE : 0)
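service_operation() queues an upcall and waits for the matching downcall; the ORANGEFS_OP_* flags above control how that wait behaves, and get_interruptible_flag() requests ORANGEFS_OP_INTERRUPTIBLE only when the superblock has ORANGEFS_OPT_INTR set. A minimal sketch of the calling pattern, modeled on orangefs_debug_write() earlier in this patch (the op type and the empty request fill-in are illustrative):

static int example_param_upcall(struct inode *inode)
{
        struct orangefs_kernel_op_s *new_op;
        int ret;

        new_op = op_alloc(ORANGEFS_VFS_OP_PARAM);
        if (!new_op)
                return -ENOMEM;

        /* ... fill in new_op->upcall.req.param here ... */

        ret = service_operation(new_op, "orangefs_param",
                                get_interruptible_flag(inode));

        /* on success (ret == 0) the reply is available in new_op->downcall */
        op_release(new_op);
        return ret;
}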
+
+#define add_orangefs_sb(sb)                                            \
+do {                                                                   \
+       gossip_debug(GOSSIP_SUPER_DEBUG,                                \
+                    "Adding SB %p to orangefs superblocks\n",          \
+                    ORANGEFS_SB(sb));                                  \
+       spin_lock(&orangefs_superblocks_lock);                          \
+       list_add_tail(&ORANGEFS_SB(sb)->list, &orangefs_superblocks);           \
+       spin_unlock(&orangefs_superblocks_lock); \
+} while (0)
+
+#define remove_orangefs_sb(sb)                                         \
+do {                                                                   \
+       struct list_head *tmp = NULL;                                   \
+       struct list_head *tmp_safe = NULL;                              \
+       struct orangefs_sb_info_s *orangefs_sb = NULL;                  \
+                                                                       \
+       spin_lock(&orangefs_superblocks_lock);                          \
+       list_for_each_safe(tmp, tmp_safe, &orangefs_superblocks) {              \
+               orangefs_sb = list_entry(tmp,                           \
+                                     struct orangefs_sb_info_s,                \
+                                     list);                            \
+               if (orangefs_sb && (orangefs_sb->sb == sb)) {                   \
+                       gossip_debug(GOSSIP_SUPER_DEBUG,                \
+                           "Removing SB %p from orangefs superblocks\n",       \
+                       orangefs_sb);                                   \
+                       list_del(&orangefs_sb->list);                   \
+                       break;                                          \
+               }                                                       \
+       }                                                               \
+       spin_unlock(&orangefs_superblocks_lock);                                \
+} while (0)
+
+#define orangefs_lock_inode(inode) spin_lock(&inode->i_lock)
+#define orangefs_unlock_inode(inode) spin_unlock(&inode->i_lock)
+
+#define fill_default_sys_attrs(sys_attr, type, mode)                   \
+do {                                                                   \
+       sys_attr.owner = from_kuid(current_user_ns(), current_fsuid()); \
+       sys_attr.group = from_kgid(current_user_ns(), current_fsgid()); \
+       sys_attr.size = 0;                                              \
+       sys_attr.perms = ORANGEFS_util_translate_mode(mode);            \
+       sys_attr.objtype = type;                                        \
+       sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE;                  \
+} while (0)
+
+#define orangefs_inode_lock(__i)  mutex_lock(&(__i)->i_mutex)
+
+#define orangefs_inode_unlock(__i) mutex_unlock(&(__i)->i_mutex)
+
+static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
+{
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+       orangefs_inode_lock(inode);
+#endif
+       i_size_write(inode, i_size);
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+       orangefs_inode_unlock(inode);
+#endif
+}
+
+static inline unsigned int diff(struct timeval *end, struct timeval *begin)
+{
+       if (end->tv_usec < begin->tv_usec) {
+               end->tv_usec += 1000000;
+               end->tv_sec--;
+       }
+       end->tv_sec -= begin->tv_sec;
+       end->tv_usec -= begin->tv_usec;
+       return (end->tv_sec * 1000000) + end->tv_usec;
+}
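diff() normalizes the timeval pair and returns the elapsed time in microseconds; note that it modifies *end while doing so. A minimal usage sketch, assuming do_gettimeofday() (the conventional struct timeval source in kernels of this vintage) is acceptable for the caller:

static unsigned int example_elapsed_usecs(void)
{
        struct timeval begin, end;

        do_gettimeofday(&begin);
        /* ... the work being timed ... */
        do_gettimeofday(&end);

        return diff(&end, &begin);      /* diff() alters 'end' as a side effect */
}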
+
+#endif /* __ORANGEFSKERNEL_H */
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
new file mode 100644 (file)
index 0000000..7639ab2
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * Changes by Acxiom Corporation to add proc file handler for pvfs2 client
+ * parameters, Copyright Acxiom Corporation, 2005.
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-debugfs.h"
+#include "orangefs-sysfs.h"
+
+/* ORANGEFS_VERSION is a ./configure define */
+#ifndef ORANGEFS_VERSION
+#define ORANGEFS_VERSION "upstream"
+#endif
+
+/*
+ * global variables declared here
+ */
+
+/* array of client debug keyword/mask values */
+struct client_debug_mask *cdm_array;
+int cdm_element_count;
+
+char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN] = "none";
+char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+
+char *debug_help_string;
+int help_string_initialized;
+struct dentry *help_file_dentry;
+struct dentry *client_debug_dentry;
+struct dentry *debug_dir;
+int client_verbose_index;
+int client_all_index;
+struct orangefs_stats g_orangefs_stats;
+
+/* the size of the hash tables for ops in progress */
+int hash_table_size = 509;
+
+static ulong module_parm_debug_mask;
+__u64 gossip_debug_mask;
+struct client_debug_mask client_debug_mask = { NULL, 0, 0 };
+unsigned int kernel_mask_set_mod_init; /* implicitly false */
+int op_timeout_secs = ORANGEFS_DEFAULT_OP_TIMEOUT_SECS;
+int slot_timeout_secs = ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ORANGEFS Development Team");
+MODULE_DESCRIPTION("The Linux Kernel VFS interface to ORANGEFS");
+MODULE_PARM_DESC(module_parm_debug_mask, "debugging level (see orangefs-debug.h for values)");
+MODULE_PARM_DESC(op_timeout_secs, "Operation timeout in seconds");
+MODULE_PARM_DESC(slot_timeout_secs, "Slot timeout in seconds");
+MODULE_PARM_DESC(hash_table_size,
+                "size of hash table for operations in progress");
+
+static struct file_system_type orangefs_fs_type = {
+       .name = "pvfs2",
+       .mount = orangefs_mount,
+       .kill_sb = orangefs_kill_sb,
+       .owner = THIS_MODULE,
+};
+
+module_param(hash_table_size, int, 0);
+module_param(module_parm_debug_mask, ulong, 0644);
+module_param(op_timeout_secs, int, 0);
+module_param(slot_timeout_secs, int, 0);
+
+/* synchronizes the request device file */
+DEFINE_MUTEX(devreq_mutex);
+
+/*
+ * Blocks non-priority requests from being queued for servicing.  This
+ * could be used for protecting the request list data structure, but
+ * for now it's only being used to stall the op addition to the request
+ * list
+ */
+DEFINE_MUTEX(request_mutex);
+
+/* hash table for storing operations waiting for matching downcall */
+struct list_head *htable_ops_in_progress;
+DEFINE_SPINLOCK(htable_ops_in_progress_lock);
+
+/* list for queueing upcall operations */
+LIST_HEAD(orangefs_request_list);
+
+/* used to protect the above orangefs_request_list */
+DEFINE_SPINLOCK(orangefs_request_list_lock);
+
+/* used for incoming request notification */
+DECLARE_WAIT_QUEUE_HEAD(orangefs_request_list_waitq);
+
+static int __init orangefs_init(void)
+{
+       int ret = -1;
+       __u32 i = 0;
+
+       /* convert input debug mask to a 64-bit unsigned integer */
+       gossip_debug_mask = (unsigned long long) module_parm_debug_mask;
+
+       /*
+        * set the kernel's gossip debug string; invalid mask values will
+        * be ignored.
+        */
+       debug_mask_to_string(&gossip_debug_mask, 0);
+
+       /* remove any invalid values from the mask */
+       debug_string_to_mask(kernel_debug_string, &gossip_debug_mask, 0);
+
+       /*
+        * if the mask has a non-zero value, then indicate that the mask
+        * was set when the kernel module was loaded.  The orangefs dev ioctl
+        * command will look at this boolean to determine if the kernel's
+        * debug mask should be overwritten when the client-core is started.
+        */
+       if (gossip_debug_mask != 0)
+               kernel_mask_set_mod_init = true;
+
+       /* print information message to the system log */
+       pr_info("orangefs: orangefs_init called with debug mask: :%s: :%llx:\n",
+              kernel_debug_string,
+              (unsigned long long)gossip_debug_mask);
+
+       ret = bdi_init(&orangefs_backing_dev_info);
+
+       if (ret)
+               return ret;
+
+       if (op_timeout_secs < 0)
+               op_timeout_secs = 0;
+
+       if (slot_timeout_secs < 0)
+               slot_timeout_secs = 0;
+
+       /* initialize global bookkeeping data structures */
+       ret = op_cache_initialize();
+       if (ret < 0)
+               goto err;
+
+       ret = orangefs_inode_cache_initialize();
+       if (ret < 0)
+               goto cleanup_op;
+
+       /* Initialize the orangefsdev subsystem. */
+       ret = orangefs_dev_init();
+       if (ret < 0) {
+               gossip_err("orangefs: could not initialize device subsystem %d!\n",
+                          ret);
+               goto cleanup_inode;
+       }
+
+       htable_ops_in_progress =
+           kcalloc(hash_table_size, sizeof(struct list_head), GFP_KERNEL);
+       if (!htable_ops_in_progress) {
+               gossip_err("Failed to initialize op hashtable");
+               ret = -ENOMEM;
+               goto cleanup_device;
+       }
+
+       /* initialize a doubly linked list at each hash table index */
+       for (i = 0; i < hash_table_size; i++)
+               INIT_LIST_HEAD(&htable_ops_in_progress[i]);
+
+       ret = fsid_key_table_initialize();
+       if (ret < 0)
+               goto cleanup_progress_table;
+
+       /*
+        * Build the contents of /sys/kernel/debug/orangefs/debug-help
+        * from the keywords in the kernel keyword/mask array.
+        *
+        * The keywords in the client keyword/mask array are
+        * unknown at boot time.
+        *
+        * orangefs_prepare_debugfs_help_string will be used again
+        * later to rebuild the debug-help file after the client starts
+        * and passes along the needed info. The argument signifies
+        * which time orangefs_prepare_debugfs_help_string is being
+        * called.
+        *
+        */
+       ret = orangefs_prepare_debugfs_help_string(1);
+       if (ret)
+               goto out;
+
+       orangefs_debugfs_init();
+       orangefs_kernel_debug_init();
+       orangefs_sysfs_init();
+
+       ret = register_filesystem(&orangefs_fs_type);
+       if (ret == 0) {
+               pr_info("orangefs: module version %s loaded\n", ORANGEFS_VERSION);
+               return 0;
+       }
+
+       orangefs_debugfs_cleanup();
+       orangefs_sysfs_exit();
+       fsid_key_table_finalize();
+
+cleanup_progress_table:
+       kfree(htable_ops_in_progress);
+
+cleanup_device:
+       orangefs_dev_cleanup();
+
+cleanup_inode:
+       orangefs_inode_cache_finalize();
+
+cleanup_op:
+       op_cache_finalize();
+
+err:
+       bdi_destroy(&orangefs_backing_dev_info);
+
+out:
+       return ret;
+}
+
+static void __exit orangefs_exit(void)
+{
+       int i = 0;
+       gossip_debug(GOSSIP_INIT_DEBUG, "orangefs: orangefs_exit called\n");
+
+       unregister_filesystem(&orangefs_fs_type);
+       orangefs_debugfs_cleanup();
+       orangefs_sysfs_exit();
+       fsid_key_table_finalize();
+       orangefs_dev_cleanup();
+       BUG_ON(!list_empty(&orangefs_request_list));
+       for (i = 0; i < hash_table_size; i++)
+               BUG_ON(!list_empty(&htable_ops_in_progress[i]));
+
+       orangefs_inode_cache_finalize();
+       op_cache_finalize();
+
+       kfree(htable_ops_in_progress);
+
+       bdi_destroy(&orangefs_backing_dev_info);
+
+       pr_info("orangefs: module version %s unloaded\n", ORANGEFS_VERSION);
+}
+
+/*
+ * Walk the operations that are in progress in the hash table and mark
+ * them as purged as well (purge_waiting_ops() handles the ones still
+ * sitting on the request list).
+ */
+void purge_inprogress_ops(void)
+{
+       int i;
+
+       for (i = 0; i < hash_table_size; i++) {
+               struct orangefs_kernel_op_s *op;
+               struct orangefs_kernel_op_s *next;
+
+               spin_lock(&htable_ops_in_progress_lock);
+               list_for_each_entry_safe(op,
+                                        next,
+                                        &htable_ops_in_progress[i],
+                                        list) {
+                       spin_lock(&op->lock);
+                       gossip_debug(GOSSIP_INIT_DEBUG,
+                               "pvfs2-client-core: purging in-progress op tag "
+                               "%llu %s\n",
+                               llu(op->tag),
+                               get_opname_string(op));
+                       set_op_state_purged(op);
+                       spin_unlock(&op->lock);
+               }
+               spin_unlock(&htable_ops_in_progress_lock);
+       }
+}
+
+module_init(orangefs_init);
+module_exit(orangefs_exit);
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
new file mode 100644 (file)
index 0000000..83f4053
--- /dev/null
@@ -0,0 +1,1773 @@
+/*
+ * Documentation/ABI/stable/orangefs-sysfs:
+ *
+ * What:               /sys/fs/orangefs/perf_counter_reset
+ * Date:               June 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     echo a 0 or a 1 into perf_counter_reset to
+ *                     reset all the counters in
+ *                     /sys/fs/orangefs/perf_counters
+ *                     except ones with PINT_PERF_PRESERVE set.
+ *
+ *
+ * What:               /sys/fs/orangefs/perf_counters/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Counters and settings for various caches.
+ *                     Read only.
+ *
+ *
+ * What:               /sys/fs/orangefs/perf_time_interval_secs
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Length of perf counter intervals in
+ *                     seconds.
+ *
+ *
+ * What:               /sys/fs/orangefs/perf_history_size
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     The perf_counters cache statistics have N, or
+ *                     perf_history_size, samples. The default is
+ *                     one.
+ *
+ *                     Every perf_time_interval_secs the (first)
+ *                     samples are reset.
+ *
+ *                     If N is greater than one, the "current" set
+ *                     of samples is reset, and the samples from the
+ *                     other N-1 intervals remain available.
+ *
+ *
+ * What:               /sys/fs/orangefs/op_timeout_secs
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Service operation timeout in seconds.
+ *
+ *
+ * What:               /sys/fs/orangefs/slot_timeout_secs
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     "Slot" timeout in seconds. A "slot"
+ *                     is an indexed buffer in the shared
+ *                     memory segment used for communication
+ *                     between the kernel module and userspace.
+ *                     Slots are requested and waited for,
+ *                     the wait times out after slot_timeout_secs.
+ *
+ *
+ * What:               /sys/fs/orangefs/acache/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Attribute cache configurable settings.
+ *
+ *
+ * What:               /sys/fs/orangefs/ncache/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Name cache configurable settings.
+ *
+ *
+ * What:               /sys/fs/orangefs/capcache/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Capability cache configurable settings.
+ *
+ *
+ * What:               /sys/fs/orangefs/ccache/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Credential cache configurable settings.
+ *
+ */
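The block comment above doubles as the sysfs ABI documentation for the attributes this module exposes under /sys/fs/orangefs. A minimal userspace sketch (not part of the kernel patch) that reads one of the documented files:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/fs/orangefs/op_timeout_secs", "r");
        int secs;

        if (f && fscanf(f, "%d", &secs) == 1)
                printf("service operation timeout: %d seconds\n", secs);
        if (f)
                fclose(f);
        return 0;
}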
+
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-sysfs.h"
+
+#define ORANGEFS_KOBJ_ID "orangefs"
+#define ACACHE_KOBJ_ID "acache"
+#define CAPCACHE_KOBJ_ID "capcache"
+#define CCACHE_KOBJ_ID "ccache"
+#define NCACHE_KOBJ_ID "ncache"
+#define PC_KOBJ_ID "pc"
+#define STATS_KOBJ_ID "stats"
+
+struct orangefs_obj {
+       struct kobject kobj;
+       int op_timeout_secs;
+       int perf_counter_reset;
+       int perf_history_size;
+       int perf_time_interval_secs;
+       int slot_timeout_secs;
+};
+
+struct acache_orangefs_obj {
+       struct kobject kobj;
+       int hard_limit;
+       int reclaim_percentage;
+       int soft_limit;
+       int timeout_msecs;
+};
+
+struct capcache_orangefs_obj {
+       struct kobject kobj;
+       int hard_limit;
+       int reclaim_percentage;
+       int soft_limit;
+       int timeout_secs;
+};
+
+struct ccache_orangefs_obj {
+       struct kobject kobj;
+       int hard_limit;
+       int reclaim_percentage;
+       int soft_limit;
+       int timeout_secs;
+};
+
+struct ncache_orangefs_obj {
+       struct kobject kobj;
+       int hard_limit;
+       int reclaim_percentage;
+       int soft_limit;
+       int timeout_msecs;
+};
+
+struct pc_orangefs_obj {
+       struct kobject kobj;
+       char *acache;
+       char *capcache;
+       char *ncache;
+};
+
+struct stats_orangefs_obj {
+       struct kobject kobj;
+       int reads;
+       int writes;
+};
+
+struct orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct orangefs_obj *orangefs_obj,
+                       struct orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct orangefs_obj *orangefs_obj,
+                        struct orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct acache_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct acache_orangefs_obj *acache_orangefs_obj,
+                       struct acache_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct acache_orangefs_obj *acache_orangefs_obj,
+                        struct acache_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct capcache_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct capcache_orangefs_obj *capcache_orangefs_obj,
+                       struct capcache_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct capcache_orangefs_obj *capcache_orangefs_obj,
+                        struct capcache_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct ccache_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct ccache_orangefs_obj *ccache_orangefs_obj,
+                       struct ccache_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct ccache_orangefs_obj *ccache_orangefs_obj,
+                        struct ccache_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct ncache_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct ncache_orangefs_obj *ncache_orangefs_obj,
+                       struct ncache_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct ncache_orangefs_obj *ncache_orangefs_obj,
+                        struct ncache_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct pc_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct pc_orangefs_obj *pc_orangefs_obj,
+                       struct pc_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct pc_orangefs_obj *pc_orangefs_obj,
+                        struct pc_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct stats_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct stats_orangefs_obj *stats_orangefs_obj,
+                       struct stats_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct stats_orangefs_obj *stats_orangefs_obj,
+                        struct stats_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+static ssize_t orangefs_attr_show(struct kobject *kobj,
+                                 struct attribute *attr,
+                                 char *buf)
+{
+       struct orangefs_attribute *attribute;
+       struct orangefs_obj *orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct orangefs_attribute, attr);
+       orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t orangefs_attr_store(struct kobject *kobj,
+                                  struct attribute *attr,
+                                  const char *buf,
+                                  size_t len)
+{
+       struct orangefs_attribute *attribute;
+       struct orangefs_obj *orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "orangefs_attr_store: start\n");
+
+       attribute = container_of(attr, struct orangefs_attribute, attr);
+       orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops orangefs_sysfs_ops = {
+       .show = orangefs_attr_show,
+       .store = orangefs_attr_store,
+};
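orangefs_attr_show() and orangefs_attr_store() are the usual kobject dispatch wrappers: they recover the orangefs_obj and orangefs_attribute from the generic kobject/attribute pointers with container_of() and forward to the per-attribute callbacks, returning -EIO when a callback is missing. The concrete attribute definitions come later in this file, past the end of this excerpt; a minimal sketch of one read-only attribute, using the op_timeout_secs variable declared in orangefs-kernel.h (the callback and attribute names here are illustrative):

static ssize_t op_timeout_secs_show(struct orangefs_obj *orangefs_obj,
                                    struct orangefs_attribute *attr,
                                    char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%d\n", op_timeout_secs);
}

static struct orangefs_attribute op_timeout_secs_attribute = {
        .attr = { .name = "op_timeout_secs", .mode = 0444 },
        .show = op_timeout_secs_show,
};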
+
+static ssize_t acache_orangefs_attr_show(struct kobject *kobj,
+                                        struct attribute *attr,
+                                        char *buf)
+{
+       struct acache_orangefs_attribute *attribute;
+       struct acache_orangefs_obj *acache_orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct acache_orangefs_attribute, attr);
+       acache_orangefs_obj =
+               container_of(kobj, struct acache_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(acache_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t acache_orangefs_attr_store(struct kobject *kobj,
+                                         struct attribute *attr,
+                                         const char *buf,
+                                         size_t len)
+{
+       struct acache_orangefs_attribute *attribute;
+       struct acache_orangefs_obj *acache_orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "acache_orangefs_attr_store: start\n");
+
+       attribute = container_of(attr, struct acache_orangefs_attribute, attr);
+       acache_orangefs_obj =
+               container_of(kobj, struct acache_orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(acache_orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops acache_orangefs_sysfs_ops = {
+       .show = acache_orangefs_attr_show,
+       .store = acache_orangefs_attr_store,
+};
+
+static ssize_t capcache_orangefs_attr_show(struct kobject *kobj,
+                                          struct attribute *attr,
+                                          char *buf)
+{
+       struct capcache_orangefs_attribute *attribute;
+       struct capcache_orangefs_obj *capcache_orangefs_obj;
+       int rc;
+
+       attribute =
+               container_of(attr, struct capcache_orangefs_attribute, attr);
+       capcache_orangefs_obj =
+               container_of(kobj, struct capcache_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(capcache_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t capcache_orangefs_attr_store(struct kobject *kobj,
+                                           struct attribute *attr,
+                                           const char *buf,
+                                           size_t len)
+{
+       struct capcache_orangefs_attribute *attribute;
+       struct capcache_orangefs_obj *capcache_orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "capcache_orangefs_attr_store: start\n");
+
+       attribute =
+               container_of(attr, struct capcache_orangefs_attribute, attr);
+       capcache_orangefs_obj =
+               container_of(kobj, struct capcache_orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(capcache_orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops capcache_orangefs_sysfs_ops = {
+       .show = capcache_orangefs_attr_show,
+       .store = capcache_orangefs_attr_store,
+};
+
+static ssize_t ccache_orangefs_attr_show(struct kobject *kobj,
+                                        struct attribute *attr,
+                                        char *buf)
+{
+       struct ccache_orangefs_attribute *attribute;
+       struct ccache_orangefs_obj *ccache_orangefs_obj;
+       int rc;
+
+       attribute =
+               container_of(attr, struct ccache_orangefs_attribute, attr);
+       ccache_orangefs_obj =
+               container_of(kobj, struct ccache_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(ccache_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t ccache_orangefs_attr_store(struct kobject *kobj,
+                                         struct attribute *attr,
+                                         const char *buf,
+                                         size_t len)
+{
+       struct ccache_orangefs_attribute *attribute;
+       struct ccache_orangefs_obj *ccache_orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "ccache_orangefs_attr_store: start\n");
+
+       attribute =
+               container_of(attr, struct ccache_orangefs_attribute, attr);
+       ccache_orangefs_obj =
+               container_of(kobj, struct ccache_orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(ccache_orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops ccache_orangefs_sysfs_ops = {
+       .show = ccache_orangefs_attr_show,
+       .store = ccache_orangefs_attr_store,
+};
+
+static ssize_t ncache_orangefs_attr_show(struct kobject *kobj,
+                                        struct attribute *attr,
+                                        char *buf)
+{
+       struct ncache_orangefs_attribute *attribute;
+       struct ncache_orangefs_obj *ncache_orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct ncache_orangefs_attribute, attr);
+       ncache_orangefs_obj =
+               container_of(kobj, struct ncache_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(ncache_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t ncache_orangefs_attr_store(struct kobject *kobj,
+                                         struct attribute *attr,
+                                         const char *buf,
+                                         size_t len)
+{
+       struct ncache_orangefs_attribute *attribute;
+       struct ncache_orangefs_obj *ncache_orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "ncache_orangefs_attr_store: start\n");
+
+       attribute = container_of(attr, struct ncache_orangefs_attribute, attr);
+       ncache_orangefs_obj =
+               container_of(kobj, struct ncache_orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(ncache_orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops ncache_orangefs_sysfs_ops = {
+       .show = ncache_orangefs_attr_show,
+       .store = ncache_orangefs_attr_store,
+};
+
+static ssize_t pc_orangefs_attr_show(struct kobject *kobj,
+                                    struct attribute *attr,
+                                    char *buf)
+{
+       struct pc_orangefs_attribute *attribute;
+       struct pc_orangefs_obj *pc_orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct pc_orangefs_attribute, attr);
+       pc_orangefs_obj =
+               container_of(kobj, struct pc_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(pc_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops pc_orangefs_sysfs_ops = {
+       .show = pc_orangefs_attr_show,
+};
+
+static ssize_t stats_orangefs_attr_show(struct kobject *kobj,
+                                       struct attribute *attr,
+                                       char *buf)
+{
+       struct stats_orangefs_attribute *attribute;
+       struct stats_orangefs_obj *stats_orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct stats_orangefs_attribute, attr);
+       stats_orangefs_obj =
+               container_of(kobj, struct stats_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(stats_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops stats_orangefs_sysfs_ops = {
+       .show = stats_orangefs_attr_show,
+};
+
+static void orangefs_release(struct kobject *kobj)
+{
+       struct orangefs_obj *orangefs_obj;
+
+       orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
+       kfree(orangefs_obj);
+}
+
+static void acache_orangefs_release(struct kobject *kobj)
+{
+       struct acache_orangefs_obj *acache_orangefs_obj;
+
+       acache_orangefs_obj =
+               container_of(kobj, struct acache_orangefs_obj, kobj);
+       kfree(acache_orangefs_obj);
+}
+
+static void capcache_orangefs_release(struct kobject *kobj)
+{
+       struct capcache_orangefs_obj *capcache_orangefs_obj;
+
+       capcache_orangefs_obj =
+               container_of(kobj, struct capcache_orangefs_obj, kobj);
+       kfree(capcache_orangefs_obj);
+}
+
+static void ccache_orangefs_release(struct kobject *kobj)
+{
+       struct ccache_orangefs_obj *ccache_orangefs_obj;
+
+       ccache_orangefs_obj =
+               container_of(kobj, struct ccache_orangefs_obj, kobj);
+       kfree(ccache_orangefs_obj);
+}
+
+static void ncache_orangefs_release(struct kobject *kobj)
+{
+       struct ncache_orangefs_obj *ncache_orangefs_obj;
+
+       ncache_orangefs_obj =
+               container_of(kobj, struct ncache_orangefs_obj, kobj);
+       kfree(ncache_orangefs_obj);
+}
+
+static void pc_orangefs_release(struct kobject *kobj)
+{
+       struct pc_orangefs_obj *pc_orangefs_obj;
+
+       pc_orangefs_obj =
+               container_of(kobj, struct pc_orangefs_obj, kobj);
+       kfree(pc_orangefs_obj);
+}
+
+static void stats_orangefs_release(struct kobject *kobj)
+{
+       struct stats_orangefs_obj *stats_orangefs_obj;
+
+       stats_orangefs_obj =
+               container_of(kobj, struct stats_orangefs_obj, kobj);
+       kfree(stats_orangefs_obj);
+}
+
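+/*
+ * Show the attributes whose values live entirely in this module
+ * (op_timeout_secs, slot_timeout_secs and the stats counters); no
+ * upcall to the userspace client is needed for them.
+ */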
+static ssize_t sysfs_int_show(char *kobj_id, char *buf, void *attr)
+{
+       int rc = -EIO;
+       struct orangefs_attribute *orangefs_attr;
+       struct stats_orangefs_attribute *stats_orangefs_attr;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_show: id:%s:\n", kobj_id);
+
+       if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
+               orangefs_attr = (struct orangefs_attribute *)attr;
+
+               if (!strcmp(orangefs_attr->attr.name, "op_timeout_secs")) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%d\n",
+                                      op_timeout_secs);
+                       goto out;
+               } else if (!strcmp(orangefs_attr->attr.name,
+                                  "slot_timeout_secs")) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%d\n",
+                                      slot_timeout_secs);
+                       goto out;
+               } else {
+                       goto out;
+               }
+
+       } else if (!strcmp(kobj_id, STATS_KOBJ_ID)) {
+               stats_orangefs_attr = (struct stats_orangefs_attribute *)attr;
+
+               if (!strcmp(stats_orangefs_attr->attr.name, "reads")) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%lu\n",
+                                      g_orangefs_stats.reads);
+                       goto out;
+               } else if (!strcmp(stats_orangefs_attr->attr.name, "writes")) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%lu\n",
+                                      g_orangefs_stats.writes);
+                       goto out;
+               } else {
+                       goto out;
+               }
+       }
+
+out:
+
+       return rc;
+}
+
+static ssize_t int_orangefs_show(struct orangefs_obj *orangefs_obj,
+                                struct orangefs_attribute *attr,
+                                char *buf)
+{
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "int_orangefs_show:start attr->attr.name:%s:\n",
+                    attr->attr.name);
+
+       rc = sysfs_int_show(ORANGEFS_KOBJ_ID, buf, (void *) attr);
+
+       return rc;
+}
+
+static ssize_t int_stats_show(struct stats_orangefs_obj *stats_orangefs_obj,
+                       struct stats_orangefs_attribute *attr,
+                       char *buf)
+{
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "int_stats_show:start attr->attr.name:%s:\n",
+                    attr->attr.name);
+
+       rc = sysfs_int_show(STATS_KOBJ_ID, buf, (void *) attr);
+
+       return rc;
+}
+
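+/*
+ * Store the module-local attributes: the value in buf is parsed with
+ * kstrtoint straight into op_timeout_secs or slot_timeout_secs, and
+ * count is returned on success, -EINVAL if the parse fails.
+ */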
+static ssize_t int_store(struct orangefs_obj *orangefs_obj,
+                        struct orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count)
+{
+       int rc = 0;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "int_store: start attr->attr.name:%s: buf:%s:\n",
+                    attr->attr.name, buf);
+
+       if (!strcmp(attr->attr.name, "op_timeout_secs")) {
+               rc = kstrtoint(buf, 0, &op_timeout_secs);
+               goto out;
+       } else if (!strcmp(attr->attr.name, "slot_timeout_secs")) {
+               rc = kstrtoint(buf, 0, &slot_timeout_secs);
+               goto out;
+       } else {
+               goto out;
+       }
+
+out:
+       if (rc)
+               rc = -EINVAL;
+       else
+               rc = count;
+
+       return rc;
+}
+
+/*
+ * obtain attribute values from userspace with a service operation.
+ */
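+/*
+ * For illustration only (not part of the interface itself): a read such as
+ *
+ *     cat /sys/fs/orangefs/acache/hard_limit
+ *
+ * lands in one of the service_*_show() wrappers below, which call this
+ * helper with the matching kobj_id; the helper then fetches the current
+ * value from the userspace client with a service operation and formats
+ * it into buf.
+ */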
+static int sysfs_service_op_show(char *kobj_id, char *buf, void *attr)
+{
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int rc = 0;
+       char *ser_op_type = NULL;
+       struct orangefs_attribute *orangefs_attr;
+       struct acache_orangefs_attribute *acache_attr;
+       struct capcache_orangefs_attribute *capcache_attr;
+       struct ccache_orangefs_attribute *ccache_attr;
+       struct ncache_orangefs_attribute *ncache_attr;
+       struct pc_orangefs_attribute *pc_attr;
+       __u32 op_alloc_type;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "sysfs_service_op_show: id:%s:\n",
+                    kobj_id);
+
+       if (strcmp(kobj_id, PC_KOBJ_ID))
+               op_alloc_type = ORANGEFS_VFS_OP_PARAM;
+       else
+               op_alloc_type = ORANGEFS_VFS_OP_PERF_COUNT;
+
+       new_op = op_alloc(op_alloc_type);
+       if (!new_op)
+               return -ENOMEM;
+
+       /* Can't do a service_operation if the client is not running... */
+       rc = is_daemon_in_service();
+       if (rc) {
+               pr_info("%s: Client not running :%d:\n",
+                       __func__,
+                       is_daemon_in_service());
+               goto out;
+       }
+
+       if (strcmp(kobj_id, PC_KOBJ_ID))
+               new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET;
+
+       if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
+               orangefs_attr = (struct orangefs_attribute *)attr;
+
+               if (!strcmp(orangefs_attr->attr.name, "perf_history_size"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE;
+               else if (!strcmp(orangefs_attr->attr.name,
+                                "perf_time_interval_secs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS;
+               else if (!strcmp(orangefs_attr->attr.name,
+                                "perf_counter_reset"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_PERF_RESET;
+
+       } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) {
+               acache_attr = (struct acache_orangefs_attribute *)attr;
+
+               if (!strcmp(acache_attr->attr.name, "timeout_msecs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS;
+
+               if (!strcmp(acache_attr->attr.name, "hard_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT;
+
+               if (!strcmp(acache_attr->attr.name, "soft_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT;
+
+               if (!strcmp(acache_attr->attr.name, "reclaim_percentage"))
+                       new_op->upcall.req.param.op =
+                         ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE;
+
+       } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) {
+               capcache_attr = (struct capcache_orangefs_attribute *)attr;
+
+               if (!strcmp(capcache_attr->attr.name, "timeout_secs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS;
+
+               if (!strcmp(capcache_attr->attr.name, "hard_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT;
+
+               if (!strcmp(capcache_attr->attr.name, "soft_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT;
+
+               if (!strcmp(capcache_attr->attr.name, "reclaim_percentage"))
+                       new_op->upcall.req.param.op =
+                         ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE;
+
+       } else if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) {
+               ccache_attr = (struct ccache_orangefs_attribute *)attr;
+
+               if (!strcmp(ccache_attr->attr.name, "timeout_secs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS;
+
+               if (!strcmp(ccache_attr->attr.name, "hard_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT;
+
+               if (!strcmp(ccache_attr->attr.name, "soft_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT;
+
+               if (!strcmp(ccache_attr->attr.name, "reclaim_percentage"))
+                       new_op->upcall.req.param.op =
+                         ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE;
+
+       } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) {
+               ncache_attr = (struct ncache_orangefs_attribute *)attr;
+
+               if (!strcmp(ncache_attr->attr.name, "timeout_msecs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS;
+
+               if (!strcmp(ncache_attr->attr.name, "hard_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT;
+
+               if (!strcmp(ncache_attr->attr.name, "soft_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT;
+
+               if (!strcmp(ncache_attr->attr.name, "reclaim_percentage"))
+                       new_op->upcall.req.param.op =
+                         ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE;
+
+       } else if (!strcmp(kobj_id, PC_KOBJ_ID)) {
+               pc_attr = (struct pc_orangefs_attribute *)attr;
+
+               if (!strcmp(pc_attr->attr.name, ACACHE_KOBJ_ID))
+                       new_op->upcall.req.perf_count.type =
+                               ORANGEFS_PERF_COUNT_REQUEST_ACACHE;
+
+               if (!strcmp(pc_attr->attr.name, CAPCACHE_KOBJ_ID))
+                       new_op->upcall.req.perf_count.type =
+                               ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE;
+
+               if (!strcmp(pc_attr->attr.name, NCACHE_KOBJ_ID))
+                       new_op->upcall.req.perf_count.type =
+                               ORANGEFS_PERF_COUNT_REQUEST_NCACHE;
+
+       } else {
+               gossip_err("sysfs_service_op_show: unknown kobj_id:%s:\n",
+                          kobj_id);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (strcmp(kobj_id, PC_KOBJ_ID))
+               ser_op_type = "orangefs_param";
+       else
+               ser_op_type = "orangefs_perf_count";
+
+       /*
+        * The service_operation will return an errno return code on
+        * error, and zero on success.
+        */
+       rc = service_operation(new_op, ser_op_type, ORANGEFS_OP_INTERRUPTIBLE);
+
+out:
+       if (!rc) {
+               if (strcmp(kobj_id, PC_KOBJ_ID)) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%d\n",
+                                      (int)new_op->downcall.resp.param.value);
+               } else {
+                       rc = scnprintf(
+                               buf,
+                               PAGE_SIZE,
+                               "%s",
+                               new_op->downcall.resp.perf_count.buffer);
+               }
+       }
+
+       op_release(new_op);
+
+       return rc;
+
+}
+
+static ssize_t service_orangefs_show(struct orangefs_obj *orangefs_obj,
+                                    struct orangefs_attribute *attr,
+                                    char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(ORANGEFS_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t
+       service_acache_show(struct acache_orangefs_obj *acache_orangefs_obj,
+                           struct acache_orangefs_attribute *attr,
+                           char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(ACACHE_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t service_capcache_show(struct capcache_orangefs_obj
+                                       *capcache_orangefs_obj,
+                                    struct capcache_orangefs_attribute *attr,
+                                    char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(CAPCACHE_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t service_ccache_show(struct ccache_orangefs_obj
+                                       *ccache_orangefs_obj,
+                                  struct ccache_orangefs_attribute *attr,
+                                  char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(CCACHE_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t
+       service_ncache_show(struct ncache_orangefs_obj *ncache_orangefs_obj,
+                           struct ncache_orangefs_attribute *attr,
+                           char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(NCACHE_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t
+       service_pc_show(struct pc_orangefs_obj *pc_orangefs_obj,
+                           struct pc_orangefs_attribute *attr,
+                           char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(PC_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+/*
+ * pass attribute values back to userspace with a service operation.
+ *
+ * We have to do a memory allocation, a kstrtoint and a service operation.
+ * And we have to evaluate what the user entered, to make sure the
+ * value is within the range supported by the attribute. So, there's
+ * a lot of return code checking and mapping going on here.
+ *
+ * We want to return 1 if we think everything went OK, and
+ * -EINVAL if not.
+ */
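+/*
+ * For illustration only: a write such as
+ *
+ *     echo 20 > /sys/fs/orangefs/acache/reclaim_percentage
+ *
+ * ends up here via service_acache_store(); the value is parsed with
+ * kstrtoint, range-checked (0-100 for the reclaim percentages) and then
+ * pushed to the userspace client as an ORANGEFS_PARAM_REQUEST_SET.
+ */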
+static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
+{
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int val = 0;
+       int rc = 0;
+       struct orangefs_attribute *orangefs_attr;
+       struct acache_orangefs_attribute *acache_attr;
+       struct capcache_orangefs_attribute *capcache_attr;
+       struct ccache_orangefs_attribute *ccache_attr;
+       struct ncache_orangefs_attribute *ncache_attr;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "sysfs_service_op_store: id:%s:\n",
+                    kobj_id);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_PARAM);
+       if (!new_op)
+               return -EINVAL; /* sic */
+
+       /* Can't do a service_operation if the client is not running... */
+       rc = is_daemon_in_service();
+       if (rc) {
+               pr_info("%s: Client not running :%d:\n",
+                       __func__,
+                       is_daemon_in_service());
+               goto out;
+       }
+
+       /*
+        * The value we want to send back to userspace is in buf.
+        */
+       rc = kstrtoint(buf, 0, &val);
+       if (rc)
+               goto out;
+
+       if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
+               orangefs_attr = (struct orangefs_attribute *)attr;
+
+               if (!strcmp(orangefs_attr->attr.name, "perf_history_size")) {
+                       if (val > 0) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(orangefs_attr->attr.name,
+                                  "perf_time_interval_secs")) {
+                       if (val > 0) {
+                               new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(orangefs_attr->attr.name,
+                                  "perf_counter_reset")) {
+                       if ((val == 0) || (val == 1)) {
+                               new_op->upcall.req.param.op =
+                                       ORANGEFS_PARAM_REQUEST_OP_PERF_RESET;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) {
+               acache_attr = (struct acache_orangefs_attribute *)attr;
+
+               if (!strcmp(acache_attr->attr.name, "hard_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(acache_attr->attr.name, "soft_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(acache_attr->attr.name,
+                                  "reclaim_percentage")) {
+                       if ((val > -1) && (val < 101)) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(acache_attr->attr.name, "timeout_msecs")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) {
+               capcache_attr = (struct capcache_orangefs_attribute *)attr;
+
+               if (!strcmp(capcache_attr->attr.name, "hard_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(capcache_attr->attr.name, "soft_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(capcache_attr->attr.name,
+                                  "reclaim_percentage")) {
+                       if ((val > -1) && (val < 101)) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(capcache_attr->attr.name, "timeout_secs")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) {
+               ccache_attr = (struct ccache_orangefs_attribute *)attr;
+
+               if (!strcmp(ccache_attr->attr.name, "hard_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ccache_attr->attr.name, "soft_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ccache_attr->attr.name,
+                                  "reclaim_percentage")) {
+                       if ((val > -1) && (val < 101)) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ccache_attr->attr.name, "timeout_secs")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) {
+               ncache_attr = (struct ncache_orangefs_attribute *)attr;
+
+               if (!strcmp(ncache_attr->attr.name, "hard_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ncache_attr->attr.name, "soft_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ncache_attr->attr.name,
+                                  "reclaim_percentage")) {
+                       if ((val > -1) && (val < 101)) {
+                               new_op->upcall.req.param.op =
+                                       ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ncache_attr->attr.name, "timeout_msecs")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else {
+               gossip_err("sysfs_service_op_store: unknown kobj_id:%s:\n",
+                          kobj_id);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET;
+
+       new_op->upcall.req.param.value = val;
+
+       /*
+        * The service_operation will return an errno return code on
+        * error, and zero on success.
+        */
+       rc = service_operation(new_op, "orangefs_param", ORANGEFS_OP_INTERRUPTIBLE);
+
+       if (rc < 0) {
+               gossip_err("sysfs_service_op_store: service op returned:%d:\n",
+                       rc);
+               rc = 0;
+       } else {
+               rc = 1;
+       }
+
+out:
+       op_release(new_op);
+
+       if (rc == -ENOMEM || rc == 0)
+               rc = -EINVAL;
+
+       return rc;
+}
+
+static ssize_t
+       service_orangefs_store(struct orangefs_obj *orangefs_obj,
+                              struct orangefs_attribute *attr,
+                              const char *buf,
+                              size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(ORANGEFS_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static ssize_t
+       service_acache_store(struct acache_orangefs_obj *acache_orangefs_obj,
+                            struct acache_orangefs_attribute *attr,
+                            const char *buf,
+                            size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(ACACHE_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static ssize_t
+       service_capcache_store(struct capcache_orangefs_obj
+                               *capcache_orangefs_obj,
+                              struct capcache_orangefs_attribute *attr,
+                              const char *buf,
+                              size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(CAPCACHE_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static ssize_t service_ccache_store(struct ccache_orangefs_obj
+                                       *ccache_orangefs_obj,
+                                   struct ccache_orangefs_attribute *attr,
+                                   const char *buf,
+                                   size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(CCACHE_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static ssize_t
+       service_ncache_store(struct ncache_orangefs_obj *ncache_orangefs_obj,
+                            struct ncache_orangefs_attribute *attr,
+                            const char *buf,
+                            size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(NCACHE_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static struct orangefs_attribute op_timeout_secs_attribute =
+       __ATTR(op_timeout_secs, 0664, int_orangefs_show, int_store);
+
+static struct orangefs_attribute slot_timeout_secs_attribute =
+       __ATTR(slot_timeout_secs, 0664, int_orangefs_show, int_store);
+
+static struct orangefs_attribute perf_counter_reset_attribute =
+       __ATTR(perf_counter_reset,
+              0664,
+              service_orangefs_show,
+              service_orangefs_store);
+
+static struct orangefs_attribute perf_history_size_attribute =
+       __ATTR(perf_history_size,
+              0664,
+              service_orangefs_show,
+              service_orangefs_store);
+
+static struct orangefs_attribute perf_time_interval_secs_attribute =
+       __ATTR(perf_time_interval_secs,
+              0664,
+              service_orangefs_show,
+              service_orangefs_store);
+
+static struct attribute *orangefs_default_attrs[] = {
+       &op_timeout_secs_attribute.attr,
+       &slot_timeout_secs_attribute.attr,
+       &perf_counter_reset_attribute.attr,
+       &perf_history_size_attribute.attr,
+       &perf_time_interval_secs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type orangefs_ktype = {
+       .sysfs_ops = &orangefs_sysfs_ops,
+       .release = orangefs_release,
+       .default_attrs = orangefs_default_attrs,
+};
+
+static struct acache_orangefs_attribute acache_hard_limit_attribute =
+       __ATTR(hard_limit,
+              0664,
+              service_acache_show,
+              service_acache_store);
+
+static struct acache_orangefs_attribute acache_reclaim_percent_attribute =
+       __ATTR(reclaim_percentage,
+              0664,
+              service_acache_show,
+              service_acache_store);
+
+static struct acache_orangefs_attribute acache_soft_limit_attribute =
+       __ATTR(soft_limit,
+              0664,
+              service_acache_show,
+              service_acache_store);
+
+static struct acache_orangefs_attribute acache_timeout_msecs_attribute =
+       __ATTR(timeout_msecs,
+              0664,
+              service_acache_show,
+              service_acache_store);
+
+static struct attribute *acache_orangefs_default_attrs[] = {
+       &acache_hard_limit_attribute.attr,
+       &acache_reclaim_percent_attribute.attr,
+       &acache_soft_limit_attribute.attr,
+       &acache_timeout_msecs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type acache_orangefs_ktype = {
+       .sysfs_ops = &acache_orangefs_sysfs_ops,
+       .release = acache_orangefs_release,
+       .default_attrs = acache_orangefs_default_attrs,
+};
+
+static struct capcache_orangefs_attribute capcache_hard_limit_attribute =
+       __ATTR(hard_limit,
+              0664,
+              service_capcache_show,
+              service_capcache_store);
+
+static struct capcache_orangefs_attribute capcache_reclaim_percent_attribute =
+       __ATTR(reclaim_percentage,
+              0664,
+              service_capcache_show,
+              service_capcache_store);
+
+static struct capcache_orangefs_attribute capcache_soft_limit_attribute =
+       __ATTR(soft_limit,
+              0664,
+              service_capcache_show,
+              service_capcache_store);
+
+static struct capcache_orangefs_attribute capcache_timeout_secs_attribute =
+       __ATTR(timeout_secs,
+              0664,
+              service_capcache_show,
+              service_capcache_store);
+
+static struct attribute *capcache_orangefs_default_attrs[] = {
+       &capcache_hard_limit_attribute.attr,
+       &capcache_reclaim_percent_attribute.attr,
+       &capcache_soft_limit_attribute.attr,
+       &capcache_timeout_secs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type capcache_orangefs_ktype = {
+       .sysfs_ops = &capcache_orangefs_sysfs_ops,
+       .release = capcache_orangefs_release,
+       .default_attrs = capcache_orangefs_default_attrs,
+};
+
+static struct ccache_orangefs_attribute ccache_hard_limit_attribute =
+       __ATTR(hard_limit,
+              0664,
+              service_ccache_show,
+              service_ccache_store);
+
+static struct ccache_orangefs_attribute ccache_reclaim_percent_attribute =
+       __ATTR(reclaim_percentage,
+              0664,
+              service_ccache_show,
+              service_ccache_store);
+
+static struct ccache_orangefs_attribute ccache_soft_limit_attribute =
+       __ATTR(soft_limit,
+              0664,
+              service_ccache_show,
+              service_ccache_store);
+
+static struct ccache_orangefs_attribute ccache_timeout_secs_attribute =
+       __ATTR(timeout_secs,
+              0664,
+              service_ccache_show,
+              service_ccache_store);
+
+static struct attribute *ccache_orangefs_default_attrs[] = {
+       &ccache_hard_limit_attribute.attr,
+       &ccache_reclaim_percent_attribute.attr,
+       &ccache_soft_limit_attribute.attr,
+       &ccache_timeout_secs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type ccache_orangefs_ktype = {
+       .sysfs_ops = &ccache_orangefs_sysfs_ops,
+       .release = ccache_orangefs_release,
+       .default_attrs = ccache_orangefs_default_attrs,
+};
+
+static struct ncache_orangefs_attribute ncache_hard_limit_attribute =
+       __ATTR(hard_limit,
+              0664,
+              service_ncache_show,
+              service_ncache_store);
+
+static struct ncache_orangefs_attribute ncache_reclaim_percent_attribute =
+       __ATTR(reclaim_percentage,
+              0664,
+              service_ncache_show,
+              service_ncache_store);
+
+static struct ncache_orangefs_attribute ncache_soft_limit_attribute =
+       __ATTR(soft_limit,
+              0664,
+              service_ncache_show,
+              service_ncache_store);
+
+static struct ncache_orangefs_attribute ncache_timeout_msecs_attribute =
+       __ATTR(timeout_msecs,
+              0664,
+              service_ncache_show,
+              service_ncache_store);
+
+static struct attribute *ncache_orangefs_default_attrs[] = {
+       &ncache_hard_limit_attribute.attr,
+       &ncache_reclaim_percent_attribute.attr,
+       &ncache_soft_limit_attribute.attr,
+       &ncache_timeout_msecs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type ncache_orangefs_ktype = {
+       .sysfs_ops = &ncache_orangefs_sysfs_ops,
+       .release = ncache_orangefs_release,
+       .default_attrs = ncache_orangefs_default_attrs,
+};
+
+static struct pc_orangefs_attribute pc_acache_attribute =
+       __ATTR(acache,
+              0664,
+              service_pc_show,
+              NULL);
+
+static struct pc_orangefs_attribute pc_capcache_attribute =
+       __ATTR(capcache,
+              0664,
+              service_pc_show,
+              NULL);
+
+static struct pc_orangefs_attribute pc_ncache_attribute =
+       __ATTR(ncache,
+              0664,
+              service_pc_show,
+              NULL);
+
+static struct attribute *pc_orangefs_default_attrs[] = {
+       &pc_acache_attribute.attr,
+       &pc_capcache_attribute.attr,
+       &pc_ncache_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type pc_orangefs_ktype = {
+       .sysfs_ops = &pc_orangefs_sysfs_ops,
+       .release = pc_orangefs_release,
+       .default_attrs = pc_orangefs_default_attrs,
+};
+
+static struct stats_orangefs_attribute stats_reads_attribute =
+       __ATTR(reads,
+              0664,
+              int_stats_show,
+              NULL);
+
+static struct stats_orangefs_attribute stats_writes_attribute =
+       __ATTR(writes,
+              0664,
+              int_stats_show,
+              NULL);
+
+static struct attribute *stats_orangefs_default_attrs[] = {
+       &stats_reads_attribute.attr,
+       &stats_writes_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type stats_orangefs_ktype = {
+       .sysfs_ops = &stats_orangefs_sysfs_ops,
+       .release = stats_orangefs_release,
+       .default_attrs = stats_orangefs_default_attrs,
+};
+
+static struct orangefs_obj *orangefs_obj;
+static struct acache_orangefs_obj *acache_orangefs_obj;
+static struct capcache_orangefs_obj *capcache_orangefs_obj;
+static struct ccache_orangefs_obj *ccache_orangefs_obj;
+static struct ncache_orangefs_obj *ncache_orangefs_obj;
+static struct pc_orangefs_obj *pc_orangefs_obj;
+static struct stats_orangefs_obj *stats_orangefs_obj;
+
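+/*
+ * For illustration, the objects registered below build roughly this
+ * sysfs layout:
+ *
+ *     /sys/fs/orangefs/               op_timeout_secs, slot_timeout_secs,
+ *                                     perf_counter_reset, perf_history_size,
+ *                                     perf_time_interval_secs
+ *     /sys/fs/orangefs/acache/        hard_limit, soft_limit,
+ *     /sys/fs/orangefs/capcache/      reclaim_percentage and a timeout
+ *     /sys/fs/orangefs/ccache/        attribute per cache
+ *     /sys/fs/orangefs/ncache/
+ *     /sys/fs/orangefs/perf_counters/ acache, capcache, ncache
+ *     /sys/fs/orangefs/stats/         reads, writes
+ */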
+int orangefs_sysfs_init(void)
+{
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_init: start\n");
+
+       /* create /sys/fs/orangefs. */
+       orangefs_obj = kzalloc(sizeof(*orangefs_obj), GFP_KERNEL);
+       if (!orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&orangefs_obj->kobj,
+                                 &orangefs_ktype,
+                                 fs_kobj,
+                                 ORANGEFS_KOBJ_ID);
+
+       if (rc) {
+               kobject_put(&orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/acache. */
+       acache_orangefs_obj = kzalloc(sizeof(*acache_orangefs_obj), GFP_KERNEL);
+       if (!acache_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&acache_orangefs_obj->kobj,
+                                 &acache_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 ACACHE_KOBJ_ID);
+
+       if (rc) {
+               kobject_put(&acache_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&acache_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/capcache. */
+       capcache_orangefs_obj =
+               kzalloc(sizeof(*capcache_orangefs_obj), GFP_KERNEL);
+       if (!capcache_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&capcache_orangefs_obj->kobj,
+                                 &capcache_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 CAPCACHE_KOBJ_ID);
+       if (rc) {
+               kobject_put(&capcache_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&capcache_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/ccache. */
+       ccache_orangefs_obj =
+               kzalloc(sizeof(*ccache_orangefs_obj), GFP_KERNEL);
+       if (!ccache_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&ccache_orangefs_obj->kobj,
+                                 &ccache_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 CCACHE_KOBJ_ID);
+       if (rc) {
+               kobject_put(&ccache_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&ccache_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/ncache. */
+       ncache_orangefs_obj = kzalloc(sizeof(*ncache_orangefs_obj), GFP_KERNEL);
+       if (!ncache_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&ncache_orangefs_obj->kobj,
+                                 &ncache_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 NCACHE_KOBJ_ID);
+
+       if (rc) {
+               kobject_put(&ncache_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&ncache_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/perf_counters. */
+       pc_orangefs_obj = kzalloc(sizeof(*pc_orangefs_obj), GFP_KERNEL);
+       if (!pc_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&pc_orangefs_obj->kobj,
+                                 &pc_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 "perf_counters");
+
+       if (rc) {
+               kobject_put(&pc_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&pc_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/stats. */
+       stats_orangefs_obj = kzalloc(sizeof(*stats_orangefs_obj), GFP_KERNEL);
+       if (!stats_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&stats_orangefs_obj->kobj,
+                                 &stats_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 STATS_KOBJ_ID);
+
+       if (rc) {
+               kobject_put(&stats_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&stats_orangefs_obj->kobj, KOBJ_ADD);
+out:
+       return rc;
+}
+
+void orangefs_sysfs_exit(void)
+{
+       gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_exit: start\n");
+
+       kobject_put(&acache_orangefs_obj->kobj);
+       kobject_put(&capcache_orangefs_obj->kobj);
+       kobject_put(&ccache_orangefs_obj->kobj);
+       kobject_put(&ncache_orangefs_obj->kobj);
+       kobject_put(&pc_orangefs_obj->kobj);
+       kobject_put(&stats_orangefs_obj->kobj);
+
+       kobject_put(&orangefs_obj->kobj);
+}
diff --git a/fs/orangefs/orangefs-sysfs.h b/fs/orangefs/orangefs-sysfs.h
new file mode 100644 (file)
index 0000000..f0b7638
--- /dev/null
@@ -0,0 +1,2 @@
+extern int orangefs_sysfs_init(void);
+extern void orangefs_sysfs_exit(void);
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
new file mode 100644 (file)
index 0000000..fa3ed8a
--- /dev/null
@@ -0,0 +1,1263 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-dev-proto.h"
+#include "orangefs-bufmap.h"
+
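+/*
+ * Pull the fs_id out of whichever request type the op carries;
+ * returns ORANGEFS_FS_ID_NULL when the op has none.
+ */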
+__s32 fsid_of_op(struct orangefs_kernel_op_s *op)
+{
+       __s32 fsid = ORANGEFS_FS_ID_NULL;
+
+       if (op) {
+               switch (op->upcall.type) {
+               case ORANGEFS_VFS_OP_FILE_IO:
+                       fsid = op->upcall.req.io.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_LOOKUP:
+                       fsid = op->upcall.req.lookup.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_CREATE:
+                       fsid = op->upcall.req.create.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_GETATTR:
+                       fsid = op->upcall.req.getattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_REMOVE:
+                       fsid = op->upcall.req.remove.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_MKDIR:
+                       fsid = op->upcall.req.mkdir.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_READDIR:
+                       fsid = op->upcall.req.readdir.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_SETATTR:
+                       fsid = op->upcall.req.setattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_SYMLINK:
+                       fsid = op->upcall.req.sym.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_RENAME:
+                       fsid = op->upcall.req.rename.old_parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_STATFS:
+                       fsid = op->upcall.req.statfs.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_TRUNCATE:
+                       fsid = op->upcall.req.truncate.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_MMAP_RA_FLUSH:
+                       fsid = op->upcall.req.ra_cache_flush.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_FS_UMOUNT:
+                       fsid = op->upcall.req.fs_umount.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_GETXATTR:
+                       fsid = op->upcall.req.getxattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_SETXATTR:
+                       fsid = op->upcall.req.setxattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_LISTXATTR:
+                       fsid = op->upcall.req.listxattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_REMOVEXATTR:
+                       fsid = op->upcall.req.removexattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_FSYNC:
+                       fsid = op->upcall.req.fsync.refn.fs_id;
+                       break;
+               default:
+                       break;
+               }
+       }
+       return fsid;
+}
+
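+/*
+ * Map the OrangeFS attribute flags (immutable, append-only, noatime)
+ * onto the kernel's S_* inode flags.
+ */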
+static int orangefs_inode_flags(struct ORANGEFS_sys_attr_s *attrs)
+{
+       int flags = 0;
+
+       if (attrs->flags & ORANGEFS_IMMUTABLE_FL)
+               flags |= S_IMMUTABLE;
+       else
+               flags &= ~S_IMMUTABLE;
+       if (attrs->flags & ORANGEFS_APPEND_FL)
+               flags |= S_APPEND;
+       else
+               flags &= ~S_APPEND;
+       if (attrs->flags & ORANGEFS_NOATIME_FL)
+               flags |= S_NOATIME;
+       else
+               flags &= ~S_NOATIME;
+       return flags;
+}
+
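+/*
+ * Translate the OrangeFS permission bits into the equivalent POSIX
+ * mode bits (rwx for user/group/other plus setuid/setgid).
+ */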
+static int orangefs_inode_perms(struct ORANGEFS_sys_attr_s *attrs)
+{
+       int perm_mode = 0;
+
+       if (attrs->perms & ORANGEFS_O_EXECUTE)
+               perm_mode |= S_IXOTH;
+       if (attrs->perms & ORANGEFS_O_WRITE)
+               perm_mode |= S_IWOTH;
+       if (attrs->perms & ORANGEFS_O_READ)
+               perm_mode |= S_IROTH;
+
+       if (attrs->perms & ORANGEFS_G_EXECUTE)
+               perm_mode |= S_IXGRP;
+       if (attrs->perms & ORANGEFS_G_WRITE)
+               perm_mode |= S_IWGRP;
+       if (attrs->perms & ORANGEFS_G_READ)
+               perm_mode |= S_IRGRP;
+
+       if (attrs->perms & ORANGEFS_U_EXECUTE)
+               perm_mode |= S_IXUSR;
+       if (attrs->perms & ORANGEFS_U_WRITE)
+               perm_mode |= S_IWUSR;
+       if (attrs->perms & ORANGEFS_U_READ)
+               perm_mode |= S_IRUSR;
+
+       if (attrs->perms & ORANGEFS_G_SGID)
+               perm_mode |= S_ISGID;
+       if (attrs->perms & ORANGEFS_U_SUID)
+               perm_mode |= S_ISUID;
+
+       return perm_mode;
+}
+
+/* NOTE: symname is ignored unless the inode is a sym link */
+static int copy_attributes_to_inode(struct inode *inode,
+                                   struct ORANGEFS_sys_attr_s *attrs,
+                                   char *symname)
+{
+       int ret = -1;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       loff_t inode_size = 0;
+       loff_t rounded_up_size = 0;
+
+       /*
+        * arbitrarily set the inode block size; FIXME: we need to
+        * resolve the difference between the reported inode blocksize
+        * and the PAGE_CACHE_SIZE, since our block count will always
+        * be wrong.
+        *
+        * For now, we're setting the block count to be the proper
+        * number assuming the block size is 512 bytes, and the size is
+        * rounded up to the nearest 4K.  This is apparently required
+        * to get proper size reports from the 'du' shell utility.
+        *
+        * changing the inode->i_blkbits to something other than
+        * PAGE_CACHE_SHIFT breaks mmap/execution as we depend on that.
+        */
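+       /*
+        * Worked example (for illustration): a 6000-byte file gives
+        * rounded_up_size = 6000 + (4096 - (6000 % 4096)) = 8192, so
+        * i_blocks becomes 8192 / 512 = 16.  Note that a size which is
+        * already a multiple of 4096 is rounded up by a further block.
+        */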
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "attrs->mask = %x (objtype = %s)\n",
+                    attrs->mask,
+                    attrs->objtype == ORANGEFS_TYPE_METAFILE ? "file" :
+                    attrs->objtype == ORANGEFS_TYPE_DIRECTORY ? "directory" :
+                    attrs->objtype == ORANGEFS_TYPE_SYMLINK ? "symlink" :
+                       "invalid/unknown");
+
+       switch (attrs->objtype) {
+       case ORANGEFS_TYPE_METAFILE:
+               inode->i_flags = orangefs_inode_flags(attrs);
+               if (attrs->mask & ORANGEFS_ATTR_SYS_SIZE) {
+                       inode_size = (loff_t) attrs->size;
+                       rounded_up_size =
+                           (inode_size + (4096 - (inode_size % 4096)));
+
+                       orangefs_lock_inode(inode);
+                       inode->i_bytes = inode_size;
+                       inode->i_blocks =
+                           (unsigned long)(rounded_up_size / 512);
+                       orangefs_unlock_inode(inode);
+
+                       /*
+                        * NOTE: make sure all the places we're called
+                        * from have the inode->i_sem lock. We're fine
+                        * in 99% of the cases since we're mostly
+                        * called from a lookup.
+                        */
+                       inode->i_size = inode_size;
+               }
+               break;
+       case ORANGEFS_TYPE_SYMLINK:
+               if (symname != NULL) {
+                       inode->i_size = (loff_t) strlen(symname);
+                       break;
+               }
+               /*FALLTHRU*/
+       default:
+               inode->i_size = PAGE_CACHE_SIZE;
+
+               orangefs_lock_inode(inode);
+               inode_set_bytes(inode, inode->i_size);
+               orangefs_unlock_inode(inode);
+               break;
+       }
+
+       inode->i_uid = make_kuid(&init_user_ns, attrs->owner);
+       inode->i_gid = make_kgid(&init_user_ns, attrs->group);
+       inode->i_atime.tv_sec = (time_t) attrs->atime;
+       inode->i_mtime.tv_sec = (time_t) attrs->mtime;
+       inode->i_ctime.tv_sec = (time_t) attrs->ctime;
+       inode->i_atime.tv_nsec = 0;
+       inode->i_mtime.tv_nsec = 0;
+       inode->i_ctime.tv_nsec = 0;
+
+       inode->i_mode = orangefs_inode_perms(attrs);
+
+       if (is_root_handle(inode)) {
+               /* special case: mark the root inode as sticky */
+               inode->i_mode |= S_ISVTX;
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "Marking inode %pU as sticky\n",
+                            get_khandle_from_ino(inode));
+       }
+
+       switch (attrs->objtype) {
+       case ORANGEFS_TYPE_METAFILE:
+               inode->i_mode |= S_IFREG;
+               ret = 0;
+               break;
+       case ORANGEFS_TYPE_DIRECTORY:
+               inode->i_mode |= S_IFDIR;
+               /* NOTE: we have no good way to keep nlink consistent
+                * for directories across clients; keep constant at 1.
+                * Why 1?  If we go with 2, then find(1) gets confused
+                * and won't work properly without the -noleaf option.
+                */
+               set_nlink(inode, 1);
+               ret = 0;
+               break;
+       case ORANGEFS_TYPE_SYMLINK:
+               inode->i_mode |= S_IFLNK;
+
+               /* copy link target to inode private data */
+               if (orangefs_inode && symname) {
+                       strncpy(orangefs_inode->link_target,
+                               symname,
+                               ORANGEFS_NAME_MAX);
+                       gossip_debug(GOSSIP_UTILS_DEBUG,
+                                    "Copied attr link target %s\n",
+                                    orangefs_inode->link_target);
+               }
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "symlink mode %o\n",
+                            inode->i_mode);
+               ret = 0;
+               break;
+       default:
+               gossip_err("orangefs: copy_attributes_to_inode: got invalid attribute type %x\n",
+                       attrs->objtype);
+       }
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs: copy_attributes_to_inode: setting i_mode to %o, i_size to %lu\n",
+                    inode->i_mode,
+                    (unsigned long)i_size_read(inode));
+
+       return ret;
+}
+
+/*
+ * NOTE: in kernel land, we never use the sys_attr->link_target for
+ * anything, so don't bother copying it into the sys_attr object here.
+ */
+static inline int copy_attributes_from_inode(struct inode *inode,
+                                            struct ORANGEFS_sys_attr_s *attrs,
+                                            struct iattr *iattr)
+{
+       umode_t tmp_mode;
+
+       if (!iattr || !inode || !attrs) {
+               gossip_err("NULL iattr (%p), inode (%p), attrs (%p) "
+                          "in copy_attributes_from_inode!\n",
+                          iattr,
+                          inode,
+                          attrs);
+               return -EINVAL;
+       }
+       /*
+        * We need to be careful to only copy the attributes out of the
+        * iattr object that we know are valid.
+        */
+       attrs->mask = 0;
+       if (iattr->ia_valid & ATTR_UID) {
+               attrs->owner = from_kuid(current_user_ns(), iattr->ia_uid);
+               attrs->mask |= ORANGEFS_ATTR_SYS_UID;
+               gossip_debug(GOSSIP_UTILS_DEBUG, "(UID) %d\n", attrs->owner);
+       }
+       if (iattr->ia_valid & ATTR_GID) {
+               attrs->group = from_kgid(current_user_ns(), iattr->ia_gid);
+               attrs->mask |= ORANGEFS_ATTR_SYS_GID;
+               gossip_debug(GOSSIP_UTILS_DEBUG, "(GID) %d\n", attrs->group);
+       }
+
+       if (iattr->ia_valid & ATTR_ATIME) {
+               attrs->mask |= ORANGEFS_ATTR_SYS_ATIME;
+               if (iattr->ia_valid & ATTR_ATIME_SET) {
+                       attrs->atime =
+                           orangefs_convert_time_field(&iattr->ia_atime);
+                       attrs->mask |= ORANGEFS_ATTR_SYS_ATIME_SET;
+               }
+       }
+       if (iattr->ia_valid & ATTR_MTIME) {
+               attrs->mask |= ORANGEFS_ATTR_SYS_MTIME;
+               if (iattr->ia_valid & ATTR_MTIME_SET) {
+                       attrs->mtime =
+                           orangefs_convert_time_field(&iattr->ia_mtime);
+                       attrs->mask |= ORANGEFS_ATTR_SYS_MTIME_SET;
+               }
+       }
+       if (iattr->ia_valid & ATTR_CTIME)
+               attrs->mask |= ORANGEFS_ATTR_SYS_CTIME;
+
+       /*
+        * ORANGEFS cannot set size with a setattr operation.  It is unlikely
+        * to be requested through the VFS, but just in case, don't worry
+        * about ATTR_SIZE.
+        */
+
+       if (iattr->ia_valid & ATTR_MODE) {
+               tmp_mode = iattr->ia_mode;
+               if (tmp_mode & (S_ISVTX)) {
+                       if (is_root_handle(inode)) {
+                               /*
+                                * allow sticky bit to be set on root (since
+                                * it shows up that way by default anyhow),
+                                * but don't show it to the server
+                                */
+                               tmp_mode -= S_ISVTX;
+                       } else {
+                               gossip_debug(GOSSIP_UTILS_DEBUG,
+                                            "User attempted to set sticky bit on non-root directory; returning EINVAL.\n");
+                               return -EINVAL;
+                       }
+               }
+
+               if (tmp_mode & (S_ISUID)) {
+                       gossip_debug(GOSSIP_UTILS_DEBUG,
+                                    "Attempting to set setuid bit (not supported); returning EINVAL.\n");
+                       return -EINVAL;
+               }
+
+               attrs->perms = ORANGEFS_util_translate_mode(tmp_mode);
+               attrs->mask |= ORANGEFS_ATTR_SYS_PERM;
+       }
+
+       return 0;
+}
+
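+/*
+ * Helper for the check == 1 path of orangefs_inode_getattr(): returns 1
+ * when the cached inode still matches the attributes the server just
+ * returned (for the fields covered by mask), 0 when anything differs and
+ * the inode should be treated as stale.
+ */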
+static int compare_attributes_to_inode(struct inode *inode,
+                                      struct ORANGEFS_sys_attr_s *attrs,
+                                      char *symname,
+                                      int mask)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       loff_t inode_size, rounded_up_size;
+
+       /* Much of what happens below relies on the type being around. */
+       if (!(mask & ORANGEFS_ATTR_SYS_TYPE))
+               return 0;
+
+       if (attrs->objtype == ORANGEFS_TYPE_METAFILE &&
+           inode->i_flags != orangefs_inode_flags(attrs))
+               return 0;
+
+       /* Compare file size. */
+
+       switch (attrs->objtype) {
+       case ORANGEFS_TYPE_METAFILE:
+               if (mask & ORANGEFS_ATTR_SYS_SIZE) {
+                       inode_size = attrs->size;
+                       rounded_up_size = inode_size +
+                           (4096 - (inode_size % 4096));
+                       if (inode->i_bytes != inode_size ||
+                           inode->i_blocks != rounded_up_size/512)
+                               return 0;
+               }
+               break;
+       case ORANGEFS_TYPE_SYMLINK:
+               if (mask & ORANGEFS_ATTR_SYS_SIZE)
+                       if (symname && strlen(symname) != inode->i_size)
+                               return 0;
+               break;
+       default:
+               if (inode->i_size != PAGE_CACHE_SIZE &&
+                   inode_get_bytes(inode) != PAGE_CACHE_SIZE)
+                       return 0;
+       }
+
+       /* Compare general attributes. */
+
+       if (mask & ORANGEFS_ATTR_SYS_UID &&
+           !uid_eq(inode->i_uid, make_kuid(&init_user_ns, attrs->owner)))
+               return 0;
+       if (mask & ORANGEFS_ATTR_SYS_GID &&
+           !gid_eq(inode->i_gid, make_kgid(&init_user_ns, attrs->group)))
+               return 0;
+       if (mask & ORANGEFS_ATTR_SYS_ATIME &&
+           inode->i_atime.tv_sec != attrs->atime)
+               return 0;
+       if (mask & ORANGEFS_ATTR_SYS_MTIME &&
+           inode->i_mtime.tv_sec != attrs->mtime)
+               return 0;
+       if (mask & ORANGEFS_ATTR_SYS_CTIME &&
+           inode->i_ctime.tv_sec != attrs->ctime)
+               return 0;
+       if (inode->i_atime.tv_nsec != 0 ||
+           inode->i_mtime.tv_nsec != 0 ||
+           inode->i_ctime.tv_nsec != 0)
+               return 0;
+
+       if (mask & ORANGEFS_ATTR_SYS_PERM &&
+           (inode->i_mode & ~(S_ISVTX|S_IFREG|S_IFDIR|S_IFLNK)) !=
+           orangefs_inode_perms(attrs))
+               return 0;
+
+       if (is_root_handle(inode))
+               if (!(inode->i_mode & S_ISVTX))
+                       return 0;
+
+       /* Compare file type. */
+
+       switch (attrs->objtype) {
+       case ORANGEFS_TYPE_METAFILE:
+               if (!(inode->i_mode & S_IFREG))
+                       return 0;
+               break;
+       case ORANGEFS_TYPE_DIRECTORY:
+               if (!(inode->i_mode & S_IFDIR))
+                       return 0;
+               if (inode->i_nlink != 1)
+                       return 0;
+               break;
+       case ORANGEFS_TYPE_SYMLINK:
+               if (!(inode->i_mode & S_IFLNK))
+                       return 0;
+               if (orangefs_inode && symname &&
+                   mask & ORANGEFS_ATTR_SYS_LNK_TARGET)
+                       if (strcmp(orangefs_inode->link_target, symname))
+                               return 0;
+               break;
+       default:
+               gossip_err("orangefs: compare_attributes_to_inode: got invalid attribute type %x\n",
+                   attrs->objtype);
+
+       }
+
+       return 1;
+}
+
+/*
+ * Issues an orangefs getattr request and fills in the appropriate inode
+ * attributes if successful.  When check is 0, returns 0 on success and
+ * -errno otherwise.  When check is 1, returns 1 on success if the inode
+ * is valid, 0 on success if the inode is stale, and -errno otherwise.
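+ *
+ * For example (illustrative only), a revalidation path would pass
+ * check == 1 and treat a return of 0 as a stale inode, while a plain
+ * attribute refresh would pass check == 0 and simply propagate -errno
+ * on failure.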
+ */
+int orangefs_inode_getattr(struct inode *inode, __u32 getattr_mask, int check)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       int ret = -EINVAL;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "%s: called on inode %pU\n",
+                    __func__,
+                    get_khandle_from_ino(inode));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
+       if (!new_op)
+               return -ENOMEM;
+       new_op->upcall.req.getattr.refn = orangefs_inode->refn;
+       new_op->upcall.req.getattr.mask = getattr_mask;
+
+       ret = service_operation(new_op, __func__,
+                               get_interruptible_flag(inode));
+       if (ret != 0)
+               goto out;
+
+       if (check) {
+               ret = compare_attributes_to_inode(inode,
+                   &new_op->downcall.resp.getattr.attributes,
+                   new_op->downcall.resp.getattr.link_target,
+                   getattr_mask);
+
+               if (new_op->downcall.resp.getattr.attributes.objtype ==
+                   ORANGEFS_TYPE_METAFILE) {
+                       if (orangefs_inode->blksize !=
+                           new_op->downcall.resp.getattr.attributes.blksize)
+                               ret = 0;
+               } else {
+                       if (orangefs_inode->blksize != 1 << inode->i_blkbits)
+                               ret = 0;
+               }
+       } else {
+               if (copy_attributes_to_inode(inode,
+                               &new_op->downcall.resp.getattr.attributes,
+                               new_op->downcall.resp.getattr.link_target)) {
+                       gossip_err("%s: failed to copy attributes\n", __func__);
+                       ret = -ENOENT;
+                       goto out;
+               }
+
+               /*
+                * Store blksize in the orangefs specific part of the inode
+                * structure; we only use it when reporting to stat, so that
+                * it doesn't perturb any inode related code paths.
+                */
+               if (new_op->downcall.resp.getattr.attributes.objtype ==
+                               ORANGEFS_TYPE_METAFILE) {
+                       orangefs_inode->blksize = new_op->downcall.resp.
+                           getattr.attributes.blksize;
+               } else {
+                       /*
+                        * mimic behavior of generic_fillattr() for other file
+                        * types.
+                        */
+                       orangefs_inode->blksize = (1 << inode->i_blkbits);
+
+               }
+       }
+
+out:
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "Getattr on handle %pU, "
+                    "fsid %d\n  (inode ct = %d) returned %d\n",
+                    &orangefs_inode->refn.khandle,
+                    orangefs_inode->refn.fs_id,
+                    (int)atomic_read(&inode->i_count),
+                    ret);
+
+       op_release(new_op);
+       return ret;
+}
+
+/*
+ * issues an orangefs setattr request to make sure the new attribute values
+ * take effect if successful.  returns 0 on success; -errno otherwise
+ */
+int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       int ret;
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_SETATTR);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.setattr.refn = orangefs_inode->refn;
+       ret = copy_attributes_from_inode(inode,
+                      &new_op->upcall.req.setattr.attributes,
+                      iattr);
+       if (ret >= 0) {
+               ret = service_operation(new_op, __func__,
+                               get_interruptible_flag(inode));
+
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "orangefs_inode_setattr: returning %d\n",
+                            ret);
+       }
+
+       op_release(new_op);
+
+       /*
+        * successful setattr should clear the atime, mtime and
+        * ctime flags.
+        */
+       if (ret == 0) {
+               ClearAtimeFlag(orangefs_inode);
+               ClearMtimeFlag(orangefs_inode);
+               ClearCtimeFlag(orangefs_inode);
+               ClearModeFlag(orangefs_inode);
+       }
+
+       return ret;
+}
+
+int orangefs_flush_inode(struct inode *inode)
+{
+       /*
+        * If it is a dirty inode, this function gets called.
+        * Gather all the information that needs to be setattr'ed
+        * Right now, this will only be used for mode, atime, mtime
+        * and/or ctime.
+        */
+       struct iattr wbattr;
+       int ret;
+       int mtime_flag;
+       int ctime_flag;
+       int atime_flag;
+       int mode_flag;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+
+       memset(&wbattr, 0, sizeof(wbattr));
+
+       /*
+        * check inode flags up front, and clear them if they are set.  This
+        * will prevent multiple processes from all trying to flush the same
+        * inode if they call close() simultaneously
+        */
+       mtime_flag = MtimeFlag(orangefs_inode);
+       ClearMtimeFlag(orangefs_inode);
+       ctime_flag = CtimeFlag(orangefs_inode);
+       ClearCtimeFlag(orangefs_inode);
+       atime_flag = AtimeFlag(orangefs_inode);
+       ClearAtimeFlag(orangefs_inode);
+       mode_flag = ModeFlag(orangefs_inode);
+       ClearModeFlag(orangefs_inode);
+
+       /*
+        * -- Lazy atime, mtime and ctime update --
+        * Note: all times are dictated by the server in the new scheme
+        * and not by the clients.
+        *
+        * Mode updates are also handled here now.
+        */
+
+       if (mtime_flag)
+               wbattr.ia_valid |= ATTR_MTIME;
+       if (ctime_flag)
+               wbattr.ia_valid |= ATTR_CTIME;
+       if (atime_flag)
+               wbattr.ia_valid |= ATTR_ATIME;
+
+       if (mode_flag) {
+               wbattr.ia_mode = inode->i_mode;
+               wbattr.ia_valid |= ATTR_MODE;
+       }
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "*********** orangefs_flush_inode: %pU "
+                    "(ia_valid %d)\n",
+                    get_khandle_from_ino(inode),
+                    wbattr.ia_valid);
+       if (wbattr.ia_valid == 0) {
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "orangefs_flush_inode skipping setattr()\n");
+               return 0;
+       }
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_flush_inode (%pU) writing mode %o\n",
+                    get_khandle_from_ino(inode),
+                    inode->i_mode);
+
+       ret = orangefs_inode_setattr(inode, &wbattr);
+
+       return ret;
+}
+
+int orangefs_unmount_sb(struct super_block *sb)
+{
+       int ret = -EINVAL;
+       struct orangefs_kernel_op_s *new_op = NULL;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_unmount_sb called on sb %p\n",
+                    sb);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FS_UMOUNT);
+       if (!new_op)
+               return -ENOMEM;
+       new_op->upcall.req.fs_umount.id = ORANGEFS_SB(sb)->id;
+       new_op->upcall.req.fs_umount.fs_id = ORANGEFS_SB(sb)->fs_id;
+       strncpy(new_op->upcall.req.fs_umount.orangefs_config_server,
+               ORANGEFS_SB(sb)->devname,
+               ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "Attempting ORANGEFS Unmount via host %s\n",
+                    new_op->upcall.req.fs_umount.orangefs_config_server);
+
+       ret = service_operation(new_op, "orangefs_fs_umount", 0);
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_unmount: got return value of %d\n", ret);
+       if (ret)
+               sb = ERR_PTR(ret);
+       else
+               ORANGEFS_SB(sb)->mount_pending = 1;
+
+       op_release(new_op);
+       return ret;
+}
+
+/*
+ * NOTE: on successful cancellation, be sure to return -EINTR, as
+ * that's the return value the caller expects
+ */
+int orangefs_cancel_op_in_progress(__u64 tag)
+{
+       int ret = -EINVAL;
+       struct orangefs_kernel_op_s *new_op = NULL;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_cancel_op_in_progress called on tag %llu\n",
+                    llu(tag));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_CANCEL);
+       if (!new_op)
+               return -ENOMEM;
+       new_op->upcall.req.cancel.op_tag = tag;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "Attempting ORANGEFS operation cancellation of tag %llu\n",
+                    llu(new_op->upcall.req.cancel.op_tag));
+
+       ret = service_operation(new_op, "orangefs_cancel", ORANGEFS_OP_CANCELLATION);
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_cancel_op_in_progress: got return value of %d\n",
+                    ret);
+
+       op_release(new_op);
+       return ret;
+}
+
+void orangefs_make_bad_inode(struct inode *inode)
+{
+       if (is_root_handle(inode)) {
+               /*
+                * if this occurs, the pvfs2-client-core was killed but we
+                * can't afford to lose the inode operations and such
+                * associated with the root handle in any case.
+                */
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "*** NOT making bad root inode %pU\n",
+                            get_khandle_from_ino(inode));
+       } else {
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "*** making bad inode %pU\n",
+                            get_khandle_from_ino(inode));
+               make_bad_inode(inode);
+       }
+}
+
+/* Block all blockable signals... */
+void orangefs_block_signals(sigset_t *orig_sigset)
+{
+       sigset_t mask;
+
+       /*
+        * Initialize all entries in the signal set to the
+        * inverse of the given mask.
+        */
+       siginitsetinv(&mask, sigmask(SIGKILL));
+
+       /* Block 'em Danno... */
+       sigprocmask(SIG_BLOCK, &mask, orig_sigset);
+}
+
+/* set the signal mask to the given template... */
+void orangefs_set_signals(sigset_t *sigset)
+{
+       sigprocmask(SIG_SETMASK, sigset, NULL);
+}
+
+/*
+ * The following is a very dirty hack that is now a permanent part of the
+ * ORANGEFS protocol. See protocol.h for more error definitions.
+ */
+
+/* The order matches include/orangefs-types.h in the OrangeFS source. */
+static int PINT_errno_mapping[] = {
+       0, EPERM, ENOENT, EINTR, EIO, ENXIO, EBADF, EAGAIN, ENOMEM,
+       EFAULT, EBUSY, EEXIST, ENODEV, ENOTDIR, EISDIR, EINVAL, EMFILE,
+       EFBIG, ENOSPC, EROFS, EMLINK, EPIPE, EDEADLK, ENAMETOOLONG,
+       ENOLCK, ENOSYS, ENOTEMPTY, ELOOP, EWOULDBLOCK, ENOMSG, EUNATCH,
+       EBADR, EDEADLOCK, ENODATA, ETIME, ENONET, EREMOTE, ECOMM,
+       EPROTO, EBADMSG, EOVERFLOW, ERESTART, EMSGSIZE, EPROTOTYPE,
+       ENOPROTOOPT, EPROTONOSUPPORT, EOPNOTSUPP, EADDRINUSE,
+       EADDRNOTAVAIL, ENETDOWN, ENETUNREACH, ENETRESET, ENOBUFS,
+       ETIMEDOUT, ECONNREFUSED, EHOSTDOWN, EHOSTUNREACH, EALREADY,
+       EACCES, ECONNRESET, ERANGE
+};
+
+int orangefs_normalize_to_errno(__s32 error_code)
+{
+       __u32 i;
+
+       /* Success */
+       if (error_code == 0) {
+               return 0;
+       /*
+        * This shouldn't ever happen. If it does it should be fixed on the
+        * server.
+        */
+       } else if (error_code > 0) {
+               gossip_err("orangefs: error status received.\n");
+               gossip_err("orangefs: assuming error code is inverted.\n");
+               error_code = -error_code;
+       }
+
+       /*
+        * XXX: This is very bad since error codes from ORANGEFS may not be
+        * suitable for return into userspace.
+        */
+
+       /*
+        * Convert ORANGEFS error values into errno values suitable for return
+        * from the kernel.
+        */
+       if ((-error_code) & ORANGEFS_NON_ERRNO_ERROR_BIT) {
+               if (((-error_code) &
+                   (ORANGEFS_ERROR_NUMBER_BITS|ORANGEFS_NON_ERRNO_ERROR_BIT|
+                   ORANGEFS_ERROR_BIT)) == ORANGEFS_ECANCEL) {
+                       /*
+                        * cancellation error codes generally correspond to
+                        * a timeout from the client's perspective
+                        */
+                       error_code = -ETIMEDOUT;
+               } else {
+                       /* assume a default error code */
+                       gossip_err("orangefs: warning: got error code without errno equivalent: %d.\n", error_code);
+                       error_code = -EINVAL;
+               }
+
+       /* Convert ORANGEFS encoded errno values into regular errno values. */
+       } else if ((-error_code) & ORANGEFS_ERROR_BIT) {
+               i = (-error_code) & ~(ORANGEFS_ERROR_BIT|ORANGEFS_ERROR_CLASS_BITS);
+               if (i < sizeof(PINT_errno_mapping)/sizeof(*PINT_errno_mapping))
+                       error_code = -PINT_errno_mapping[i];
+               else
+                       error_code = -EINVAL;
+
+       /*
+        * Only ORANGEFS protocol error codes should ever come here. Otherwise
+        * there is a bug somewhere.
+        */
+       } else {
+               gossip_err("orangefs: orangefs_normalize_to_errno: got error code which is not from ORANGEFS.\n");
+       }
+       return error_code;
+}
+
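+/*
+ * Translate POSIX permission bits into their ORANGEFS equivalents; for
+ * example (illustrative), a mode of 0644 becomes ORANGEFS_U_READ |
+ * ORANGEFS_U_WRITE | ORANGEFS_G_READ | ORANGEFS_O_READ.
+ */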
+#define NUM_MODES 11
+__s32 ORANGEFS_util_translate_mode(int mode)
+{
+       int ret = 0;
+       int i = 0;
+       static int modes[NUM_MODES] = {
+               S_IXOTH, S_IWOTH, S_IROTH,
+               S_IXGRP, S_IWGRP, S_IRGRP,
+               S_IXUSR, S_IWUSR, S_IRUSR,
+               S_ISGID, S_ISUID
+       };
+       static int orangefs_modes[NUM_MODES] = {
+               ORANGEFS_O_EXECUTE, ORANGEFS_O_WRITE, ORANGEFS_O_READ,
+               ORANGEFS_G_EXECUTE, ORANGEFS_G_WRITE, ORANGEFS_G_READ,
+               ORANGEFS_U_EXECUTE, ORANGEFS_U_WRITE, ORANGEFS_U_READ,
+               ORANGEFS_G_SGID, ORANGEFS_U_SUID
+       };
+
+       for (i = 0; i < NUM_MODES; i++)
+               if (mode & modes[i])
+                       ret |= orangefs_modes[i];
+
+       return ret;
+}
+#undef NUM_MODES
+
+/*
+ * After obtaining a string representation of the client's debug
+ * keywords and their associated masks, this function is called to build an
+ * array of these values.
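+ * Each line of the string is expected to look like
+ * "<keyword> <mask1-in-hex> <mask2-in-hex>\n", for example (values are
+ * illustrative) "verbose ffffffff ffffffff".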
+ */
+int orangefs_prepare_cdm_array(char *debug_array_string)
+{
+       int i;
+       int rc = -EINVAL;
+       char *cds_head = NULL;
+       char *cds_delimiter = NULL;
+       int keyword_len = 0;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+       /*
+        * figure out how many elements the cdm_array needs.
+        */
+       for (i = 0; i < strlen(debug_array_string); i++)
+               if (debug_array_string[i] == '\n')
+                       cdm_element_count++;
+
+       if (!cdm_element_count) {
+               pr_info("No elements in client debug array string!\n");
+               goto out;
+       }
+
+       cdm_array =
+               kzalloc(cdm_element_count * sizeof(struct client_debug_mask),
+                       GFP_KERNEL);
+       if (!cdm_array) {
+               pr_info("malloc failed for cdm_array!\n");
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       cds_head = debug_array_string;
+
+       for (i = 0; i < cdm_element_count; i++) {
+               cds_delimiter = strchr(cds_head, '\n');
+               *cds_delimiter = '\0';
+
+               keyword_len = strcspn(cds_head, " ");
+
+               cdm_array[i].keyword = kzalloc(keyword_len + 1, GFP_KERNEL);
+               if (!cdm_array[i].keyword) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               sscanf(cds_head,
+                      "%s %llx %llx",
+                      cdm_array[i].keyword,
+                      (unsigned long long *)&(cdm_array[i].mask1),
+                      (unsigned long long *)&(cdm_array[i].mask2));
+
+               if (!strcmp(cdm_array[i].keyword, ORANGEFS_VERBOSE))
+                       client_verbose_index = i;
+
+               if (!strcmp(cdm_array[i].keyword, ORANGEFS_ALL))
+                       client_all_index = i;
+
+               cds_head = cds_delimiter + 1;
+       }
+
+       rc = cdm_element_count;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+
+out:
+
+       return rc;
+
+}
+
+/*
+ * /sys/kernel/debug/orangefs/debug-help can be catted to
+ * see all the available kernel and client debug keywords.
+ *
+ * When the kernel boots, we have no idea what keywords the
+ * client supports, nor their associated masks.
+ *
+ * We pass through this function once at boot and stamp a
+ * boilerplate "we don't know" message for the client in the
+ * debug-help file. We pass through here again when the client
+ * starts and then we can fill out the debug-help file fully.
+ *
+ * The client might be restarted any number of times between
+ * reboots; we only build the debug-help file the first time.
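+ *
+ * The finished file is simply a "Client Debug Keywords:" section and a
+ * "Kernel Debug Keywords:" section, each listing one tab-indented
+ * keyword per line.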
+ */
+int orangefs_prepare_debugfs_help_string(int at_boot)
+{
+       int rc = -EINVAL;
+       int i;
+       int byte_count = 0;
+       char *client_title = "Client Debug Keywords:\n";
+       char *kernel_title = "Kernel Debug Keywords:\n";
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+       if (at_boot) {
+               byte_count += strlen(HELP_STRING_UNINITIALIZED);
+               client_title = HELP_STRING_UNINITIALIZED;
+       } else {
+               /*
+                * fill the client keyword/mask array and remember
+                * how many elements there were.
+                */
+               cdm_element_count =
+                       orangefs_prepare_cdm_array(client_debug_array_string);
+               if (cdm_element_count <= 0)
+                       goto out;
+
+               /* Count the bytes destined for debug_help_string. */
+               byte_count += strlen(client_title);
+
+               for (i = 0; i < cdm_element_count; i++) {
+                       byte_count += strlen(cdm_array[i].keyword) + 2;
+                       if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+                               pr_info("%s: overflow 1!\n", __func__);
+                               goto out;
+                       }
+               }
+
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "%s: cdm_element_count:%d:\n",
+                            __func__,
+                            cdm_element_count);
+       }
+
+       byte_count += strlen(kernel_title);
+       for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+               byte_count +=
+                       strlen(s_kmod_keyword_mask_map[i].keyword) + 2;
+               if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+                       pr_info("%s: overflow 2!\n", __func__);
+                       goto out;
+               }
+       }
+
+       /* build debug_help_string. */
+       debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
+       if (!debug_help_string) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       strcat(debug_help_string, client_title);
+
+       if (!at_boot) {
+               for (i = 0; i < cdm_element_count; i++) {
+                       strcat(debug_help_string, "\t");
+                       strcat(debug_help_string, cdm_array[i].keyword);
+                       strcat(debug_help_string, "\n");
+               }
+       }
+
+       strcat(debug_help_string, "\n");
+       strcat(debug_help_string, kernel_title);
+
+       for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+               strcat(debug_help_string, "\t");
+               strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword);
+               strcat(debug_help_string, "\n");
+       }
+
+       rc = 0;
+
+out:
+
+       return rc;
+
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
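+ *
+ * Renders the given mask into the corresponding global buffer
+ * (kernel_debug_string or client_debug_string) as a comma-separated
+ * keyword list, or "none" when no bits are set.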
+ */
+void debug_mask_to_string(void *mask, int type)
+{
+       int i;
+       int len = 0;
+       char *debug_string;
+       int element_count = 0;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+       if (type) {
+               debug_string = client_debug_string;
+               element_count = cdm_element_count;
+       } else {
+               debug_string = kernel_debug_string;
+               element_count = num_kmod_keyword_mask_map;
+       }
+
+       memset(debug_string, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
+
+       /*
+        * Some keywords, like "all" or "verbose", are amalgams of
+        * numerous other keywords. Make a special check for those
+        * before grinding through the whole mask only to find out
+        * later...
+        */
+       if (check_amalgam_keyword(mask, type))
+               goto out;
+
+       /* Build the debug string. */
+       for (i = 0; i < element_count; i++)
+               if (type)
+                       do_c_string(mask, i);
+               else
+                       do_k_string(mask, i);
+
+       len = strlen(debug_string);
+
+       if ((len) && (type))
+               client_debug_string[len - 1] = '\0';
+       else if (len)
+               kernel_debug_string[len - 1] = '\0';
+       else if (type)
+               strcpy(client_debug_string, "none");
+       else
+               strcpy(kernel_debug_string, "none");
+
+out:
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "%s: string:%s:\n",
+                    __func__,
+                    debug_string);
+
+       return;
+
+}
+
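+/*
+ * Append the index'th kernel keyword to kernel_debug_string when its bit
+ * is set in the mask, skipping amalgam keywords; on overflow fall back
+ * to "all".
+ */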
+void do_k_string(void *k_mask, int index)
+{
+       __u64 *mask = (__u64 *) k_mask;
+
+       if (keyword_is_amalgam((char *) s_kmod_keyword_mask_map[index].keyword))
+               goto out;
+
+       if (*mask & s_kmod_keyword_mask_map[index].mask_val) {
+               if ((strlen(kernel_debug_string) +
+                    strlen(s_kmod_keyword_mask_map[index].keyword))
+                       < ORANGEFS_MAX_DEBUG_STRING_LEN - 1) {
+                               strcat(kernel_debug_string,
+                                      s_kmod_keyword_mask_map[index].keyword);
+                               strcat(kernel_debug_string, ",");
+                       } else {
+                               gossip_err("%s: overflow!\n", __func__);
+                               strcpy(kernel_debug_string, ORANGEFS_ALL);
+                               goto out;
+                       }
+       }
+
+out:
+
+       return;
+}
+
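+/*
+ * Client-side counterpart of do_k_string(); a client keyword matches when
+ * either of the two mask words overlaps the corresponding cdm_array entry.
+ */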
+void do_c_string(void *c_mask, int index)
+{
+       struct client_debug_mask *mask = (struct client_debug_mask *) c_mask;
+
+       if (keyword_is_amalgam(cdm_array[index].keyword))
+               goto out;
+
+       if ((mask->mask1 & cdm_array[index].mask1) ||
+           (mask->mask2 & cdm_array[index].mask2)) {
+               if ((strlen(client_debug_string) +
+                    strlen(cdm_array[index].keyword) + 1)
+                       < ORANGEFS_MAX_DEBUG_STRING_LEN - 2) {
+                               strcat(client_debug_string,
+                                      cdm_array[index].keyword);
+                               strcat(client_debug_string, ",");
+                       } else {
+                               gossip_err("%s: overflow!\n", __func__);
+                               strcpy(client_debug_string, ORANGEFS_ALL);
+                               goto out;
+                       }
+       }
+out:
+       return;
+}
+
+int keyword_is_amalgam(char *keyword)
+{
+       int rc = 0;
+
+       if ((!strcmp(keyword, ORANGEFS_ALL)) || (!strcmp(keyword, ORANGEFS_VERBOSE)))
+               rc = 1;
+
+       return rc;
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
+ *
+ * return 1 if we found an amalgam.
+ */
+int check_amalgam_keyword(void *mask, int type)
+{
+       __u64 *k_mask;
+       struct client_debug_mask *c_mask;
+       int k_all_index = num_kmod_keyword_mask_map - 1;
+       int rc = 0;
+
+       if (type) {
+               c_mask = (struct client_debug_mask *) mask;
+
+               if ((c_mask->mask1 == cdm_array[client_all_index].mask1) &&
+                   (c_mask->mask2 == cdm_array[client_all_index].mask2)) {
+                       strcpy(client_debug_string, ORANGEFS_ALL);
+                       rc = 1;
+                       goto out;
+               }
+
+               if ((c_mask->mask1 == cdm_array[client_verbose_index].mask1) &&
+                   (c_mask->mask2 == cdm_array[client_verbose_index].mask2)) {
+                       strcpy(client_debug_string, ORANGEFS_VERBOSE);
+                       rc = 1;
+                       goto out;
+               }
+
+       } else {
+               k_mask = (__u64 *) mask;
+
+               if (*k_mask >= s_kmod_keyword_mask_map[k_all_index].mask_val) {
+                       strcpy(kernel_debug_string, ORANGEFS_ALL);
+                       rc = 1;
+                       goto out;
+               }
+       }
+
+out:
+
+       return rc;
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
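+ *
+ * Parses a comma-separated keyword list (for example, the illustrative
+ * string "super,inode") and ORs the matching mask values into the
+ * caller's mask; unrecognized keywords are silently ignored.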
+ */
+void debug_string_to_mask(char *debug_string, void *mask, int type)
+{
+       char *unchecked_keyword;
+       int i;
+       char *strsep_fodder = kstrdup(debug_string, GFP_KERNEL);
+       char *original_pointer;
+       int element_count = 0;
+       struct client_debug_mask *c_mask;
+       __u64 *k_mask;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+       if (type) {
+               c_mask = (struct client_debug_mask *)mask;
+               element_count = cdm_element_count;
+       } else {
+               k_mask = (__u64 *)mask;
+               *k_mask = 0;
+               element_count = num_kmod_keyword_mask_map;
+       }
+
+       original_pointer = strsep_fodder;
+       while ((unchecked_keyword = strsep(&strsep_fodder, ",")))
+               if (strlen(unchecked_keyword)) {
+                       for (i = 0; i < element_count; i++)
+                               if (type)
+                                       do_c_mask(i,
+                                                 unchecked_keyword,
+                                                 &c_mask);
+                               else
+                                       do_k_mask(i,
+                                                 unchecked_keyword,
+                                                 &k_mask);
+               }
+
+       kfree(original_pointer);
+}
+
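+/*
+ * do_c_mask() and do_k_mask() OR a single keyword's mask value(s) into
+ * the caller's mask when the keyword matches the given table entry.
+ */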
+void do_c_mask(int i,
+              char *unchecked_keyword,
+              struct client_debug_mask **sane_mask)
+{
+
+       if (!strcmp(cdm_array[i].keyword, unchecked_keyword)) {
+               (**sane_mask).mask1 = (**sane_mask).mask1 | cdm_array[i].mask1;
+               (**sane_mask).mask2 = (**sane_mask).mask2 | cdm_array[i].mask2;
+       }
+}
+
+void do_k_mask(int i, char *unchecked_keyword, __u64 **sane_mask)
+{
+
+       if (!strcmp(s_kmod_keyword_mask_map[i].keyword, unchecked_keyword))
+               **sane_mask = (**sane_mask) |
+                               s_kmod_keyword_mask_map[i].mask_val;
+}
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
new file mode 100644 (file)
index 0000000..6ac0c60
--- /dev/null
@@ -0,0 +1,453 @@
+#include <linux/types.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+
+extern struct client_debug_mask *cdm_array;
+extern char *debug_help_string;
+extern int help_string_initialized;
+extern struct dentry *debug_dir;
+extern struct dentry *help_file_dentry;
+extern struct dentry *client_debug_dentry;
+extern const struct file_operations debug_help_fops;
+extern int client_all_index;
+extern int client_verbose_index;
+extern int cdm_element_count;
+#define DEBUG_HELP_STRING_SIZE 4096
+#define HELP_STRING_UNINITIALIZED \
+       "Client Debug Keywords are unknown until the first time\n" \
+       "the client is started after boot.\n"
+#define ORANGEFS_KMOD_DEBUG_HELP_FILE "debug-help"
+#define ORANGEFS_KMOD_DEBUG_FILE "kernel-debug"
+#define ORANGEFS_CLIENT_DEBUG_FILE "client-debug"
+#define ORANGEFS_VERBOSE "verbose"
+#define ORANGEFS_ALL "all"
+
+/* pvfs2-config.h ***********************************************************/
+#define ORANGEFS_VERSION_MAJOR 2
+#define ORANGEFS_VERSION_MINOR 9
+#define ORANGEFS_VERSION_SUB 0
+
+/* khandle stuff  ***********************************************************/
+
+/*
+ * The 2.9 core will put 64 bit handles in here like this:
+ *    1234 0000 0000 5678
+ * The 3.0 and beyond cores will put 128 bit handles in here like this:
+ *    1234 5678 90AB CDEF
+ * The kernel module will always use the first four bytes and
+ * the last four bytes as an inum.
+ */
+struct orangefs_khandle {
+       unsigned char u[16];
+}  __aligned(8);
+
+/*
+ * kernel version of an object ref.
+ */
+struct orangefs_object_kref {
+       struct orangefs_khandle khandle;
+       __s32 fs_id;
+       __s32 __pad1;
+};
+
+/*
+ * Compare 2 khandles.  Assumes little endian, thus the comparison runs
+ * from the highest address to the lowest.
+ */
+static inline int ORANGEFS_khandle_cmp(const struct orangefs_khandle *kh1,
+                                  const struct orangefs_khandle *kh2)
+{
+       int i;
+
+       for (i = 15; i >= 0; i--) {
+               if (kh1->u[i] > kh2->u[i])
+                       return 1;
+               if (kh1->u[i] < kh2->u[i])
+                       return -1;
+       }
+
+       return 0;
+}
+
+static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh,
+                                  void *p, int size)
+{
+
+       memset(p, 0, size);
+       memcpy(p, kh->u, 16);
+
+}
+
+static inline void ORANGEFS_khandle_from(struct orangefs_khandle *kh,
+                                    void *p, int size)
+{
+       memset(kh, 0, 16);
+       memcpy(kh->u, p, 16);
+
+}
+
+/* pvfs2-types.h ************************************************************/
+typedef __u32 ORANGEFS_uid;
+typedef __u32 ORANGEFS_gid;
+typedef __s32 ORANGEFS_fs_id;
+typedef __u32 ORANGEFS_permissions;
+typedef __u64 ORANGEFS_time;
+typedef __s64 ORANGEFS_size;
+typedef __u64 ORANGEFS_flags;
+typedef __u64 ORANGEFS_ds_position;
+typedef __s32 ORANGEFS_error;
+typedef __s64 ORANGEFS_offset;
+
+#define ORANGEFS_SUPER_MAGIC 0x20030528
+
+/*
+ * An ORANGEFS error code is a signed 32-bit integer.  Error codes are
+ * negative, but the sign is stripped before decoding.
+ */
+
+/* Bit 31 is not used since it is the sign. */
+
+/*
+ * Bit 30 specifies that this is an ORANGEFS error.  An ORANGEFS error is
+ * either an encoded errno value or an ORANGEFS protocol error.
+ */
+#define ORANGEFS_ERROR_BIT (1 << 30)
+
+/*
+ * Bit 29 specifies that this is an ORANGEFS protocol error and not an encoded
+ * errno value.
+ */
+#define ORANGEFS_NON_ERRNO_ERROR_BIT (1 << 29)
+
+/*
+ * Bits 9, 8, and 7 specify the error class, which encodes the section of
+ * server code the error originated in for logging purposes. It is not used
+ * in the kernel except to be masked out.
+ */
+#define ORANGEFS_ERROR_CLASS_BITS 0x380
+
+/* Bits 6 - 0 are reserved for the actual error code. */
+#define ORANGEFS_ERROR_NUMBER_BITS 0x7f
+
+/* Encoded errno values decoded by PINT_errno_mapping in orangefs-utils.c. */
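+/*
+ * Worked example (illustrative): a server-encoded ENOENT arrives as
+ * -(ORANGEFS_ERROR_BIT | <class bits> | 2); orangefs_normalize_to_errno()
+ * masks off ORANGEFS_ERROR_BIT and ORANGEFS_ERROR_CLASS_BITS, leaving
+ * index 2, and PINT_errno_mapping[2] is ENOENT, so -ENOENT is returned.
+ */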
+
+/* Our own ORANGEFS protocol error codes. */
+#define ORANGEFS_ECANCEL    (1|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_EDEVINIT   (2|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_EDETAIL    (3|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_EHOSTNTFD  (4|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_EADDRNTFD  (5|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_ENORECVR   (6|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_ETRYAGAIN  (7|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_ENOTPVFS   (8|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_ESECURITY  (9|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+
+/* permission bits */
+#define ORANGEFS_O_EXECUTE (1 << 0)
+#define ORANGEFS_O_WRITE   (1 << 1)
+#define ORANGEFS_O_READ    (1 << 2)
+#define ORANGEFS_G_EXECUTE (1 << 3)
+#define ORANGEFS_G_WRITE   (1 << 4)
+#define ORANGEFS_G_READ    (1 << 5)
+#define ORANGEFS_U_EXECUTE (1 << 6)
+#define ORANGEFS_U_WRITE   (1 << 7)
+#define ORANGEFS_U_READ    (1 << 8)
+/* no ORANGEFS_U_VTX (sticky bit) */
+#define ORANGEFS_G_SGID    (1 << 10)
+#define ORANGEFS_U_SUID    (1 << 11)
+
+/* definition taken from stdint.h */
+#define INT32_MAX (2147483647)
+#define ORANGEFS_ITERATE_START    (INT32_MAX - 1)
+#define ORANGEFS_ITERATE_END      (INT32_MAX - 2)
+#define ORANGEFS_ITERATE_NEXT     (INT32_MAX - 3)
+#define ORANGEFS_READDIR_START ORANGEFS_ITERATE_START
+#define ORANGEFS_READDIR_END   ORANGEFS_ITERATE_END
+#define ORANGEFS_IMMUTABLE_FL FS_IMMUTABLE_FL
+#define ORANGEFS_APPEND_FL    FS_APPEND_FL
+#define ORANGEFS_NOATIME_FL   FS_NOATIME_FL
+#define ORANGEFS_MIRROR_FL    0x01000000ULL
+#define ORANGEFS_O_EXECUTE (1 << 0)
+#define ORANGEFS_FS_ID_NULL       ((__s32)0)
+#define ORANGEFS_ATTR_SYS_UID                   (1 << 0)
+#define ORANGEFS_ATTR_SYS_GID                   (1 << 1)
+#define ORANGEFS_ATTR_SYS_PERM                  (1 << 2)
+#define ORANGEFS_ATTR_SYS_ATIME                 (1 << 3)
+#define ORANGEFS_ATTR_SYS_CTIME                 (1 << 4)
+#define ORANGEFS_ATTR_SYS_MTIME                 (1 << 5)
+#define ORANGEFS_ATTR_SYS_TYPE                  (1 << 6)
+#define ORANGEFS_ATTR_SYS_ATIME_SET             (1 << 7)
+#define ORANGEFS_ATTR_SYS_MTIME_SET             (1 << 8)
+#define ORANGEFS_ATTR_SYS_SIZE                  (1 << 20)
+#define ORANGEFS_ATTR_SYS_LNK_TARGET            (1 << 24)
+#define ORANGEFS_ATTR_SYS_DFILE_COUNT           (1 << 25)
+#define ORANGEFS_ATTR_SYS_DIRENT_COUNT          (1 << 26)
+#define ORANGEFS_ATTR_SYS_BLKSIZE               (1 << 28)
+#define ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT   (1 << 29)
+#define ORANGEFS_ATTR_SYS_COMMON_ALL   \
+       (ORANGEFS_ATTR_SYS_UID  |       \
+        ORANGEFS_ATTR_SYS_GID  |       \
+        ORANGEFS_ATTR_SYS_PERM |       \
+        ORANGEFS_ATTR_SYS_ATIME        |       \
+        ORANGEFS_ATTR_SYS_CTIME        |       \
+        ORANGEFS_ATTR_SYS_MTIME        |       \
+        ORANGEFS_ATTR_SYS_TYPE)
+
+#define ORANGEFS_ATTR_SYS_ALL_SETABLE          \
+(ORANGEFS_ATTR_SYS_COMMON_ALL-ORANGEFS_ATTR_SYS_TYPE)
+
+#define ORANGEFS_ATTR_SYS_ALL_NOHINT                   \
+       (ORANGEFS_ATTR_SYS_COMMON_ALL           |       \
+        ORANGEFS_ATTR_SYS_SIZE                 |       \
+        ORANGEFS_ATTR_SYS_LNK_TARGET           |       \
+        ORANGEFS_ATTR_SYS_DFILE_COUNT          |       \
+        ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT  |       \
+        ORANGEFS_ATTR_SYS_DIRENT_COUNT         |       \
+        ORANGEFS_ATTR_SYS_BLKSIZE)
+
+#define ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE            \
+       (ORANGEFS_ATTR_SYS_COMMON_ALL           |       \
+        ORANGEFS_ATTR_SYS_LNK_TARGET           |       \
+        ORANGEFS_ATTR_SYS_DFILE_COUNT          |       \
+        ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT  |       \
+        ORANGEFS_ATTR_SYS_DIRENT_COUNT         |       \
+        ORANGEFS_ATTR_SYS_BLKSIZE)
+
+#define ORANGEFS_XATTR_REPLACE 0x2
+#define ORANGEFS_XATTR_CREATE  0x1
+#define ORANGEFS_MAX_SERVER_ADDR_LEN 256
+#define ORANGEFS_NAME_MAX                256
+/*
+ * Max extended attribute name length as imposed by the VFS and exploited
+ * for the upcall request types.
+ * NOTE: Please keep these values as multiples of 8 even if you wish to
+ * change them.  This is *NECESSARY* for supporting 32 bit user-space
+ * binaries on a 64-bit kernel.  Due to the implementation within DBPF,
+ * this really needs to be ORANGEFS_NAME_MAX, which it happens to equal
+ * already, but there is no reason to let it break if that changes in
+ * the future.
+ */
+#define ORANGEFS_MAX_XATTR_NAMELEN   ORANGEFS_NAME_MAX /* Not the same as
+                                                * XATTR_NAME_MAX defined
+                                                * by <linux/xattr.h>
+                                                */
+#define ORANGEFS_MAX_XATTR_VALUELEN  8192      /* Not the same as XATTR_SIZE_MAX
+                                        * defined by <linux/xattr.h>
+                                        */
+#define ORANGEFS_MAX_XATTR_LISTLEN   16        /* Not the same as XATTR_LIST_MAX
+                                        * defined by <linux/xattr.h>
+                                        */
+/*
+ * ORANGEFS I/O operation types, used in both system and server interfaces.
+ */
+enum ORANGEFS_io_type {
+       ORANGEFS_IO_READ = 1,
+       ORANGEFS_IO_WRITE = 2
+};
+
+/*
+ * If this enum is modified the server parameters related to the precreate pool
+ * batch and low threshold sizes may need to be modified  to reflect this
+ * change.
+ */
+enum orangefs_ds_type {
+       ORANGEFS_TYPE_NONE = 0,
+       ORANGEFS_TYPE_METAFILE = (1 << 0),
+       ORANGEFS_TYPE_DATAFILE = (1 << 1),
+       ORANGEFS_TYPE_DIRECTORY = (1 << 2),
+       ORANGEFS_TYPE_SYMLINK = (1 << 3),
+       ORANGEFS_TYPE_DIRDATA = (1 << 4),
+       ORANGEFS_TYPE_INTERNAL = (1 << 5)       /* for the server's private use */
+};
+
+/*
+ * ORANGEFS_certificate simply stores a buffer with the buffer size.
+ * The buffer can be converted to an OpenSSL X509 struct for use.
+ */
+struct ORANGEFS_certificate {
+       __u32 buf_size;
+       unsigned char *buf;
+};
+
+/*
+ * A credential identifies a user and is signed by the client/user
+ * private key.
+ */
+struct ORANGEFS_credential {
+       __u32 userid;   /* user id */
+       __u32 num_groups;       /* length of group_array */
+       __u32 *group_array;     /* groups for which the user is a member */
+       char *issuer;           /* alias of the issuing server */
+       __u64 timeout;  /* seconds after epoch to time out */
+       __u32 sig_size; /* length of the signature in bytes */
+       unsigned char *signature;       /* digital signature */
+       struct ORANGEFS_certificate certificate;        /* user certificate buffer */
+};
+#define extra_size_ORANGEFS_credential (ORANGEFS_REQ_LIMIT_GROUPS      *       \
+                                   sizeof(__u32)               +       \
+                                   ORANGEFS_REQ_LIMIT_ISSUER   +       \
+                                   ORANGEFS_REQ_LIMIT_SIGNATURE        +       \
+                                   extra_size_ORANGEFS_certificate)
+
+/* This structure is used by the VFS-client interaction alone */
+struct ORANGEFS_keyval_pair {
+       char key[ORANGEFS_MAX_XATTR_NAMELEN];
+       __s32 key_sz;   /* __s32 for portable, fixed-size structures */
+       __s32 val_sz;
+       char val[ORANGEFS_MAX_XATTR_VALUELEN];
+};
+
+/* pvfs2-sysint.h ***********************************************************/
+/* Describes attributes for a file, directory, or symlink. */
+struct ORANGEFS_sys_attr_s {
+       __u32 owner;
+       __u32 group;
+       __u32 perms;
+       __u64 atime;
+       __u64 mtime;
+       __u64 ctime;
+       __s64 size;
+
+       /* NOTE: caller must free if valid */
+       char *link_target;
+
+       /* Changed to __s32 so that size of structure does not change */
+       __s32 dfile_count;
+
+       /* Changed to __s32 so that size of structure does not change */
+       __s32 distr_dir_servers_initial;
+
+       /* Changed to __s32 so that size of structure does not change */
+       __s32 distr_dir_servers_max;
+
+       /* Changed to __s32 so that size of structure does not change */
+       __s32 distr_dir_split_size;
+
+       __u32 mirror_copies_count;
+
+       /* NOTE: caller must free if valid */
+       char *dist_name;
+
+       /* NOTE: caller must free if valid */
+       char *dist_params;
+
+       __s64 dirent_count;
+       enum orangefs_ds_type objtype;
+       __u64 flags;
+       __u32 mask;
+       __s64 blksize;
+};
+
+#define ORANGEFS_LOOKUP_LINK_NO_FOLLOW 0
+#define ORANGEFS_LOOKUP_LINK_FOLLOW    1
+
+/* pint-dev.h ***************************************************************/
+
+/* parameter structure used in ORANGEFS_DEV_DEBUG ioctl command */
+struct dev_mask_info_s {
+       enum {
+               KERNEL_MASK,
+               CLIENT_MASK,
+       } mask_type;
+       __u64 mask_value;
+};
+
+struct dev_mask2_info_s {
+       __u64 mask1_value;
+       __u64 mask2_value;
+};
+
+/* pvfs2-util.h *************************************************************/
+__s32 ORANGEFS_util_translate_mode(int mode);
+
+/* pvfs2-debug.h ************************************************************/
+#include "orangefs-debug.h"
+
+/* pvfs2-internal.h *********************************************************/
+#define llu(x) (unsigned long long)(x)
+#define lld(x) (long long)(x)
+
+/* pint-dev-shared.h ********************************************************/
+#define ORANGEFS_DEV_MAGIC 'k'
+
+#define ORANGEFS_READDIR_DEFAULT_DESC_COUNT  5
+
+#define DEV_GET_MAGIC           0x1
+#define DEV_GET_MAX_UPSIZE      0x2
+#define DEV_GET_MAX_DOWNSIZE    0x3
+#define DEV_MAP                 0x4
+#define DEV_REMOUNT_ALL         0x5
+#define DEV_DEBUG               0x6
+#define DEV_UPSTREAM            0x7
+#define DEV_CLIENT_MASK         0x8
+#define DEV_CLIENT_STRING       0x9
+#define DEV_MAX_NR              0xa
+
+/* supported ioctls, codes are with respect to user-space */
+enum {
+       ORANGEFS_DEV_GET_MAGIC = _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAGIC, __s32),
+       ORANGEFS_DEV_GET_MAX_UPSIZE =
+           _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAX_UPSIZE, __s32),
+       ORANGEFS_DEV_GET_MAX_DOWNSIZE =
+           _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAX_DOWNSIZE, __s32),
+       ORANGEFS_DEV_MAP = _IO(ORANGEFS_DEV_MAGIC, DEV_MAP),
+       ORANGEFS_DEV_REMOUNT_ALL = _IO(ORANGEFS_DEV_MAGIC, DEV_REMOUNT_ALL),
+       ORANGEFS_DEV_DEBUG = _IOR(ORANGEFS_DEV_MAGIC, DEV_DEBUG, __s32),
+       ORANGEFS_DEV_UPSTREAM = _IOW(ORANGEFS_DEV_MAGIC, DEV_UPSTREAM, int),
+       ORANGEFS_DEV_CLIENT_MASK = _IOW(ORANGEFS_DEV_MAGIC,
+                                   DEV_CLIENT_MASK,
+                                   struct dev_mask2_info_s),
+       ORANGEFS_DEV_CLIENT_STRING = _IOW(ORANGEFS_DEV_MAGIC,
+                                     DEV_CLIENT_STRING,
+                                     char *),
+       ORANGEFS_DEV_MAXNR = DEV_MAX_NR,
+};
+
+/*
+ * version number for use in communicating between kernel space and user
+ * space. Zero signifies the upstream version of the kernel module.
+ */
+#define ORANGEFS_KERNEL_PROTO_VERSION 0
+#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20904
+
+/*
+ * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl.
+ * NOTE: See devorangefs-req.c for 32 bit compat structure.
+ * Since this structure has a variable-sized layout that is different
+ * on 32 and 64 bit platforms, we need to normalize to a 64 bit layout
+ * on such systems before servicing ioctl calls from user-space binaries
+ * that may be 32 bit!
+ */
+struct ORANGEFS_dev_map_desc {
+       void *ptr;
+       __s32 total_size;
+       __s32 size;
+       __s32 count;
+};
+
+/* gossip.h *****************************************************************/
+
+#ifdef GOSSIP_DISABLE_DEBUG
+#define gossip_debug(mask, format, f...) do {} while (0)
+#else
+extern __u64 gossip_debug_mask;
+extern struct client_debug_mask client_debug_mask;
+
+/* try to avoid function call overhead by checking masks in macro */
+#define gossip_debug(mask, format, f...)                       \
+do {                                                           \
+       if (gossip_debug_mask & mask)                           \
+               printk(format, ##f);                            \
+} while (0)
+#endif /* GOSSIP_DISABLE_DEBUG */
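+
+/*
+ * Typical use (illustrative): gossip_debug(GOSSIP_SUPER_DEBUG, "got %d\n", n)
+ * only reaches printk() when GOSSIP_SUPER_DEBUG is set in gossip_debug_mask.
+ */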
+
+/* do file and line number printouts w/ the GNU preprocessor */
+#define gossip_ldebug(mask, format, f...)                              \
+               gossip_debug(mask, "%s: " format, __func__, ##f)
+
+#define gossip_err printk
+#define gossip_lerr(format, f...)                                      \
+               gossip_err("%s line %d: " format,                       \
+                          __FILE__,                                    \
+                          __LINE__,                                    \
+                          ##f)
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
new file mode 100644 (file)
index 0000000..93cc352
--- /dev/null
@@ -0,0 +1,544 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+#include <linux/parser.h>
+
+/* a cache for orangefs-inode objects (i.e. orangefs inode private data) */
+static struct kmem_cache *orangefs_inode_cache;
+
+/* list for storing orangefs specific superblocks in use */
+LIST_HEAD(orangefs_superblocks);
+
+DEFINE_SPINLOCK(orangefs_superblocks_lock);
+
+enum {
+       Opt_intr,
+       Opt_acl,
+       Opt_local_lock,
+
+       Opt_err
+};
+
+static const match_table_t tokens = {
+       { Opt_acl,              "acl" },
+       { Opt_intr,             "intr" },
+       { Opt_local_lock,       "local_lock" },
+       { Opt_err,      NULL }
+};
+
+
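+/*
+ * So, for example (illustrative), mounting with "-o acl,intr" enables
+ * POSIX ACLs and makes pending operations interruptible; an unrecognized
+ * option makes parse_mount_options() fail, and the error is only logged
+ * when the mount is not silent.
+ */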
+static int parse_mount_options(struct super_block *sb, char *options,
+               int silent)
+{
+       struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(sb);
+       substring_t args[MAX_OPT_ARGS];
+       char *p;
+
+       /*
+        * Force any potential flags that might be set from the mount
+        * to zero, i.e., initialize to unset.
+        */
+       sb->s_flags &= ~MS_POSIXACL;
+       orangefs_sb->flags &= ~ORANGEFS_OPT_INTR;
+       orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK;
+
+       while ((p = strsep(&options, ",")) != NULL) {
+               int token;
+
+               if (!*p)
+                       continue;
+
+               token = match_token(p, tokens, args);
+               switch (token) {
+               case Opt_acl:
+                       sb->s_flags |= MS_POSIXACL;
+                       break;
+               case Opt_intr:
+                       orangefs_sb->flags |= ORANGEFS_OPT_INTR;
+                       break;
+               case Opt_local_lock:
+                       orangefs_sb->flags |= ORANGEFS_OPT_LOCAL_LOCK;
+                       break;
+               default:
+                       goto fail;
+               }
+       }
+
+       return 0;
+fail:
+       if (!silent)
+               gossip_err("Error: mount option [%s] is not supported.\n", p);
+       return -EINVAL;
+}
+
+static void orangefs_inode_cache_ctor(void *req)
+{
+       struct orangefs_inode_s *orangefs_inode = req;
+
+       inode_init_once(&orangefs_inode->vfs_inode);
+       init_rwsem(&orangefs_inode->xattr_sem);
+
+       orangefs_inode->vfs_inode.i_version = 1;
+}
+
+static struct inode *orangefs_alloc_inode(struct super_block *sb)
+{
+       struct orangefs_inode_s *orangefs_inode;
+
+       orangefs_inode = kmem_cache_alloc(orangefs_inode_cache, GFP_KERNEL);
+       if (orangefs_inode == NULL) {
+               gossip_err("Failed to allocate orangefs_inode\n");
+               return NULL;
+       }
+
+       /*
+        * We want to clear everything except for rw_semaphore and the
+        * vfs_inode.
+        */
+       memset(&orangefs_inode->refn.khandle, 0, 16);
+       orangefs_inode->refn.fs_id = ORANGEFS_FS_ID_NULL;
+       orangefs_inode->last_failed_block_index_read = 0;
+       memset(orangefs_inode->link_target, 0, sizeof(orangefs_inode->link_target));
+       orangefs_inode->pinode_flags = 0;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_alloc_inode: allocated %p\n",
+                    &orangefs_inode->vfs_inode);
+       return &orangefs_inode->vfs_inode;
+}
+
+static void orangefs_destroy_inode(struct inode *inode)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                       "%s: deallocated %p destroying inode %pU\n",
+                       __func__, orangefs_inode, get_khandle_from_ino(inode));
+
+       kmem_cache_free(orangefs_inode_cache, orangefs_inode);
+}
+
+/*
+ * NOTE: information filled in here is typically reflected in the
+ * output of the system command 'df'
+ */
+static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+       int ret = -ENOMEM;
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int flags = 0;
+       struct super_block *sb = NULL;
+
+       sb = dentry->d_sb;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_statfs: called on sb %p (fs_id is %d)\n",
+                    sb,
+                    (int)(ORANGEFS_SB(sb)->fs_id));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_STATFS);
+       if (!new_op)
+               return ret;
+       new_op->upcall.req.statfs.fs_id = ORANGEFS_SB(sb)->fs_id;
+
+       if (ORANGEFS_SB(sb)->flags & ORANGEFS_OPT_INTR)
+               flags = ORANGEFS_OP_INTERRUPTIBLE;
+
+       ret = service_operation(new_op, "orangefs_statfs", flags);
+
+       if (new_op->downcall.status < 0)
+               goto out_op_release;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "%s: got %ld blocks available | "
+                    "%ld blocks total | %ld block size | "
+                    "%ld files total | %ld files avail\n",
+                    __func__,
+                    (long)new_op->downcall.resp.statfs.blocks_avail,
+                    (long)new_op->downcall.resp.statfs.blocks_total,
+                    (long)new_op->downcall.resp.statfs.block_size,
+                    (long)new_op->downcall.resp.statfs.files_total,
+                    (long)new_op->downcall.resp.statfs.files_avail);
+
+       buf->f_type = sb->s_magic;
+       memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
+       buf->f_bsize = new_op->downcall.resp.statfs.block_size;
+       buf->f_namelen = ORANGEFS_NAME_LEN;
+
+       buf->f_blocks = (sector_t) new_op->downcall.resp.statfs.blocks_total;
+       buf->f_bfree = (sector_t) new_op->downcall.resp.statfs.blocks_avail;
+       buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail;
+       buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total;
+       buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail;
+       buf->f_frsize = sb->s_blocksize;
+
+out_op_release:
+       op_release(new_op);
+       gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_statfs: returning %d\n", ret);
+       return ret;
+}
+
+/*
+ * Remount as initiated by VFS layer.  We just need to reparse the mount
+ * options, no need to signal pvfs2-client-core about it.
+ */
+static int orangefs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+       gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount_fs: called\n");
+       return parse_mount_options(sb, data, 1);
+}
+
+/*
+ * Remount as initiated by pvfs2-client-core on restart.  This is used to
+ * repopulate mount information left from previous pvfs2-client-core.
+ *
+ * The idea here is that, given a valid superblock, we re-initialize the
+ * user space client with the mount information that was specified when
+ * the superblock was first initialized.  This is very different from
+ * the first initialization/creation of a superblock.  We issue the
+ * mount as a priority operation so that it gets ahead of any other
+ * pending operation waiting for servicing; otherwise the pvfs2-client
+ * could repeatedly fail its other pending operations before it regains
+ * all of the mount information from us.
+ * NOTE: this function assumes that the request_mutex is already acquired!
+ */
+int orangefs_remount(struct super_block *sb)
+{
+       struct orangefs_kernel_op_s *new_op;
+       int ret = -EINVAL;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount: called\n");
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT);
+       if (!new_op)
+               return -ENOMEM;
+       strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
+               ORANGEFS_SB(sb)->devname,
+               ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "Attempting ORANGEFS Remount via host %s\n",
+                    new_op->upcall.req.fs_mount.orangefs_config_server);
+
+       /*
+        * We assume that the calling function has already acquired the
+        * request_mutex to prevent other operations from bypassing
+        * this one.
+        */
+       ret = service_operation(new_op, "orangefs_remount",
+               ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_SEMAPHORE);
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_remount: mount got return value of %d\n",
+                    ret);
+       if (ret == 0) {
+               /*
+                * store the id assigned to this sb -- it's just a
+                * short-lived mapping that the system interface uses
+                * to map this superblock to a particular mount entry
+                */
+               ORANGEFS_SB(sb)->id = new_op->downcall.resp.fs_mount.id;
+               ORANGEFS_SB(sb)->mount_pending = 0;
+       }
+
+       op_release(new_op);
+       return ret;
+}
+
+int fsid_key_table_initialize(void)
+{
+       return 0;
+}
+
+void fsid_key_table_finalize(void)
+{
+}
+
+/* Called whenever the VFS dirties the inode in response to atime updates */
+static void orangefs_dirty_inode(struct inode *inode, int flags)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_dirty_inode: %pU\n",
+                    get_khandle_from_ino(inode));
+       SetAtimeFlag(orangefs_inode);
+}
+
+static const struct super_operations orangefs_s_ops = {
+       .alloc_inode = orangefs_alloc_inode,
+       .destroy_inode = orangefs_destroy_inode,
+       .dirty_inode = orangefs_dirty_inode,
+       .drop_inode = generic_delete_inode,
+       .statfs = orangefs_statfs,
+       .remount_fs = orangefs_remount_fs,
+       .show_options = generic_show_options,
+};
+
+static struct dentry *orangefs_fh_to_dentry(struct super_block *sb,
+                                 struct fid *fid,
+                                 int fh_len,
+                                 int fh_type)
+{
+       struct orangefs_object_kref refn;
+
+       if (fh_len < 5 || fh_type > 2)
+               return NULL;
+
+       ORANGEFS_khandle_from(&(refn.khandle), fid->raw, 16);
+       refn.fs_id = (u32) fid->raw[4];
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "fh_to_dentry: handle %pU, fs_id %d\n",
+                    &refn.khandle,
+                    refn.fs_id);
+
+       return d_obtain_alias(orangefs_iget(sb, &refn));
+}
+
+static int orangefs_encode_fh(struct inode *inode,
+                   __u32 *fh,
+                   int *max_len,
+                   struct inode *parent)
+{
+       int len = parent ? 10 : 5;
+       int type = 1;
+       struct orangefs_object_kref refn;
+
+       if (*max_len < len) {
+               gossip_lerr("fh buffer is too small for encoding\n");
+               *max_len = len;
+               type = 255;
+               goto out;
+       }
+
+       refn = ORANGEFS_I(inode)->refn;
+       ORANGEFS_khandle_to(&refn.khandle, fh, 16);
+       fh[4] = refn.fs_id;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "Encoding fh: handle %pU, fsid %u\n",
+                    &refn.khandle,
+                    refn.fs_id);
+
+       if (parent) {
+               refn = ORANGEFS_I(parent)->refn;
+               ORANGEFS_khandle_to(&refn.khandle, (char *) fh + 20, 16);
+               fh[9] = refn.fs_id;
+
+               type = 2;
+               gossip_debug(GOSSIP_SUPER_DEBUG,
+                            "Encoding parent: handle %pU, fsid %u\n",
+                            &refn.khandle,
+                            refn.fs_id);
+       }
+       *max_len = len;
+
+out:
+       return type;
+}
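+
+/*
+ * File handle layout used by the export operations below: fh[0..3]
+ * carry the 16-byte khandle and fh[4] the fs_id; when a parent is
+ * encoded as well, fh[5..8] and fh[9] repeat the same layout for the
+ * parent.  A return type of 1 means child only, 2 means child plus
+ * parent, and 255 means the caller's buffer was too small.
+ */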
+
+static const struct export_operations orangefs_export_ops = {
+       .encode_fh = orangefs_encode_fh,
+       .fh_to_dentry = orangefs_fh_to_dentry,
+};
+
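+/*
+ * Fill a freshly created superblock from the FS_MOUNT response:
+ * allocate the private orangefs_sb_info_s, parse any mount options,
+ * hook up the xattr handlers and super/dentry operations, take the
+ * block size from the shared buffer map, and instantiate the root
+ * dentry.
+ */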
+static int orangefs_fill_sb(struct super_block *sb,
+               struct orangefs_fs_mount_response *fs_mount,
+               void *data, int silent)
+{
+       int ret = -EINVAL;
+       struct inode *root = NULL;
+       struct dentry *root_dentry = NULL;
+       struct orangefs_object_kref root_object;
+
+       /* alloc and init our private orangefs sb info */
+       sb->s_fs_info =
+               kzalloc(sizeof(struct orangefs_sb_info_s), ORANGEFS_GFP_FLAGS);
+       if (!ORANGEFS_SB(sb))
+               return -ENOMEM;
+       ORANGEFS_SB(sb)->sb = sb;
+
+       ORANGEFS_SB(sb)->root_khandle = fs_mount->root_khandle;
+       ORANGEFS_SB(sb)->fs_id = fs_mount->fs_id;
+       ORANGEFS_SB(sb)->id = fs_mount->id;
+
+       if (data) {
+               ret = parse_mount_options(sb, data, silent);
+               if (ret)
+                       return ret;
+       }
+
+       /* Hang the xattr handlers off the superblock */
+       sb->s_xattr = orangefs_xattr_handlers;
+       sb->s_magic = ORANGEFS_SUPER_MAGIC;
+       sb->s_op = &orangefs_s_ops;
+       sb->s_d_op = &orangefs_dentry_operations;
+
+       sb->s_blocksize = orangefs_bufmap_size_query();
+       sb->s_blocksize_bits = orangefs_bufmap_shift_query();
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
+
+       root_object.khandle = ORANGEFS_SB(sb)->root_khandle;
+       root_object.fs_id = ORANGEFS_SB(sb)->fs_id;
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "get inode %pU, fsid %d\n",
+                    &root_object.khandle,
+                    root_object.fs_id);
+
+       root = orangefs_iget(sb, &root_object);
+       if (IS_ERR(root))
+               return PTR_ERR(root);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "Allocated root inode [%p] with mode %x\n",
+                    root,
+                    root->i_mode);
+
+       /* allocates and places root dentry in dcache */
+       root_dentry = d_make_root(root);
+       if (!root_dentry)
+               return -ENOMEM;
+
+       sb->s_export_op = &orangefs_export_ops;
+       sb->s_root = root_dentry;
+       return 0;
+}
+
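+/*
+ * Mount entry point: send an ORANGEFS_VFS_OP_FS_MOUNT upcall to
+ * client-core, create an anonymous superblock with sget(), fill it in
+ * via orangefs_fill_sb(), and remember the devname so that a later
+ * orangefs_remount() can replay it.
+ */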
+struct dentry *orangefs_mount(struct file_system_type *fst,
+                          int flags,
+                          const char *devname,
+                          void *data)
+{
+       int ret = -EINVAL;
+       struct super_block *sb = ERR_PTR(-EINVAL);
+       struct orangefs_kernel_op_s *new_op;
+       struct dentry *d = ERR_PTR(-EINVAL);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_mount: called with devname %s\n",
+                    devname);
+
+       if (!devname) {
+               gossip_err("ERROR: device name not specified.\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT);
+       if (!new_op)
+               return ERR_PTR(-ENOMEM);
+
+       strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
+               devname,
+               ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "Attempting ORANGEFS Mount via host %s\n",
+                    new_op->upcall.req.fs_mount.orangefs_config_server);
+
+       ret = service_operation(new_op, "orangefs_mount", 0);
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_mount: mount got return value of %d\n", ret);
+       if (ret)
+               goto free_op;
+
+       if (new_op->downcall.resp.fs_mount.fs_id == ORANGEFS_FS_ID_NULL) {
+               gossip_err("ERROR: Retrieved null fs_id\n");
+               ret = -EINVAL;
+               goto free_op;
+       }
+
+       sb = sget(fst, NULL, set_anon_super, flags, NULL);
+
+       if (IS_ERR(sb)) {
+               d = ERR_CAST(sb);
+               goto free_op;
+       }
+
+       ret = orangefs_fill_sb(sb,
+             &new_op->downcall.resp.fs_mount, data,
+             flags & MS_SILENT ? 1 : 0);
+
+       if (ret) {
+               d = ERR_PTR(ret);
+               goto free_op;
+       }
+
+       /*
+        * on successful mount, store the devname and data
+        * used
+        */
+       strncpy(ORANGEFS_SB(sb)->devname,
+               devname,
+               ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+       /* mount_pending must be cleared */
+       ORANGEFS_SB(sb)->mount_pending = 0;
+
+       /*
+        * finally, add this sb to our list of known orangefs
+        * sb's
+        */
+       add_orangefs_sb(sb);
+       op_release(new_op);
+       return dget(sb->s_root);
+
+free_op:
+       gossip_err("orangefs_mount: mount request failed with %d\n", ret);
+       if (ret == -EINVAL) {
+               gossip_err("Ensure that all orangefs-servers have the same FS configuration files\n");
+               gossip_err("Look at pvfs2-client-core log file (typically /tmp/pvfs2-client.log) for more details\n");
+       }
+
+       op_release(new_op);
+
+       return d;
+}
+
+void orangefs_kill_sb(struct super_block *sb)
+{
+       gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_kill_sb: called\n");
+
+       /*
+        * issue the unmount to userspace to tell it to remove the
+        * dynamic mount info it has for this superblock
+        */
+       orangefs_unmount_sb(sb);
+
+       /* remove the sb from our list of orangefs specific sb's */
+       remove_orangefs_sb(sb);
+
+       /* provided sb cleanup */
+       kill_anon_super(sb);
+
+       /* free the orangefs superblock private data */
+       kfree(ORANGEFS_SB(sb));
+}
+
+int orangefs_inode_cache_initialize(void)
+{
+       orangefs_inode_cache = kmem_cache_create("orangefs_inode_cache",
+                                             sizeof(struct orangefs_inode_s),
+                                             0,
+                                             ORANGEFS_CACHE_CREATE_FLAGS,
+                                             orangefs_inode_cache_ctor);
+
+       if (!orangefs_inode_cache) {
+               gossip_err("Cannot create orangefs_inode_cache\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+int orangefs_inode_cache_finalize(void)
+{
+       kmem_cache_destroy(orangefs_inode_cache);
+       return 0;
+}
diff --git a/fs/orangefs/symlink.c b/fs/orangefs/symlink.c
new file mode 100644 (file)
index 0000000..943884b
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+static const char *orangefs_get_link(struct dentry *dentry, struct inode *inode,
+                                    struct delayed_call *done)
+{
+       char *target;
+
+       if (!dentry)
+               return ERR_PTR(-ECHILD);
+
+       target = ORANGEFS_I(dentry->d_inode)->link_target;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "%s: called on %s (target is %p)\n",
+                    __func__, (char *)dentry->d_name.name, target);
+
+       return target;
+}
+
+struct inode_operations orangefs_symlink_inode_operations = {
+       .readlink = generic_readlink,
+       .get_link = orangefs_get_link,
+       .setattr = orangefs_setattr,
+       .getattr = orangefs_getattr,
+       .listxattr = orangefs_listxattr,
+       .setxattr = generic_setxattr,
+       .permission = orangefs_permission,
+};
diff --git a/fs/orangefs/upcall.h b/fs/orangefs/upcall.h
new file mode 100644 (file)
index 0000000..781cbc3
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#ifndef __UPCALL_H
+#define __UPCALL_H
+
+/*
+ * This header has been sanitized to avoid 32-/64-bit interaction issues
+ * between client-core and the device: fixed-width types and explicit
+ * __pad members help keep structure sizes and offsets consistent on
+ * both sides.
+ */
+struct orangefs_io_request_s {
+       __s32 async_vfs_io;
+       __s32 buf_index;
+       __s32 count;
+       __s32 __pad1;
+       __s64 offset;
+       struct orangefs_object_kref refn;
+       enum ORANGEFS_io_type io_type;
+       __s32 readahead_size;
+};
+
+struct orangefs_lookup_request_s {
+       __s32 sym_follow;
+       __s32 __pad1;
+       struct orangefs_object_kref parent_refn;
+       char d_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_create_request_s {
+       struct orangefs_object_kref parent_refn;
+       struct ORANGEFS_sys_attr_s attributes;
+       char d_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_symlink_request_s {
+       struct orangefs_object_kref parent_refn;
+       struct ORANGEFS_sys_attr_s attributes;
+       char entry_name[ORANGEFS_NAME_LEN];
+       char target[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_getattr_request_s {
+       struct orangefs_object_kref refn;
+       __u32 mask;
+       __u32 __pad1;
+};
+
+struct orangefs_setattr_request_s {
+       struct orangefs_object_kref refn;
+       struct ORANGEFS_sys_attr_s attributes;
+};
+
+struct orangefs_remove_request_s {
+       struct orangefs_object_kref parent_refn;
+       char d_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_mkdir_request_s {
+       struct orangefs_object_kref parent_refn;
+       struct ORANGEFS_sys_attr_s attributes;
+       char d_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_readdir_request_s {
+       struct orangefs_object_kref refn;
+       __u64 token;
+       __s32 max_dirent_count;
+       __s32 buf_index;
+};
+
+struct orangefs_readdirplus_request_s {
+       struct orangefs_object_kref refn;
+       __u64 token;
+       __s32 max_dirent_count;
+       __u32 mask;
+       __s32 buf_index;
+       __s32 __pad1;
+};
+
+struct orangefs_rename_request_s {
+       struct orangefs_object_kref old_parent_refn;
+       struct orangefs_object_kref new_parent_refn;
+       char d_old_name[ORANGEFS_NAME_LEN];
+       char d_new_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_statfs_request_s {
+       __s32 fs_id;
+       __s32 __pad1;
+};
+
+struct orangefs_truncate_request_s {
+       struct orangefs_object_kref refn;
+       __s64 size;
+};
+
+struct orangefs_mmap_ra_cache_flush_request_s {
+       struct orangefs_object_kref refn;
+};
+
+struct orangefs_fs_mount_request_s {
+       char orangefs_config_server[ORANGEFS_MAX_SERVER_ADDR_LEN];
+};
+
+struct orangefs_fs_umount_request_s {
+       __s32 id;
+       __s32 fs_id;
+       char orangefs_config_server[ORANGEFS_MAX_SERVER_ADDR_LEN];
+};
+
+struct orangefs_getxattr_request_s {
+       struct orangefs_object_kref refn;
+       __s32 key_sz;
+       __s32 __pad1;
+       char key[ORANGEFS_MAX_XATTR_NAMELEN];
+};
+
+struct orangefs_setxattr_request_s {
+       struct orangefs_object_kref refn;
+       struct ORANGEFS_keyval_pair keyval;
+       __s32 flags;
+       __s32 __pad1;
+};
+
+struct orangefs_listxattr_request_s {
+       struct orangefs_object_kref refn;
+       __s32 requested_count;
+       __s32 __pad1;
+       __u64 token;
+};
+
+struct orangefs_removexattr_request_s {
+       struct orangefs_object_kref refn;
+       __s32 key_sz;
+       __s32 __pad1;
+       char key[ORANGEFS_MAX_XATTR_NAMELEN];
+};
+
+struct orangefs_op_cancel_s {
+       __u64 op_tag;
+};
+
+struct orangefs_fsync_request_s {
+       struct orangefs_object_kref refn;
+};
+
+enum orangefs_param_request_type {
+       ORANGEFS_PARAM_REQUEST_SET = 1,
+       ORANGEFS_PARAM_REQUEST_GET = 2
+};
+
+enum orangefs_param_request_op {
+       ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS = 1,
+       ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT = 2,
+       ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT = 3,
+       ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE = 4,
+       ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS = 5,
+       ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE = 6,
+       ORANGEFS_PARAM_REQUEST_OP_PERF_RESET = 7,
+       ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS = 8,
+       ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT = 9,
+       ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT = 10,
+       ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE = 11,
+       ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_TIMEOUT_MSECS = 12,
+       ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_HARD_LIMIT = 13,
+       ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_SOFT_LIMIT = 14,
+       ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_RECLAIM_PERCENTAGE = 15,
+       ORANGEFS_PARAM_REQUEST_OP_CLIENT_DEBUG = 16,
+       ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS = 17,
+       ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT = 18,
+       ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT = 19,
+       ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE = 20,
+       ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS = 21,
+       ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT = 22,
+       ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT = 23,
+       ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE = 24,
+       ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES = 25,
+};
+
+struct orangefs_param_request_s {
+       enum orangefs_param_request_type type;
+       enum orangefs_param_request_op op;
+       __s64 value;
+       char s_value[ORANGEFS_MAX_DEBUG_STRING_LEN];
+};
+
+enum orangefs_perf_count_request_type {
+       ORANGEFS_PERF_COUNT_REQUEST_ACACHE = 1,
+       ORANGEFS_PERF_COUNT_REQUEST_NCACHE = 2,
+       ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE = 3,
+};
+
+struct orangefs_perf_count_request_s {
+       enum orangefs_perf_count_request_type type;
+       __s32 __pad1;
+};
+
+struct orangefs_fs_key_request_s {
+       __s32 fsid;
+       __s32 __pad1;
+};
+
+struct orangefs_upcall_s {
+       __s32 type;
+       __u32 uid;
+       __u32 gid;
+       int pid;
+       int tgid;
+       /* Trailers unused but must be retained for protocol compatibility. */
+       __s64 trailer_size;
+       char *trailer_buf;
+
+       union {
+               struct orangefs_io_request_s io;
+               struct orangefs_lookup_request_s lookup;
+               struct orangefs_create_request_s create;
+               struct orangefs_symlink_request_s sym;
+               struct orangefs_getattr_request_s getattr;
+               struct orangefs_setattr_request_s setattr;
+               struct orangefs_remove_request_s remove;
+               struct orangefs_mkdir_request_s mkdir;
+               struct orangefs_readdir_request_s readdir;
+               struct orangefs_readdirplus_request_s readdirplus;
+               struct orangefs_rename_request_s rename;
+               struct orangefs_statfs_request_s statfs;
+               struct orangefs_truncate_request_s truncate;
+               struct orangefs_mmap_ra_cache_flush_request_s ra_cache_flush;
+               struct orangefs_fs_mount_request_s fs_mount;
+               struct orangefs_fs_umount_request_s fs_umount;
+               struct orangefs_getxattr_request_s getxattr;
+               struct orangefs_setxattr_request_s setxattr;
+               struct orangefs_listxattr_request_s listxattr;
+               struct orangefs_removexattr_request_s removexattr;
+               struct orangefs_op_cancel_s cancel;
+               struct orangefs_fsync_request_s fsync;
+               struct orangefs_param_request_s param;
+               struct orangefs_perf_count_request_s perf_count;
+               struct orangefs_fs_key_request_s fs_key;
+       } req;
+};
+
+#endif /* __UPCALL_H */
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
new file mode 100644 (file)
index 0000000..191d886
--- /dev/null
@@ -0,0 +1,506 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ * (C) 2011 Omnibond Systems
+ *
+ * Changes by Acxiom Corporation to implement generic service_operation()
+ * function, Copyright Acxiom Corporation, 2005.
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  In-kernel waitqueue operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *);
+static int wait_for_matching_downcall(struct orangefs_kernel_op_s *);
+
+/*
+ * Walk the list of operations present in the request queue and mark
+ * them as purged.
+ * NOTE: This is called from the device close after client-core has
+ * guaranteed that no new operations could appear on the list since the
+ * client-core is about to exit.
+ */
+void purge_waiting_ops(void)
+{
+       struct orangefs_kernel_op_s *op;
+
+       spin_lock(&orangefs_request_list_lock);
+       list_for_each_entry(op, &orangefs_request_list, list) {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "pvfs2-client-core: purging op tag %llu %s\n",
+                            llu(op->tag),
+                            get_opname_string(op));
+               spin_lock(&op->lock);
+               set_op_state_purged(op);
+               spin_unlock(&op->lock);
+       }
+       spin_unlock(&orangefs_request_list_lock);
+}
+
+static inline void
+add_op_to_request_list(struct orangefs_kernel_op_s *op)
+{
+       spin_lock(&orangefs_request_list_lock);
+       spin_lock(&op->lock);
+       set_op_state_waiting(op);
+       list_add_tail(&op->list, &orangefs_request_list);
+       spin_unlock(&orangefs_request_list_lock);
+       spin_unlock(&op->lock);
+       wake_up_interruptible(&orangefs_request_list_waitq);
+}
+
+static inline
+void add_priority_op_to_request_list(struct orangefs_kernel_op_s *op)
+{
+       spin_lock(&orangefs_request_list_lock);
+       spin_lock(&op->lock);
+       set_op_state_waiting(op);
+
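+       /*
+        * Priority ops go to the head of the request list so that
+        * client-core sees them first.
+        */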
+       list_add(&op->list, &orangefs_request_list);
+       spin_unlock(&orangefs_request_list_lock);
+       spin_unlock(&op->lock);
+       wake_up_interruptible(&orangefs_request_list_waitq);
+}
+
+/*
+ * Submits an ORANGEFS operation and waits for it to complete.
+ *
+ * Note op->downcall.status will contain the status of the operation (in
+ * errno format), whether provided by pvfs2-client or a result of failure to
+ * service the operation.  If the caller wishes to distinguish, then
+ * op->state can be checked to see if it was serviced or not.
+ *
+ * Returns contents of op->downcall.status for convenience
+ */
+int service_operation(struct orangefs_kernel_op_s *op,
+                     const char *op_name,
+                     int flags)
+{
+       /*
+        * Recognized bits in "flags": ORANGEFS_OP_INTERRUPTIBLE,
+        * ORANGEFS_OP_NO_SEMAPHORE, ORANGEFS_OP_PRIORITY,
+        * ORANGEFS_OP_ASYNC and ORANGEFS_OP_CANCELLATION; each one
+        * modifies the behavior below.
+        */
+       sigset_t orig_sigset;
+       int ret = 0;
+
+       DEFINE_WAIT(wait_entry);
+
+       op->upcall.tgid = current->tgid;
+       op->upcall.pid = current->pid;
+
+retry_servicing:
+       op->downcall.status = 0;
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "orangefs: service_operation: %s %p\n",
+                    op_name,
+                    op);
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "orangefs: operation posted by process: %s, pid: %i\n",
+                    current->comm,
+                    current->pid);
+
+       /* mask out signals if this operation is not to be interrupted */
+       if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
+               orangefs_block_signals(&orig_sigset);
+
+       if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) {
+               ret = mutex_lock_interruptible(&request_mutex);
+               /*
+                * check to see if we were interrupted while waiting for
+                * semaphore
+                */
+               if (ret < 0) {
+                       if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
+                               orangefs_set_signals(&orig_sigset);
+                       op->downcall.status = ret;
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "orangefs: service_operation interrupted.\n");
+                       return ret;
+               }
+       }
+
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "%s:About to call is_daemon_in_service().\n",
+                    __func__);
+
+       if (is_daemon_in_service() < 0) {
+               /*
+                * By incrementing the per-operation attempt counter, we
+                * directly go into the timeout logic while waiting for
+                * the matching downcall to be read
+                */
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:client core is NOT in service(%d).\n",
+                            __func__,
+                            is_daemon_in_service());
+               op->attempts++;
+       }
+
+       /* queue up the operation */
+       if (flags & ORANGEFS_OP_PRIORITY) {
+               add_priority_op_to_request_list(op);
+       } else {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:About to call add_op_to_request_list().\n",
+                            __func__);
+               add_op_to_request_list(op);
+       }
+
+       if (!(flags & ORANGEFS_OP_NO_SEMAPHORE))
+               mutex_unlock(&request_mutex);
+
+       /*
+        * If we are asked to service an asynchronous operation from the
+        * VFS perspective, we are done.
+        */
+       if (flags & ORANGEFS_OP_ASYNC)
+               return 0;
+
+       if (flags & ORANGEFS_OP_CANCELLATION) {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:"
+                            "About to call wait_for_cancellation_downcall.\n",
+                            __func__);
+               ret = wait_for_cancellation_downcall(op);
+       } else {
+               ret = wait_for_matching_downcall(op);
+       }
+
+       if (ret < 0) {
+               /* failed to get matching downcall */
+               if (ret == -ETIMEDOUT) {
+                       gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n",
+                                  op_name);
+               }
+               op->downcall.status = ret;
+       } else {
+               /* got matching downcall; make sure status is in errno format */
+               op->downcall.status =
+                   orangefs_normalize_to_errno(op->downcall.status);
+               ret = op->downcall.status;
+       }
+
+       if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
+               orangefs_set_signals(&orig_sigset);
+
+       BUG_ON(ret != op->downcall.status);
+       /* retry if operation has not been serviced and if requested */
+       if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "orangefs: tag %llu (%s)"
+                            " -- operation to be retried (%d attempt)\n",
+                            llu(op->tag),
+                            op_name,
+                            op->attempts + 1);
+
+               if (!op->uses_shared_memory)
+                       /*
+                        * this operation doesn't use the shared memory
+                        * system
+                        */
+                       goto retry_servicing;
+
+               /* op uses shared memory */
+               if (orangefs_get_bufmap_init() == 0) {
+                       WARN_ON(1);
+                       /*
+                        * This operation uses the shared memory system AND
+                        * the system is not yet ready. This situation occurs
+                        * when the client-core is restarted AND there were
+                        * operations waiting to be processed or were already
+                        * in process.
+                        */
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "uses_shared_memory is true.\n");
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "Client core in-service status(%d).\n",
+                                    is_daemon_in_service());
+                       gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n",
+                                    orangefs_get_bufmap_init());
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "operation's status is 0x%0x.\n",
+                                    op->op_state);
+
+                       /*
+                        * let process sleep for a few seconds so shared
+                        * memory system can be initialized.
+                        */
+                       prepare_to_wait(&orangefs_bufmap_init_waitq,
+                                       &wait_entry,
+                                       TASK_INTERRUPTIBLE);
+
+                       /*
+                        * Wait for orangefs_bufmap_initialize() to wake me up
+                        * within the allotted time.
+                        */
+                       ret = schedule_timeout(
+                               ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ);
+
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "Value returned from schedule_timeout:"
+                                    "%d.\n",
+                                    ret);
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "Is shared memory available? (%d).\n",
+                                    orangefs_get_bufmap_init());
+
+                       finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);
+
+                       if (orangefs_get_bufmap_init() == 0) {
+                               gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted.  Aborting user's request(%s).\n",
+                                          __func__,
+                                          ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS,
+                                          get_opname_string(op));
+                               return -EIO;
+                       }
+
+                       /*
+                        * Return to the calling function and re-populate a
+                        * shared memory buffer.
+                        */
+                       return -EAGAIN;
+               }
+       }
+
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "orangefs: service_operation %s returning: %d for %p.\n",
+                    op_name,
+                    ret,
+                    op);
+       return ret;
+}
+
+static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
+{
+       /*
+        * handle interrupted cases depending on what state we were in when
+        * the interruption is detected.  there is a coarse grained lock
+        * across the operation.
+        *
+        * Called with op->lock held.
+        */
+       op->op_state |= OP_VFS_STATE_GIVEN_UP;
+
+       if (op_state_waiting(op)) {
+               /*
+                * upcall hasn't been read; remove op from upcall request
+                * list.
+                */
+               spin_unlock(&op->lock);
+               spin_lock(&orangefs_request_list_lock);
+               list_del(&op->list);
+               spin_unlock(&orangefs_request_list_lock);
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "Interrupted: Removed op %p from request_list\n",
+                            op);
+       } else if (op_state_in_progress(op)) {
+               /* op must be removed from the in progress htable */
+               spin_unlock(&op->lock);
+               spin_lock(&htable_ops_in_progress_lock);
+               list_del(&op->list);
+               spin_unlock(&htable_ops_in_progress_lock);
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "Interrupted: Removed op %p"
+                            " from htable_ops_in_progress\n",
+                            op);
+       } else if (!op_state_serviced(op)) {
+               spin_unlock(&op->lock);
+               gossip_err("interrupted operation is in a weird state 0x%x\n",
+                          op->op_state);
+       } else {
+               /*
+                * It is not intended for execution to flow here,
+                * but having this unlock here makes sparse happy.
+                */
+               gossip_err("%s: can't get here.\n", __func__);
+               spin_unlock(&op->lock);
+       }
+}
+
+/*
+ * Sleeps on the waitqueue waiting for the matching downcall.
+ * If client-core finishes servicing, then we are good to go.
+ * Else, if client-core exits, we get woken up here and retry with a timeout.
+ *
+ * Postcondition: when this call returns to the caller, the specified op
+ * will no longer be on any list or htable.
+ *
+ * Returns 0 on success and -errno on failure.
+ * Errors are:
+ * EAGAIN in case we want the caller to requeue and try again.
+ * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
+ * operation, either because client-core seems to be exiting too often
+ * or because we were interrupted.
+ */
+static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
+{
+       int ret = -EINVAL;
+       DEFINE_WAIT(wait_entry);
+
+       while (1) {
+               spin_lock(&op->lock);
+               prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
+               if (op_state_serviced(op)) {
+                       spin_unlock(&op->lock);
+                       ret = 0;
+                       break;
+               }
+
+               if (unlikely(signal_pending(current))) {
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "*** %s:"
+                                    " operation interrupted by a signal (tag "
+                                    "%llu, op %p)\n",
+                                    __func__,
+                                    llu(op->tag),
+                                    op);
+                       orangefs_clean_up_interrupted_operation(op);
+                       ret = -EINTR;
+                       break;
+               }
+
+               /*
+                * if this was our first attempt and client-core
+                * has not purged our operation, we are happy to
+                * simply wait
+                */
+               if (op->attempts == 0 && !op_state_purged(op)) {
+                       spin_unlock(&op->lock);
+                       schedule();
+               } else {
+                       spin_unlock(&op->lock);
+                       /*
+                        * subsequent attempts, we retry exactly once
+                        * with timeouts
+                        */
+                       if (!schedule_timeout(op_timeout_secs * HZ)) {
+                               gossip_debug(GOSSIP_WAIT_DEBUG,
+                                            "*** %s:"
+                                            " operation timed out (tag"
+                                            " %llu, %p, att %d)\n",
+                                            __func__,
+                                            llu(op->tag),
+                                            op,
+                                            op->attempts);
+                               ret = -ETIMEDOUT;
+                               spin_lock(&op->lock);
+                               orangefs_clean_up_interrupted_operation(op);
+                               break;
+                       }
+               }
+               spin_lock(&op->lock);
+               op->attempts++;
+               /*
+                * if the operation was purged in the meantime, it
+                * is better to requeue it afresh but ensure that
+                * we have not been purged repeatedly. This could
+                * happen if client-core crashes when an op
+                * is being serviced, so we requeue the op, client
+                * core crashes again so we requeue the op, client
+                * core starts, and so on...
+                */
+               if (op_state_purged(op)) {
+                       ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
+                                -EAGAIN :
+                                -EIO;
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "*** %s:"
+                                    " operation purged (tag "
+                                    "%llu, %p, att %d)\n",
+                                    __func__,
+                                    llu(op->tag),
+                                    op,
+                                    op->attempts);
+                       orangefs_clean_up_interrupted_operation(op);
+                       break;
+               }
+               spin_unlock(&op->lock);
+       }
+
+       spin_lock(&op->lock);
+       finish_wait(&op->waitq, &wait_entry);
+       spin_unlock(&op->lock);
+
+       return ret;
+}
+
+/*
+ * Similar to wait_for_matching_downcall(), but used in the special case
+ * of I/O cancellations.
+ *
+ * Note we need a special wait function because if this is called we already
+ *      know that a signal is pending in current and need to service the
+ *      cancellation upcall anyway.  The only way to exit this is to either
+ *      time out or have the cancellation be serviced properly.
+ */
+static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
+{
+       int ret = -EINVAL;
+       DEFINE_WAIT(wait_entry);
+
+       while (1) {
+               spin_lock(&op->lock);
+               prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
+               if (op_state_serviced(op)) {
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "%s:op-state is SERVICED.\n",
+                                    __func__);
+                       spin_unlock(&op->lock);
+                       ret = 0;
+                       break;
+               }
+
+               if (signal_pending(current)) {
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "%s:operation interrupted by a signal (tag"
+                                    " %llu, op %p)\n",
+                                    __func__,
+                                    llu(op->tag),
+                                    op);
+                       orangefs_clean_up_interrupted_operation(op);
+                       ret = -EINTR;
+                       break;
+               }
+
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:About to call schedule_timeout.\n",
+                            __func__);
+               spin_unlock(&op->lock);
+               ret = schedule_timeout(op_timeout_secs * HZ);
+
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:Value returned from schedule_timeout(%d).\n",
+                            __func__,
+                            ret);
+               if (!ret) {
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "%s:*** operation timed out: %p\n",
+                                    __func__,
+                                    op);
+                       spin_lock(&op->lock);
+                       orangefs_clean_up_interrupted_operation(op);
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:Breaking out of loop, regardless of value returned by schedule_timeout.\n",
+                            __func__);
+               ret = -ETIMEDOUT;
+               break;
+       }
+
+       spin_lock(&op->lock);
+       finish_wait(&op->waitq, &wait_entry);
+       spin_unlock(&op->lock);
+
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "%s:returning ret(%d)\n",
+                    __func__,
+                    ret);
+
+       return ret;
+}
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
new file mode 100644 (file)
index 0000000..8e9ccf9
--- /dev/null
@@ -0,0 +1,540 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Linux VFS extended attribute operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+#define SYSTEM_ORANGEFS_KEY "system.pvfs2."
+#define SYSTEM_ORANGEFS_KEY_LEN 13
+
+/*
+ * This function returns
+ *   0 if the key corresponding to name is not meant to be printed as part
+ *     of a listxattr.
+ *   1 if the key corresponding to name is meant to be returned as part of
+ *     a listxattr.
+ * Keys that start with SYSTEM_ORANGEFS_KEY are the ones to avoid printing.
+ */
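+/*
+ * For example, a key such as "system.pvfs2.something" is filtered out
+ * of the listing, while a key such as "user.foo" is returned.
+ */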
+static int is_reserved_key(const char *key, size_t size)
+{
+
+       if (size < SYSTEM_ORANGEFS_KEY_LEN)
+               return 1;
+
+       return strncmp(key, SYSTEM_ORANGEFS_KEY, SYSTEM_ORANGEFS_KEY_LEN) ?  1 : 0;
+}
+
+static inline int convert_to_internal_xattr_flags(int setxattr_flags)
+{
+       int internal_flag = 0;
+
+       if (setxattr_flags & XATTR_REPLACE) {
+               /* Attribute must exist! */
+               internal_flag = ORANGEFS_XATTR_REPLACE;
+       } else if (setxattr_flags & XATTR_CREATE) {
+               /* Attribute must not exist */
+               internal_flag = ORANGEFS_XATTR_CREATE;
+       }
+       return internal_flag;
+}
+
+/*
+ * Tries to fetch the value of the specified key (extended attribute) of
+ * a given file into a user-specified buffer. Note that the getxattr
+ * interface allows users to probe the size of an extended attribute by
+ * passing in a size of 0. Thus our return value is always the size of
+ * the attribute unless the key does not exist for the file or there
+ * were errors in fetching the attribute value.
+ */
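+/*
+ * The usual userspace pattern this supports looks roughly like:
+ *
+ *     len = getxattr(path, name, NULL, 0);    probe the value size
+ *     buf = malloc(len);
+ *     getxattr(path, name, buf, len);         fetch the value
+ */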
+ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
+               const char *name, void *buffer, size_t size)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op = NULL;
+       ssize_t ret = -ENOMEM;
+       ssize_t length = 0;
+       int fsuid;
+       int fsgid;
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "%s: prefix %s name %s, buffer_size %zd\n",
+                    __func__, prefix, name, size);
+
+       if (name == NULL || (size > 0 && buffer == NULL)) {
+               gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n");
+               return -EINVAL;
+       }
+       if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+               gossip_err("Invalid key length (%d)\n",
+                          (int)(strlen(name) + strlen(prefix)));
+               return -EINVAL;
+       }
+
+       fsuid = from_kuid(current_user_ns(), current_fsuid());
+       fsgid = from_kgid(current_user_ns(), current_fsgid());
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "getxattr on inode %pU, name %s "
+                    "(uid %o, gid %o)\n",
+                    get_khandle_from_ino(inode),
+                    name,
+                    fsuid,
+                    fsgid);
+
+       down_read(&orangefs_inode->xattr_sem);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_GETXATTR);
+       if (!new_op)
+               goto out_unlock;
+
+       new_op->upcall.req.getxattr.refn = orangefs_inode->refn;
+       ret = snprintf((char *)new_op->upcall.req.getxattr.key,
+                      ORANGEFS_MAX_XATTR_NAMELEN, "%s%s", prefix, name);
+
+       /*
+        * NOTE: Although keys are meant to be NUL-terminated textual
+        * strings, we explicitly pass the length in case this changes
+        * later on.
+        */
+       new_op->upcall.req.getxattr.key_sz = ret + 1;
+
+       ret = service_operation(new_op, "orangefs_inode_getxattr",
+                               get_interruptible_flag(inode));
+       if (ret != 0) {
+               if (ret == -ENOENT) {
+                       ret = -ENODATA;
+                       gossip_debug(GOSSIP_XATTR_DEBUG,
+                                    "orangefs_inode_getxattr: inode %pU key %s"
+                                    " does not exist!\n",
+                                    get_khandle_from_ino(inode),
+                                    (char *)new_op->upcall.req.getxattr.key);
+               }
+               goto out_release_op;
+       }
+
+       /*
+        * Length returned includes null terminator.
+        */
+       length = new_op->downcall.resp.getxattr.val_sz;
+
+       /*
+        * Just return the length of the queried attribute.
+        */
+       if (size == 0) {
+               ret = length;
+               goto out_release_op;
+       }
+
+       /*
+        * Check to see if key length is > provided buffer size.
+        */
+       if (length > size) {
+               ret = -ERANGE;
+               goto out_release_op;
+       }
+
+       memset(buffer, 0, size);
+       memcpy(buffer, new_op->downcall.resp.getxattr.val, length);
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+            "orangefs_inode_getxattr: inode %pU "
+            "key %s key_sz %d, val_len %d\n",
+            get_khandle_from_ino(inode),
+            (char *)new_op->
+               upcall.req.getxattr.key,
+                    (int)new_op->
+               upcall.req.getxattr.key_sz,
+            (int)ret);
+
+       ret = length;
+
+out_release_op:
+       op_release(new_op);
+out_unlock:
+       up_read(&orangefs_inode->xattr_sem);
+       return ret;
+}
+
+static int orangefs_inode_removexattr(struct inode *inode,
+                           const char *prefix,
+                           const char *name,
+                           int flags)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int ret = -ENOMEM;
+
+       down_write(&orangefs_inode->xattr_sem);
+       new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR);
+       if (!new_op)
+               goto out_unlock;
+
+       new_op->upcall.req.removexattr.refn = orangefs_inode->refn;
+       /*
+        * NOTE: Although keys are meant to be NUL-terminated textual
+        * strings, we explicitly pass the length in case this changes
+        * later on.
+        */
+       ret = snprintf((char *)new_op->upcall.req.removexattr.key,
+                      ORANGEFS_MAX_XATTR_NAMELEN,
+                      "%s%s",
+                      (prefix ? prefix : ""),
+                      name);
+       new_op->upcall.req.removexattr.key_sz = ret + 1;
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_removexattr: key %s, key_sz %d\n",
+                    (char *)new_op->upcall.req.removexattr.key,
+                    (int)new_op->upcall.req.removexattr.key_sz);
+
+       ret = service_operation(new_op,
+                               "orangefs_inode_removexattr",
+                               get_interruptible_flag(inode));
+       if (ret == -ENOENT) {
+               /*
+                * Request to replace a non-existent attribute is an error.
+                */
+               if (flags & XATTR_REPLACE)
+                       ret = -ENODATA;
+               else
+                       ret = 0;
+       }
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_removexattr: returning %d\n", ret);
+
+       op_release(new_op);
+out_unlock:
+       up_write(&orangefs_inode->xattr_sem);
+       return ret;
+}
+
+/*
+ * Tries to set an attribute for a given key on a file.
+ *
+ * Returns a -ve number on error and 0 on success.  Key is text, but value
+ * can be binary!
+ */
+int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
+               const char *name, const void *value, size_t size, int flags)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       int internal_flag = 0;
+       int ret = -ENOMEM;
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "%s: prefix %s, name %s, buffer_size %zd\n",
+                    __func__, prefix, name, size);
+
+       if (size < 0 ||
+           size >= ORANGEFS_MAX_XATTR_VALUELEN ||
+           flags < 0) {
+               gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
+                          (int)size,
+                          flags);
+               return -EINVAL;
+       }
+
+       if (name == NULL ||
+           (size > 0 && value == NULL)) {
+               gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n");
+               return -EINVAL;
+       }
+
+       internal_flag = convert_to_internal_xattr_flags(flags);
+
+       if (prefix) {
+               if (strlen(name) + strlen(prefix) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+                       gossip_err
+                           ("orangefs_inode_setxattr: bogus key size (%d)\n",
+                            (int)(strlen(name) + strlen(prefix)));
+                       return -EINVAL;
+               }
+       } else {
+               if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+                       gossip_err
+                           ("orangefs_inode_setxattr: bogus key size (%d)\n",
+                            (int)(strlen(name)));
+                       return -EINVAL;
+               }
+       }
+
+       /* This is equivalent to a removexattr */
+       if (size == 0 && value == NULL) {
+               gossip_debug(GOSSIP_XATTR_DEBUG,
+                            "removing xattr (%s%s)\n",
+                            prefix,
+                            name);
+               return orangefs_inode_removexattr(inode, prefix, name, flags);
+       }
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "setxattr on inode %pU, name %s\n",
+                    get_khandle_from_ino(inode),
+                    name);
+
+       down_write(&orangefs_inode->xattr_sem);
+       new_op = op_alloc(ORANGEFS_VFS_OP_SETXATTR);
+       if (!new_op)
+               goto out_unlock;
+
+       new_op->upcall.req.setxattr.refn = orangefs_inode->refn;
+       new_op->upcall.req.setxattr.flags = internal_flag;
+       /*
+        * NOTE: Although keys are meant to be NUL-terminated textual
+        * strings, we explicitly pass the length in case this changes
+        * later on.
+        */
+       ret = snprintf((char *)new_op->upcall.req.setxattr.keyval.key,
+                      ORANGEFS_MAX_XATTR_NAMELEN,
+                      "%s%s",
+                      prefix, name);
+       new_op->upcall.req.setxattr.keyval.key_sz = ret + 1;
+       memcpy(new_op->upcall.req.setxattr.keyval.val, value, size);
+       new_op->upcall.req.setxattr.keyval.val_sz = size;
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_setxattr: key %s, key_sz %d "
+                    " value size %zd\n",
+                    (char *)new_op->upcall.req.setxattr.keyval.key,
+                    (int)new_op->upcall.req.setxattr.keyval.key_sz,
+                    size);
+
+       ret = service_operation(new_op,
+                               "orangefs_inode_setxattr",
+                               get_interruptible_flag(inode));
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_setxattr: returning %d\n",
+                    ret);
+
+       /* when request is serviced properly, free req op struct */
+       op_release(new_op);
+out_unlock:
+       up_write(&orangefs_inode->xattr_sem);
+       return ret;
+}
+
+/*
+ * Tries to get a specified object's keys into a user-specified buffer of a
+ * given size.  Note that like the previous instances of xattr routines, this
+ * also allows you to pass in a NULL pointer and 0 size to probe the size for
+ * subsequent memory allocations. Thus our return value is always the size of
+ * all the keys unless there were errors in fetching the keys!
+ */
+ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+       struct inode *inode = dentry->d_inode;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       __u64 token = ORANGEFS_ITERATE_START;
+       ssize_t ret = -ENOMEM;
+       ssize_t total = 0;
+       ssize_t length = 0;
+       int count_keys = 0;
+       int key_size;
+       int i = 0;
+       int returned_count = 0;
+
+       if (size > 0 && buffer == NULL) {
+               gossip_err("%s: bogus NULL pointers\n", __func__);
+               return -EINVAL;
+       }
+       if (size < 0) {
+               gossip_err("Invalid size (%d)\n", (int)size);
+               return -EINVAL;
+       }
+
+       down_read(&orangefs_inode->xattr_sem);
+       new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR);
+       if (!new_op)
+               goto out_unlock;
+
+       if (buffer && size > 0)
+               memset(buffer, 0, size);
+
+try_again:
+       key_size = 0;
+       new_op->upcall.req.listxattr.refn = orangefs_inode->refn;
+       new_op->upcall.req.listxattr.token = token;
+       new_op->upcall.req.listxattr.requested_count =
+           (size == 0) ? 0 : ORANGEFS_MAX_XATTR_LISTLEN;
+       ret = service_operation(new_op, __func__,
+                               get_interruptible_flag(inode));
+       if (ret != 0)
+               goto done;
+
+       if (size == 0) {
+               /*
+                * This is a generous upper limit, but getting it exactly
+                * right is not worth the effort, since the user (not the
+                * kernel) allocates the memory based on it.
+                */
+               total = new_op->downcall.resp.listxattr.returned_count *
+                       ORANGEFS_MAX_XATTR_NAMELEN;
+               goto done;
+       }
+
+       length = new_op->downcall.resp.listxattr.keylen;
+       if (length == 0)
+               goto done;
+
+       returned_count = new_op->downcall.resp.listxattr.returned_count;
+       if (returned_count < 0 ||
+           returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) {
+               gossip_err("%s: impossible value for returned_count:%d:\n",
+               __func__,
+               returned_count);
+               goto done;
+       }
+
+       /*
+        * Check to see how much can be fit in the buffer. Fit only whole keys.
+        */
+       for (i = 0; i < returned_count; i++) {
+               if (total + new_op->downcall.resp.listxattr.lengths[i] > size)
+                       goto done;
+
+               /*
+                * Since many dumb programs try to setxattr() on our reserved
+                * xattrs this is a feeble attempt at defeating those by not
+                * listing them in the output of listxattr.. sigh
+                */
+               if (is_reserved_key(new_op->downcall.resp.listxattr.key +
+                                   key_size,
+                                   new_op->downcall.resp.
+                                       listxattr.lengths[i])) {
+                       gossip_debug(GOSSIP_XATTR_DEBUG, "Copying key %d -> %s\n",
+                                       i, new_op->downcall.resp.listxattr.key +
+                                               key_size);
+                       memcpy(buffer + total,
+                               new_op->downcall.resp.listxattr.key + key_size,
+                               new_op->downcall.resp.listxattr.lengths[i]);
+                       total += new_op->downcall.resp.listxattr.lengths[i];
+                       count_keys++;
+               } else {
+                       gossip_debug(GOSSIP_XATTR_DEBUG, "[RESERVED] key %d -> %s\n",
+                                       i, new_op->downcall.resp.listxattr.key +
+                                               key_size);
+               }
+               key_size += new_op->downcall.resp.listxattr.lengths[i];
+       }
+
+       /*
+        * Since the buffer was large enough, we might have to continue
+        * fetching more keys!
+        */
+       token = new_op->downcall.resp.listxattr.token;
+       if (token != ORANGEFS_ITERATE_END)
+               goto try_again;
+
+done:
+       gossip_debug(GOSSIP_XATTR_DEBUG, "%s: returning %d"
+                    " [size of buffer %ld] (filled in %d keys)\n",
+                    __func__,
+                    ret ? (int)ret : (int)total,
+                    (long)size,
+                    count_keys);
+       op_release(new_op);
+       if (ret == 0)
+               ret = total;
+out_unlock:
+       up_read(&orangefs_inode->xattr_sem);
+       return ret;
+}
+
+static int orangefs_xattr_set_default(const struct xattr_handler *handler,
+                                     struct dentry *dentry,
+                                     const char *name,
+                                     const void *buffer,
+                                     size_t size,
+                                     int flags)
+{
+       return orangefs_inode_setxattr(dentry->d_inode,
+                                   ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+                                   name,
+                                   buffer,
+                                   size,
+                                   flags);
+}
+
+static int orangefs_xattr_get_default(const struct xattr_handler *handler,
+                                     struct dentry *dentry,
+                                     const char *name,
+                                     void *buffer,
+                                     size_t size)
+{
+       return orangefs_inode_getxattr(dentry->d_inode,
+                                   ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+                                   name,
+                                   buffer,
+                                   size);
+
+}
+
+static int orangefs_xattr_set_trusted(const struct xattr_handler *handler,
+                                    struct dentry *dentry,
+                                    const char *name,
+                                    const void *buffer,
+                                    size_t size,
+                                    int flags)
+{
+       return orangefs_inode_setxattr(dentry->d_inode,
+                                   ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+                                   name,
+                                   buffer,
+                                   size,
+                                   flags);
+}
+
+static int orangefs_xattr_get_trusted(const struct xattr_handler *handler,
+                                     struct dentry *dentry,
+                                     const char *name,
+                                     void *buffer,
+                                     size_t size)
+{
+       return orangefs_inode_getxattr(dentry->d_inode,
+                                   ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+                                   name,
+                                   buffer,
+                                   size);
+}
+
+static struct xattr_handler orangefs_xattr_trusted_handler = {
+       .prefix = ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+       .get = orangefs_xattr_get_trusted,
+       .set = orangefs_xattr_set_trusted,
+};
+
+static struct xattr_handler orangefs_xattr_default_handler = {
+       /*
+        * NOTE: this prefix is the empty string, so that all
+        * un-prefixed xattr keys get caught here!
+        */
+       .prefix = ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+       .get = orangefs_xattr_get_default,
+       .set = orangefs_xattr_set_default,
+};
+
+const struct xattr_handler *orangefs_xattr_handlers[] = {
+       &posix_acl_access_xattr_handler,
+       &posix_acl_default_xattr_handler,
+       &orangefs_xattr_trusted_handler,
+       &orangefs_xattr_default_handler,
+       NULL
+};
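
The comment above orangefs_listxattr() describes the usual two-pass xattr protocol: a first call with a NULL buffer and size 0 only reports how much space the key list needs, and a second call fills the caller's buffer. Below is a minimal user-space sketch of that probe-then-fetch pattern using the generic llistxattr(2) interface; it is not OrangeFS-specific, and the helper name dump_xattr_keys is invented for illustration.

/*
 * Probe-then-fetch sketch for listing xattr keys: call once with a NULL
 * buffer to learn the required size, allocate, then call again.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

static int dump_xattr_keys(const char *path)
{
	ssize_t len = llistxattr(path, NULL, 0);	/* size probe */
	char *buf, *key;

	if (len < 0)
		return -1;
	buf = malloc(len + 1);
	if (!buf)
		return -1;
	len = llistxattr(path, buf, len);		/* real fetch */
	if (len < 0) {
		free(buf);
		return -1;
	}
	/* the buffer holds NUL-terminated key names packed back to back */
	for (key = buf; key < buf + len; key += strlen(key) + 1)
		printf("%s\n", key);
	free(buf);
	return 0;
}

int main(int argc, char **argv)
{
	return argc > 1 ? dump_xattr_keys(argv[1]) : 0;
}

Note that the size reported by the probe is only a hint; if the key set changes between the two calls, the second call can still fail and the caller retries.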
index 85d16c67c33eaa8b7a5ac65e38ec465814916619..fa95ab2d36740803e06da898a4851db317b86a9e 100644 (file)
@@ -259,23 +259,29 @@ static int do_maps_open(struct inode *inode, struct file *file,
                                sizeof(struct proc_maps_private));
 }
 
-static pid_t pid_of_stack(struct proc_maps_private *priv,
-                               struct vm_area_struct *vma, bool is_pid)
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static int is_stack(struct proc_maps_private *priv,
+                   struct vm_area_struct *vma, int is_pid)
 {
-       struct inode *inode = priv->inode;
-       struct task_struct *task;
-       pid_t ret = 0;
+       int stack = 0;
+
+       if (is_pid) {
+               stack = vma->vm_start <= vma->vm_mm->start_stack &&
+                       vma->vm_end >= vma->vm_mm->start_stack;
+       } else {
+               struct inode *inode = priv->inode;
+               struct task_struct *task;
 
-       rcu_read_lock();
-       task = pid_task(proc_pid(inode), PIDTYPE_PID);
-       if (task) {
-               task = task_of_stack(task, vma, is_pid);
+               rcu_read_lock();
+               task = pid_task(proc_pid(inode), PIDTYPE_PID);
                if (task)
-                       ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+                       stack = vma_is_stack_for_task(vma, task);
+               rcu_read_unlock();
        }
-       rcu_read_unlock();
-
-       return ret;
+       return stack;
 }
 
 static void
@@ -335,8 +341,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
        name = arch_vma_name(vma);
        if (!name) {
-               pid_t tid;
-
                if (!mm) {
                        name = "[vdso]";
                        goto done;
@@ -348,21 +352,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
                        goto done;
                }
 
-               tid = pid_of_stack(priv, vma, is_pid);
-               if (tid != 0) {
-                       /*
-                        * Thread stack in /proc/PID/task/TID/maps or
-                        * the main process stack.
-                        */
-                       if (!is_pid || (vma->vm_start <= mm->start_stack &&
-                           vma->vm_end >= mm->start_stack)) {
-                               name = "[stack]";
-                       } else {
-                               /* Thread stack in /proc/PID/maps */
-                               seq_pad(m, ' ');
-                               seq_printf(m, "[stack:%d]", tid);
-                       }
-               }
+               if (is_stack(priv, vma, is_pid))
+                       name = "[stack]";
        }
 
 done:
@@ -1552,18 +1543,19 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
 {
+       pte_t huge_pte = huge_ptep_get(pte);
        struct numa_maps *md;
        struct page *page;
 
-       if (!pte_present(*pte))
+       if (!pte_present(huge_pte))
                return 0;
 
-       page = pte_page(*pte);
+       page = pte_page(huge_pte);
        if (!page)
                return 0;
 
        md = walk->private;
-       gather_stats(page, md, pte_dirty(*pte), 1);
+       gather_stats(page, md, pte_dirty(huge_pte), 1);
        return 0;
 }
 
@@ -1617,19 +1609,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
                seq_file_path(m, file, "\n\t= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_puts(m, " heap");
-       } else {
-               pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
-               if (tid != 0) {
-                       /*
-                        * Thread stack in /proc/PID/task/TID/maps or
-                        * the main process stack.
-                        */
-                       if (!is_pid || (vma->vm_start <= mm->start_stack &&
-                           vma->vm_end >= mm->start_stack))
-                               seq_puts(m, " stack");
-                       else
-                               seq_printf(m, " stack:%d", tid);
-               }
+       } else if (is_stack(proc_priv, vma, is_pid)) {
+               seq_puts(m, " stack");
        }
 
        if (is_vm_hugetlb_page(vma))
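
The new is_stack() helper above changes what /proc/PID/maps reports: the main process stack keeps its "[stack]" tag, while the per-thread "[stack:TID]" annotation is dropped (thread stacks are still tagged "[stack]" in /proc/PID/task/TID/maps). A quick way to see the effect is to scan one's own maps file; the sketch below is ordinary user-space C, unrelated to the kernel change itself.

/*
 * Print the map entries tagged as stack for the current process.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[stack]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}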
index e0d64c92e4f6576c38a8a4a7cc9b8d13a2b3362e..faacb0c0d857602111bfc04f2e374c451059c358 100644 (file)
@@ -123,23 +123,26 @@ unsigned long task_statm(struct mm_struct *mm,
        return size;
 }
 
-static pid_t pid_of_stack(struct proc_maps_private *priv,
-                               struct vm_area_struct *vma, bool is_pid)
+static int is_stack(struct proc_maps_private *priv,
+                   struct vm_area_struct *vma, int is_pid)
 {
-       struct inode *inode = priv->inode;
-       struct task_struct *task;
-       pid_t ret = 0;
-
-       rcu_read_lock();
-       task = pid_task(proc_pid(inode), PIDTYPE_PID);
-       if (task) {
-               task = task_of_stack(task, vma, is_pid);
+       struct mm_struct *mm = vma->vm_mm;
+       int stack = 0;
+
+       if (is_pid) {
+               stack = vma->vm_start <= mm->start_stack &&
+                       vma->vm_end >= mm->start_stack;
+       } else {
+               struct inode *inode = priv->inode;
+               struct task_struct *task;
+
+               rcu_read_lock();
+               task = pid_task(proc_pid(inode), PIDTYPE_PID);
                if (task)
-                       ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+                       stack = vma_is_stack_for_task(vma, task);
+               rcu_read_unlock();
        }
-       rcu_read_unlock();
-
-       return ret;
+       return stack;
 }
 
 /*
@@ -181,21 +184,9 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "");
-       } else if (mm) {
-               pid_t tid = pid_of_stack(priv, vma, is_pid);
-
-               if (tid != 0) {
-                       seq_pad(m, ' ');
-                       /*
-                        * Thread stack in /proc/PID/task/TID/maps or
-                        * the main process stack.
-                        */
-                       if (!is_pid || (vma->vm_start <= mm->start_stack &&
-                           vma->vm_end >= mm->start_stack))
-                               seq_printf(m, "[stack]");
-                       else
-                               seq_printf(m, "[stack:%d]", tid);
-               }
+       } else if (mm && is_stack(priv, vma, is_pid)) {
+               seq_pad(m, ' ');
+               seq_printf(m, "[stack]");
        }
 
        seq_putc(m, '\n');
index 3c3b81bb6dfebbd3d6c3f1f8f1252e46308bf9c9..7e0137bde6d6f23ed44d3ab82c9d32b6aecc8cc3 100644 (file)
@@ -2031,6 +2031,21 @@ int dquot_commit_info(struct super_block *sb, int type)
 }
 EXPORT_SYMBOL(dquot_commit_info);
 
+int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+       struct quota_info *dqopt = sb_dqopt(sb);
+       int err;
+
+       if (!dqopt->ops[qid->type]->get_next_id)
+               return -ENOSYS;
+       mutex_lock(&dqopt->dqio_mutex);
+       err = dqopt->ops[qid->type]->get_next_id(sb, qid);
+       mutex_unlock(&dqopt->dqio_mutex);
+
+       return err;
+}
+EXPORT_SYMBOL(dquot_get_next_id);
+
 /*
  * Definitions of diskquota operations.
  */
@@ -2042,6 +2057,7 @@ const struct dquot_operations dquot_operations = {
        .write_info     = dquot_commit_info,
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
+       .get_next_id    = dquot_get_next_id,
 };
 EXPORT_SYMBOL(dquot_operations);
 
@@ -2565,6 +2581,27 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
 }
 EXPORT_SYMBOL(dquot_get_dqblk);
 
+int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
+                        struct qc_dqblk *di)
+{
+       struct dquot *dquot;
+       int err;
+
+       if (!sb->dq_op->get_next_id)
+               return -ENOSYS;
+       err = sb->dq_op->get_next_id(sb, qid);
+       if (err < 0)
+               return err;
+       dquot = dqget(sb, *qid);
+       if (IS_ERR(dquot))
+               return PTR_ERR(dquot);
+       do_get_dqblk(dquot, di);
+       dqput(dquot);
+
+       return 0;
+}
+EXPORT_SYMBOL(dquot_get_next_dqblk);
+
 #define VFS_QC_MASK \
        (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
         QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
@@ -2765,6 +2802,7 @@ const struct quotactl_ops dquot_quotactl_ops = {
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
+       .get_nextdqblk  = dquot_get_next_dqblk,
        .set_dqblk      = dquot_set_dqblk
 };
 EXPORT_SYMBOL(dquot_quotactl_ops);
@@ -2776,6 +2814,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = {
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
+       .get_nextdqblk  = dquot_get_next_dqblk,
        .set_dqblk      = dquot_set_dqblk
 };
 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
index 3746367098fda369120ccf0eab4ca0249ed64cb4..8e297c92f7d4c18588c0e76ab0831d663f9268ad 100644 (file)
@@ -79,7 +79,7 @@ unsigned int qtype_enforce_flag(int type)
        return 0;
 }
 
-static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
+static int quota_quotaon(struct super_block *sb, int type, qid_t id,
                         struct path *path)
 {
        if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
@@ -222,6 +222,34 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
        return 0;
 }
 
+/*
+ * Return quota for next active quota >= this id, if any exists,
+ * otherwise return -ENOENT via ->get_nextdqblk
+ */
+static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
+                         void __user *addr)
+{
+       struct kqid qid;
+       struct qc_dqblk fdq;
+       struct if_nextdqblk idq;
+       int ret;
+
+       if (!sb->s_qcop->get_nextdqblk)
+               return -ENOSYS;
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+       ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
+       if (ret)
+               return ret;
+       /* struct if_nextdqblk is a superset of struct if_dqblk */
+       copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
+       idq.dqb_id = from_kqid(current_user_ns(), qid);
+       if (copy_to_user(addr, &idq, sizeof(idq)))
+               return -EFAULT;
+       return 0;
+}
+
 static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
 {
        dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
@@ -625,6 +653,34 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
        return ret;
 }
 
+/*
+ * Return quota for next active quota >= this id, if any exists,
+ * otherwise return -ENOENT via ->get_nextdqblk.
+ */
+static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
+                           void __user *addr)
+{
+       struct fs_disk_quota fdq;
+       struct qc_dqblk qdq;
+       struct kqid qid;
+       qid_t id_out;
+       int ret;
+
+       if (!sb->s_qcop->get_nextdqblk)
+               return -ENOSYS;
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+       ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
+       if (ret)
+               return ret;
+       id_out = from_kqid(current_user_ns(), qid);
+       copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
+       if (copy_to_user(addr, &fdq, sizeof(fdq)))
+               return -EFAULT;
+       return ret;
+}
+
 static int quota_rmxquota(struct super_block *sb, void __user *addr)
 {
        __u32 flags;
@@ -659,7 +715,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 
        switch (cmd) {
        case Q_QUOTAON:
-               return quota_quotaon(sb, type, cmd, id, path);
+               return quota_quotaon(sb, type, id, path);
        case Q_QUOTAOFF:
                return quota_quotaoff(sb, type);
        case Q_GETFMT:
@@ -670,6 +726,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
                return quota_setinfo(sb, type, addr);
        case Q_GETQUOTA:
                return quota_getquota(sb, type, id, addr);
+       case Q_GETNEXTQUOTA:
+               return quota_getnextquota(sb, type, id, addr);
        case Q_SETQUOTA:
                return quota_setquota(sb, type, id, addr);
        case Q_SYNC:
@@ -690,6 +748,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
                return quota_setxquota(sb, type, id, addr);
        case Q_XGETQUOTA:
                return quota_getxquota(sb, type, id, addr);
+       case Q_XGETNEXTQUOTA:
+               return quota_getnextxquota(sb, type, id, addr);
        case Q_XQUOTASYNC:
                if (sb->s_flags & MS_RDONLY)
                        return -EROFS;
@@ -708,10 +768,13 @@ static int quotactl_cmd_write(int cmd)
        switch (cmd) {
        case Q_GETFMT:
        case Q_GETINFO:
+       case Q_GETQUOTA:
+       case Q_GETNEXTQUOTA:
        case Q_SYNC:
        case Q_XGETQSTAT:
        case Q_XGETQSTATV:
        case Q_XGETQUOTA:
+       case Q_XGETNEXTQUOTA:
        case Q_XQUOTASYNC:
                return 0;
        }
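
The Q_GETNEXTQUOTA / Q_XGETNEXTQUOTA commands wired into do_quotactl() above let user space walk all active quotas without probing every possible id. The following is a hedged user-space illustration of that loop; it assumes a libc whose <sys/quota.h> pulls in the uapi definitions of Q_GETNEXTQUOTA and struct if_nextdqblk, and "/dev/sda1" is only a placeholder for the filesystem's block device.

/*
 * Walk all active user quotas: ask for the first quota whose id is >= id,
 * print it, then resume just past the id that was returned.
 */
#include <stdio.h>
#include <errno.h>
#include <sys/quota.h>

int main(void)
{
	struct if_nextdqblk dq;
	unsigned int id = 0;

	for (;;) {
		if (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), "/dev/sda1",
			     id, (char *)&dq) < 0) {
			if (errno != ENOENT)	/* ENOENT: no quota >= id */
				perror("quotactl");
			break;
		}
		printf("uid %u uses %llu bytes\n",
		       dq.dqb_id, (unsigned long long)dq.dqb_curspace);
		id = dq.dqb_id + 1;	/* resume after the returned id */
	}
	return 0;
}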
index 58efb83dec1c870c672a5330fcb4571323c73914..0738972e8d3f01c6469ea2f5c0917e89ba26e81a 100644 (file)
@@ -22,10 +22,9 @@ MODULE_LICENSE("GPL");
 
 #define __QUOTA_QT_PARANOIA
 
-static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
+static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
 {
        unsigned int epb = info->dqi_usable_bs >> 2;
-       qid_t id = from_kqid(&init_user_ns, qid);
 
        depth = info->dqi_qtree_depth - depth - 1;
        while (depth--)
@@ -33,6 +32,13 @@ static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
        return id % epb;
 }
 
+static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
+{
+       qid_t id = from_kqid(&init_user_ns, qid);
+
+       return __get_index(info, id, depth);
+}
+
 /* Number of entries in one blocks */
 static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
 {
@@ -668,3 +674,60 @@ int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
        return 0;
 }
 EXPORT_SYMBOL(qtree_release_dquot);
+
+static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
+                       unsigned int blk, int depth)
+{
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       __le32 *ref = (__le32 *)buf;
+       ssize_t ret;
+       unsigned int epb = info->dqi_usable_bs >> 2;
+       unsigned int level_inc = 1;
+       int i;
+
+       if (!buf)
+               return -ENOMEM;
+
+       for (i = depth; i < info->dqi_qtree_depth - 1; i++)
+               level_inc *= epb;
+
+       ret = read_blk(info, blk, buf);
+       if (ret < 0) {
+               quota_error(info->dqi_sb,
+                           "Can't read quota tree block %u", blk);
+               goto out_buf;
+       }
+       for (i = __get_index(info, *id, depth); i < epb; i++) {
+               if (ref[i] == cpu_to_le32(0)) {
+                       *id += level_inc;
+                       continue;
+               }
+               if (depth == info->dqi_qtree_depth - 1) {
+                       ret = 0;
+                       goto out_buf;
+               }
+               ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
+               if (ret != -ENOENT)
+                       break;
+       }
+       if (i == epb) {
+               ret = -ENOENT;
+               goto out_buf;
+       }
+out_buf:
+       kfree(buf);
+       return ret;
+}
+
+int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
+{
+       qid_t id = from_kqid(&init_user_ns, *qid);
+       int ret;
+
+       ret = find_next_id(info, &id, QT_TREEOFF, 0);
+       if (ret < 0)
+               return ret;
+       *qid = make_kqid(&init_user_ns, qid->type, id);
+       return 0;
+}
+EXPORT_SYMBOL(qtree_get_next_id);
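
find_next_id() above prunes whole subtrees: when a reference slot at a given depth is empty, the candidate id can jump ahead by epb raised to the number of remaining levels instead of being tested one value at a time. The toy program below only reproduces that stride arithmetic with made-up numbers; it is not part of the patch.

/*
 * Reproduce the level_inc computation from find_next_id(): an empty slot
 * at depth d skips epb^(tree_depth - 1 - d) ids in one step.
 */
#include <stdio.h>

int main(void)
{
	unsigned int epb = 256;		/* entries per block (block size / 4) */
	int tree_depth = 4;		/* dqi_qtree_depth in the real code */

	for (int depth = 0; depth < tree_depth; depth++) {
		unsigned int level_inc = 1;

		for (int i = depth; i < tree_depth - 1; i++)
			level_inc *= epb;
		printf("empty slot at depth %d skips %u ids\n",
		       depth, level_inc);
	}
	return 0;
}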
index ed85d4f35c04b40aab3754ad42e483c4de3a1f1e..ca71bf881ad1e33d11c1fdd10b71070c5079358d 100644 (file)
@@ -304,6 +304,11 @@ static int v2_free_file_info(struct super_block *sb, int type)
        return 0;
 }
 
+static int v2_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+       return qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid);
+}
+
 static const struct quota_format_ops v2_format_ops = {
        .check_quota_file       = v2_check_quota_file,
        .read_file_info         = v2_read_file_info,
@@ -312,6 +317,7 @@ static const struct quota_format_ops v2_format_ops = {
        .read_dqblk             = v2_read_dquot,
        .commit_dqblk           = v2_write_dquot,
        .release_dqblk          = v2_release_dquot,
+       .get_next_id            = v2_get_next_id,
 };
 
 static struct quota_format_type v2r0_quota_format = {
index c0306ec8ed7b8f5d4eef97fd1cf4c7653c2b1f20..b8f2d1e8c6453c373655be98c03c1118acd71f2c 100644 (file)
@@ -802,6 +802,7 @@ static const struct dquot_operations reiserfs_quota_operations = {
        .write_info = reiserfs_write_info,
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
+       .get_next_id    = dquot_get_next_id,
 };
 
 static const struct quotactl_ops reiserfs_qctl_operations = {
index b94fa6c3c6ebe8c7308aea0052f3afacad9745c4..053818dd6c18be8f228e1c40483e50d78aa78007 100644 (file)
@@ -153,7 +153,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
        if (isalarm(ctx))
                remaining = alarm_expires_remaining(&ctx->t.alarm);
        else
-               remaining = hrtimer_expires_remaining(&ctx->t.tmr);
+               remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
 
        return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
 }
index 541d9c65014dce91e2207759fe5e1e02db60cae5..b51b371b874a0bcace50e83aeba7d3a06f55c67a 100644 (file)
@@ -45,7 +45,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        int block, iblock;
        loff_t nf_pos;
        int flen;
-       unsigned char *fname = NULL;
+       unsigned char *fname = NULL, *copy_name = NULL;
        unsigned char *nameptr;
        uint16_t liu;
        uint8_t lfi;
@@ -143,7 +143,15 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                        if (poffset >= lfi) {
                                nameptr = (char *)(fibh.ebh->b_data + poffset - lfi);
                        } else {
-                               nameptr = fname;
+                               if (!copy_name) {
+                                       copy_name = kmalloc(UDF_NAME_LEN,
+                                                           GFP_NOFS);
+                                       if (!copy_name) {
+                                               ret = -ENOMEM;
+                                               goto out;
+                                       }
+                               }
+                               nameptr = copy_name;
                                memcpy(nameptr, fi->fileIdent + liu,
                                       lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
@@ -185,6 +193,7 @@ out:
        brelse(fibh.sbh);
        brelse(epos.bh);
        kfree(fname);
+       kfree(copy_name);
 
        return ret;
 }
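
The readdir change above only allocates the copy_name scratch buffer when a directory entry's name is split across two blocks, so the common case avoids the kmalloc entirely and the buffer is created at most once per scan. The fragment below is a generic, hedged sketch of that allocate-on-first-need pattern in plain C; assemble_name and its arguments are invented for illustration, not the UDF code itself.

/*
 * Return the contiguous part directly when possible; otherwise allocate a
 * scratch buffer once and stitch the two halves together in it.
 */
#include <stdlib.h>
#include <string.h>

static const char *assemble_name(const char *part1, size_t len1,
				 const char *part2, size_t len2,
				 char **scratch, size_t scratch_len)
{
	if (len2 == 0)
		return part1;			/* fast path: name is contiguous */

	if (len1 + len2 > scratch_len)
		return NULL;
	if (!*scratch) {			/* slow path: allocate once */
		*scratch = malloc(scratch_len);
		if (!*scratch)
			return NULL;
	}
	memcpy(*scratch, part1, len1);
	memcpy(*scratch + len1, part2, len2);
	return *scratch;
}

The caller frees the scratch buffer once after the whole scan, mirroring the single kfree(copy_name) added in the cleanup path above.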
index 42eafb91f7ff3c092fd38f6fb6136862ca6508c8..a2ba11eca9955ea873b25e93075273d2e7837ed8 100644 (file)
@@ -165,7 +165,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
        struct fileIdentDesc *fi = NULL;
        loff_t f_pos;
        int block, flen;
-       unsigned char *fname = NULL;
+       unsigned char *fname = NULL, *copy_name = NULL;
        unsigned char *nameptr;
        uint8_t lfi;
        uint16_t liu;
@@ -236,7 +236,15 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                                nameptr = (uint8_t *)(fibh->ebh->b_data +
                                                      poffset - lfi);
                        else {
-                               nameptr = fname;
+                               if (!copy_name) {
+                                       copy_name = kmalloc(UDF_NAME_LEN,
+                                                           GFP_NOFS);
+                                       if (!copy_name) {
+                                               fi = ERR_PTR(-ENOMEM);
+                                               goto out_err;
+                                       }
+                               }
+                               nameptr = copy_name;
                                memcpy(nameptr, fi->fileIdent + liu,
                                        lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
@@ -279,6 +287,7 @@ out_err:
 out_ok:
        brelse(epos.bh);
        kfree(fname);
+       kfree(copy_name);
 
        return fi;
 }
@@ -291,7 +300,7 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
        struct udf_fileident_bh fibh;
        struct fileIdentDesc *fi;
 
-       if (dentry->d_name.len > UDF_NAME_LEN - 2)
+       if (dentry->d_name.len > UDF_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
 #ifdef UDF_RECOVERY
@@ -351,7 +360,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
        struct udf_inode_info *dinfo;
 
        fibh->sbh = fibh->ebh = NULL;
-       name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+       name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS);
        if (!name) {
                *err = -ENOMEM;
                goto out_err;
@@ -362,8 +371,9 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                        *err = -EINVAL;
                        goto out_err;
                }
-               namelen = udf_put_filename(sb, dentry->d_name.name, name,
-                                                dentry->d_name.len);
+               namelen = udf_put_filename(sb, dentry->d_name.name,
+                                          dentry->d_name.len,
+                                          name, UDF_NAME_LEN_CS0);
                if (!namelen) {
                        *err = -ENAMETOOLONG;
                        goto out_err;
@@ -914,7 +924,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 
        iinfo = UDF_I(inode);
        down_write(&iinfo->i_data_sem);
-       name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+       name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS);
        if (!name) {
                err = -ENOMEM;
                goto out_no_entry;
@@ -997,8 +1007,9 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
                }
 
                if (pc->componentType == 5) {
-                       namelen = udf_put_filename(sb, compstart, name,
-                                                  symname - compstart);
+                       namelen = udf_put_filename(sb, compstart,
+                                                  symname - compstart,
+                                                  name, UDF_NAME_LEN_CS0);
                        if (!namelen)
                                goto out_no_entry;
 
index a522c15a0bfd7e5e9e0d758bfbc59185a8cb2609..fa92fe839fda2f989e3d52c368cc7520b80679e5 100644 (file)
@@ -887,18 +887,14 @@ static int udf_find_fileset(struct super_block *sb,
 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 {
        struct primaryVolDesc *pvoldesc;
-       struct ustr *instr, *outstr;
+       uint8_t *outstr;
        struct buffer_head *bh;
        uint16_t ident;
        int ret = -ENOMEM;
 
-       instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
-       if (!instr)
-               return -ENOMEM;
-
-       outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
+       outstr = kmalloc(128, GFP_NOFS);
        if (!outstr)
-               goto out1;
+               return -ENOMEM;
 
        bh = udf_read_tagged(sb, block, block, &ident);
        if (!bh) {
@@ -923,31 +919,25 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 #endif
        }
 
-       if (!udf_build_ustr(instr, pvoldesc->volIdent, 32)) {
-               ret = udf_CS0toUTF8(outstr, instr);
-               if (ret < 0)
-                       goto out_bh;
+       ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
+       if (ret < 0)
+               goto out_bh;
 
-               strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name,
-                       outstr->u_len > 31 ? 31 : outstr->u_len);
-               udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
-       }
+       strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
+       udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
 
-       if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128)) {
-               ret = udf_CS0toUTF8(outstr, instr);
-               if (ret < 0)
-                       goto out_bh;
+       ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
+       if (ret < 0)
+               goto out_bh;
 
-               udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
-       }
+       outstr[ret] = 0;
+       udf_debug("volSetIdent[] = '%s'\n", outstr);
 
        ret = 0;
 out_bh:
        brelse(bh);
 out2:
        kfree(outstr);
-out1:
-       kfree(instr);
        return ret;
 }
 
@@ -2358,7 +2348,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
                                          le32_to_cpu(lvidiu->numDirs)) : 0)
                        + buf->f_bfree;
        buf->f_ffree = buf->f_bfree;
-       buf->f_namelen = UDF_NAME_LEN - 2;
+       buf->f_namelen = UDF_NAME_LEN;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
 
index fa0044b6b81d226223aa7ea61dd56c9f83e146e9..972b70625614f837310e39d41f6d8e836d205144 100644 (file)
@@ -49,8 +49,8 @@ extern __printf(3, 4) void _udf_warn(struct super_block *sb,
 #define UDF_EXTENT_FLAG_MASK   0xC0000000
 
 #define UDF_NAME_PAD           4
-#define UDF_NAME_LEN           256
-#define UDF_PATH_LEN           1023
+#define UDF_NAME_LEN           254
+#define UDF_NAME_LEN_CS0       255
 
 static inline size_t udf_file_entry_alloc_offset(struct inode *inode)
 {
@@ -106,12 +106,6 @@ struct generic_desc {
        __le32          volDescSeqNum;
 };
 
-struct ustr {
-       uint8_t u_cmpID;
-       uint8_t u_name[UDF_NAME_LEN - 2];
-       uint8_t u_len;
-};
-
 
 /* super.c */
 
@@ -214,12 +208,11 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
 }
 
 /* unicode.c */
-extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
-                           int);
-extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
-                           int);
-extern int udf_build_ustr(struct ustr *, dstring *, int);
-extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
+extern int udf_get_filename(struct super_block *, const uint8_t *, int,
+                           uint8_t *, int);
+extern int udf_put_filename(struct super_block *, const uint8_t *, int,
+                           uint8_t *, int);
+extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
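
The reworked udf_CS0toUTF8() prototype above takes raw buffers instead of struct ustr. As a reference for what the conversion does, here is a stand-alone sketch of the OSTA CS0 to UTF-8 decode: byte 0 is the compression ID (8 = one byte per code point, 16 = two big-endian bytes), and the remaining bytes are code units. Buffer sizing and error handling in the kernel version differ; this is only an illustration with made-up sample data.

/*
 * Decode an OSTA Compressed Unicode (CS0) string into UTF-8.
 */
#include <stdio.h>
#include <stdint.h>

static int cs0_to_utf8(const uint8_t *ocu, int ocu_len, char *out, int out_len)
{
	int i = 1, o = 0;
	uint8_t cmp_id;

	if (ocu_len < 1)
		return -1;
	cmp_id = ocu[0];
	if (cmp_id != 8 && cmp_id != 16)
		return -1;
	while (i < ocu_len) {
		uint32_t c = ocu[i++];

		if (cmp_id == 16) {
			if (i >= ocu_len)
				break;
			c = (c << 8) | ocu[i++];
		}
		if (c < 0x80) {
			if (o + 1 > out_len) break;
			out[o++] = (char)c;
		} else if (c < 0x800) {
			if (o + 2 > out_len) break;
			out[o++] = (char)(0xc0 | (c >> 6));
			out[o++] = (char)(0x80 | (c & 0x3f));
		} else {
			if (o + 3 > out_len) break;
			out[o++] = (char)(0xe0 | (c >> 12));
			out[o++] = (char)(0x80 | ((c >> 6) & 0x3f));
			out[o++] = (char)(0x80 | (c & 0x3f));
		}
	}
	return o;
}

int main(void)
{
	const uint8_t vol_ident[] = { 8, 'L', 'i', 'n', 'u', 'x', 'U', 'D', 'F' };
	char buf[32];
	int n = cs0_to_utf8(vol_ident, sizeof(vol_ident), buf, sizeof(buf) - 1);

	if (n >= 0) {
		buf[n] = '\0';
		printf("%s\n", buf);	/* prints "LinuxUDF" */
	}
	return 0;
}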
index e788a05aab83670de6be03dc48eb519e2d2ae5d9..3ff42f4437f3eb3374fea6b1cd0e839b71b65577 100644 (file)
 
 #include "udf_sb.h"
 
-static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
-                                 int);
-
-static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
-{
-       if ((!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN - 2))
-               return 0;
-
-       memset(dest, 0, sizeof(struct ustr));
-       memcpy(dest->u_name, src, strlen);
-       dest->u_cmpID = 0x08;
-       dest->u_len = strlen;
-
-       return strlen;
-}
-
-/*
- * udf_build_ustr
- */
-int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
-{
-       int usesize;
-
-       if (!dest || !ptr || !size)
-               return -1;
-       BUG_ON(size < 2);
-
-       usesize = min_t(size_t, ptr[size - 1], sizeof(dest->u_name));
-       usesize = min(usesize, size - 2);
-       dest->u_cmpID = ptr[0];
-       dest->u_len = usesize;
-       memcpy(dest->u_name, ptr + 1, usesize);
-       memset(dest->u_name + usesize, 0, sizeof(dest->u_name) - usesize);
-
-       return 0;
-}
-
-/*
- * udf_build_ustr_exact
- */
-static void udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
-{
-       memset(dest, 0, sizeof(struct ustr));
-       dest->u_cmpID = ptr[0];
-       dest->u_len = exactsize - 1;
-       memcpy(dest->u_name, ptr + 1, exactsize - 1);
-}
-
-/*
- * udf_CS0toUTF8
- *
- * PURPOSE
- *     Convert OSTA Compressed Unicode to the UTF-8 equivalent.
- *
- * PRE-CONDITIONS
- *     utf                     Pointer to UTF-8 output buffer.
- *     ocu                     Pointer to OSTA Compressed Unicode input buffer
- *                             of size UDF_NAME_LEN bytes.
- *                             both of type "struct ustr *"
- *
- * POST-CONDITIONS
- *     <return>                >= 0 on success.
- *
- * HISTORY
- *     November 12, 1997 - Andrew E. Mileski
- *     Written, tested, and released.
- */
-int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
+static int udf_uni2char_utf8(wchar_t uni,
+                            unsigned char *out,
+                            int boundlen)
 {
-       const uint8_t *ocu;
-       uint8_t cmp_id, ocu_len;
-       int i;
-
-       ocu_len = ocu_i->u_len;
-       if (ocu_len == 0) {
-               memset(utf_o, 0, sizeof(struct ustr));
-               return 0;
-       }
-
-       cmp_id = ocu_i->u_cmpID;
-       if (cmp_id != 8 && cmp_id != 16) {
-               memset(utf_o, 0, sizeof(struct ustr));
-               pr_err("unknown compression code (%d) stri=%s\n",
-                      cmp_id, ocu_i->u_name);
-               return -EINVAL;
-       }
-
-       ocu = ocu_i->u_name;
-       utf_o->u_len = 0;
-       for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
-
-               /* Expand OSTA compressed Unicode to Unicode */
-               uint32_t c = ocu[i++];
-               if (cmp_id == 16)
-                       c = (c << 8) | ocu[i++];
-
-               /* Compress Unicode to UTF-8 */
-               if (c < 0x80U)
-                       utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
-               else if (c < 0x800U) {
-                       if (utf_o->u_len > (UDF_NAME_LEN - 4))
-                               break;
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0xc0 | (c >> 6));
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0x80 | (c & 0x3f));
-               } else {
-                       if (utf_o->u_len > (UDF_NAME_LEN - 5))
-                               break;
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0xe0 | (c >> 12));
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0x80 |
-                                                         ((c >> 6) & 0x3f));
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0x80 | (c & 0x3f));
-               }
+       int u_len = 0;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       if (uni < 0x80) {
+               out[u_len++] = (unsigned char)uni;
+       } else if (uni < 0x800) {
+               if (boundlen < 2)
+                       return -ENAMETOOLONG;
+               out[u_len++] = (unsigned char)(0xc0 | (uni >> 6));
+               out[u_len++] = (unsigned char)(0x80 | (uni & 0x3f));
+       } else {
+               if (boundlen < 3)
+                       return -ENAMETOOLONG;
+               out[u_len++] = (unsigned char)(0xe0 | (uni >> 12));
+               out[u_len++] = (unsigned char)(0x80 | ((uni >> 6) & 0x3f));
+               out[u_len++] = (unsigned char)(0x80 | (uni & 0x3f));
        }
-       utf_o->u_cmpID = 8;
-
-       return utf_o->u_len;
+       return u_len;
 }
 
-/*
- *
- * udf_UTF8toCS0
- *
- * PURPOSE
- *     Convert UTF-8 to the OSTA Compressed Unicode equivalent.
- *
- * DESCRIPTION
- *     This routine is only called by udf_lookup().
- *
- * PRE-CONDITIONS
- *     ocu                     Pointer to OSTA Compressed Unicode output
- *                             buffer of size UDF_NAME_LEN bytes.
- *     utf                     Pointer to UTF-8 input buffer.
- *     utf_len                 Length of UTF-8 input buffer in bytes.
- *
- * POST-CONDITIONS
- *     <return>                Zero on success.
- *
- * HISTORY
- *     November 12, 1997 - Andrew E. Mileski
- *     Written, tested, and released.
- */
-static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
+static int udf_char2uni_utf8(const unsigned char *in,
+                            int boundlen,
+                            wchar_t *uni)
 {
-       unsigned c, i, max_val, utf_char;
-       int utf_cnt, u_len, u_ch;
-
-       memset(ocu, 0, sizeof(dstring) * length);
-       ocu[0] = 8;
-       max_val = 0xffU;
-       u_ch = 1;
+       unsigned int utf_char;
+       unsigned char c;
+       int utf_cnt, u_len;
 
-try_again:
-       u_len = 0U;
-       utf_char = 0U;
-       utf_cnt = 0U;
-       for (i = 0U; i < utf->u_len; i++) {
-               /* Name didn't fit? */
-               if (u_len + 1 + u_ch >= length)
-                       return 0;
-
-               c = (uint8_t)utf->u_name[i];
+       utf_char = 0;
+       utf_cnt = 0;
+       for (u_len = 0; u_len < boundlen;) {
+               c = in[u_len++];
 
                /* Complete a multi-byte UTF-8 character */
                if (utf_cnt) {
-                       utf_char = (utf_char << 6) | (c & 0x3fU);
+                       utf_char = (utf_char << 6) | (c & 0x3f);
                        if (--utf_cnt)
                                continue;
                } else {
                        /* Check for a multi-byte UTF-8 character */
-                       if (c & 0x80U) {
+                       if (c & 0x80) {
                                /* Start a multi-byte UTF-8 character */
-                               if ((c & 0xe0U) == 0xc0U) {
-                                       utf_char = c & 0x1fU;
+                               if ((c & 0xe0) == 0xc0) {
+                                       utf_char = c & 0x1f;
                                        utf_cnt = 1;
-                               } else if ((c & 0xf0U) == 0xe0U) {
-                                       utf_char = c & 0x0fU;
+                               } else if ((c & 0xf0) == 0xe0) {
+                                       utf_char = c & 0x0f;
                                        utf_cnt = 2;
-                               } else if ((c & 0xf8U) == 0xf0U) {
-                                       utf_char = c & 0x07U;
+                               } else if ((c & 0xf8) == 0xf0) {
+                                       utf_char = c & 0x07;
                                        utf_cnt = 3;
-                               } else if ((c & 0xfcU) == 0xf8U) {
-                                       utf_char = c & 0x03U;
+                               } else if ((c & 0xfc) == 0xf8) {
+                                       utf_char = c & 0x03;
                                        utf_cnt = 4;
-                               } else if ((c & 0xfeU) == 0xfcU) {
-                                       utf_char = c & 0x01U;
+                               } else if ((c & 0xfe) == 0xfc) {
+                                       utf_char = c & 0x01;
                                        utf_cnt = 5;
                                } else {
-                                       goto error_out;
+                                       utf_cnt = -1;
+                                       break;
                                }
                                continue;
                        } else {
@@ -228,97 +101,216 @@ try_again:
                                utf_char = c;
                        }
                }
-
-               /* Choose no compression if necessary */
-               if (utf_char > max_val) {
-                       if (max_val == 0xffU) {
-                               max_val = 0xffffU;
-                               ocu[0] = (uint8_t)0x10U;
-                               u_ch = 2;
-                               goto try_again;
-                       }
-                       goto error_out;
-               }
-
-               if (max_val == 0xffffU)
-                       ocu[++u_len] = (uint8_t)(utf_char >> 8);
-               ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
+               *uni = utf_char;
+               break;
        }
-
        if (utf_cnt) {
-error_out:
-               ocu[++u_len] = '?';
-               printk(KERN_DEBUG pr_fmt("bad UTF-8 character\n"));
+               *uni = '?';
+               return -EINVAL;
        }
+       return u_len;
+}
 
-       ocu[length - 1] = (uint8_t)u_len + 1;
+#define ILLEGAL_CHAR_MARK      '_'
+#define EXT_MARK               '.'
+#define CRC_MARK               '#'
+#define EXT_SIZE               5
+/* Number of chars we need to store generated CRC to make filename unique */
+#define CRC_LEN                        5
+
+static int udf_name_conv_char(uint8_t *str_o, int str_o_max_len,
+                             int *str_o_idx,
+                             const uint8_t *str_i, int str_i_max_len,
+                             int *str_i_idx,
+                             int u_ch, int *needsCRC,
+                             int (*conv_f)(wchar_t, unsigned char *, int),
+                             int translate)
+{
+       uint32_t c;
+       int illChar = 0;
+       int len, gotch = 0;
+
+       for (; (!gotch) && (*str_i_idx < str_i_max_len); *str_i_idx += u_ch) {
+               if (*str_o_idx >= str_o_max_len) {
+                       *needsCRC = 1;
+                       return gotch;
+               }
 
-       return u_len + 1;
+               /* Expand OSTA compressed Unicode to Unicode */
+               c = str_i[*str_i_idx];
+               if (u_ch > 1)
+                       c = (c << 8) | str_i[*str_i_idx + 1];
+
+               if (translate && (c == '/' || c == 0))
+                       illChar = 1;
+               else if (illChar)
+                       break;
+               else
+                       gotch = 1;
+       }
+       if (illChar) {
+               *needsCRC = 1;
+               c = ILLEGAL_CHAR_MARK;
+               gotch = 1;
+       }
+       if (gotch) {
+               len = conv_f(c, &str_o[*str_o_idx], str_o_max_len - *str_o_idx);
+               /* Valid character? */
+               if (len >= 0)
+                       *str_o_idx += len;
+               else if (len == -ENAMETOOLONG) {
+                       *needsCRC = 1;
+                       gotch = 0;
+               } else {
+                       str_o[(*str_o_idx)++] = '?';
+                       *needsCRC = 1;
+               }
+       }
+       return gotch;
 }
 
-static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
-                       const struct ustr *ocu_i)
+static int udf_name_from_CS0(uint8_t *str_o, int str_max_len,
+                            const uint8_t *ocu, int ocu_len,
+                            int (*conv_f)(wchar_t, unsigned char *, int),
+                            int translate)
 {
-       const uint8_t *ocu;
-       uint8_t cmp_id, ocu_len;
-       int i, len;
+       uint32_t c;
+       uint8_t cmp_id;
+       int idx, len;
+       int u_ch;
+       int needsCRC = 0;
+       int ext_i_len, ext_max_len;
+       int str_o_len = 0;      /* Length of resulting output */
+       int ext_o_len = 0;      /* Extension output length */
+       int ext_crc_len = 0;    /* Extension output length if used with CRC */
+       int i_ext = -1;         /* Extension position in input buffer */
+       int o_crc = 0;          /* Rightmost possible output pos for CRC+ext */
+       unsigned short valueCRC;
+       uint8_t ext[EXT_SIZE * NLS_MAX_CHARSET_SIZE + 1];
+       uint8_t crc[CRC_LEN];
 
+       if (str_max_len <= 0)
+               return 0;
 
-       ocu_len = ocu_i->u_len;
        if (ocu_len == 0) {
-               memset(utf_o, 0, sizeof(struct ustr));
+               memset(str_o, 0, str_max_len);
                return 0;
        }
 
-       cmp_id = ocu_i->u_cmpID;
+       cmp_id = ocu[0];
        if (cmp_id != 8 && cmp_id != 16) {
-               memset(utf_o, 0, sizeof(struct ustr));
-               pr_err("unknown compression code (%d) stri=%s\n",
-                      cmp_id, ocu_i->u_name);
+               memset(str_o, 0, str_max_len);
+               pr_err("unknown compression code (%d)\n", cmp_id);
                return -EINVAL;
        }
+       u_ch = cmp_id >> 3;
 
-       ocu = ocu_i->u_name;
-       utf_o->u_len = 0;
-       for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
-               /* Expand OSTA compressed Unicode to Unicode */
-               uint32_t c = ocu[i++];
-               if (cmp_id == 16)
-                       c = (c << 8) | ocu[i++];
+       ocu++;
+       ocu_len--;
 
-               len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
-                                   UDF_NAME_LEN - 2 - utf_o->u_len);
-               /* Valid character? */
-               if (len >= 0)
-                       utf_o->u_len += len;
-               else
-                       utf_o->u_name[utf_o->u_len++] = '?';
+       if (ocu_len % u_ch) {
+               pr_err("incorrect filename length (%d)\n", ocu_len + 1);
+               return -EINVAL;
+       }
+
+       if (translate) {
+               /* Look for extension */
+               for (idx = ocu_len - u_ch, ext_i_len = 0;
+                    (idx >= 0) && (ext_i_len < EXT_SIZE);
+                    idx -= u_ch, ext_i_len++) {
+                       c = ocu[idx];
+                       if (u_ch > 1)
+                               c = (c << 8) | ocu[idx + 1];
+
+                       if (c == EXT_MARK) {
+                               if (ext_i_len)
+                                       i_ext = idx;
+                               break;
+                       }
+               }
+               if (i_ext >= 0) {
+                       /* Convert extension */
+                       ext_max_len = min_t(int, sizeof(ext), str_max_len);
+                       ext[ext_o_len++] = EXT_MARK;
+                       idx = i_ext + u_ch;
+                       while (udf_name_conv_char(ext, ext_max_len, &ext_o_len,
+                                                 ocu, ocu_len, &idx,
+                                                 u_ch, &needsCRC,
+                                                 conv_f, translate)) {
+                               if ((ext_o_len + CRC_LEN) < str_max_len)
+                                       ext_crc_len = ext_o_len;
+                       }
+               }
        }
-       utf_o->u_cmpID = 8;
 
-       return utf_o->u_len;
+       idx = 0;
+       while (1) {
+               if (translate && (idx == i_ext)) {
+                       if (str_o_len > (str_max_len - ext_o_len))
+                               needsCRC = 1;
+                       break;
+               }
+
+               if (!udf_name_conv_char(str_o, str_max_len, &str_o_len,
+                                       ocu, ocu_len, &idx,
+                                       u_ch, &needsCRC, conv_f, translate))
+                       break;
+
+               if (translate &&
+                   (str_o_len <= (str_max_len - ext_o_len - CRC_LEN)))
+                       o_crc = str_o_len;
+       }
+
+       if (translate) {
+               if (str_o_len <= 2 && str_o[0] == '.' &&
+                   (str_o_len == 1 || str_o[1] == '.'))
+                       needsCRC = 1;
+               if (needsCRC) {
+                       str_o_len = o_crc;
+                       valueCRC = crc_itu_t(0, ocu, ocu_len);
+                       crc[0] = CRC_MARK;
+                       crc[1] = hex_asc_upper_hi(valueCRC >> 8);
+                       crc[2] = hex_asc_upper_lo(valueCRC >> 8);
+                       crc[3] = hex_asc_upper_hi(valueCRC);
+                       crc[4] = hex_asc_upper_lo(valueCRC);
+                       len = min_t(int, CRC_LEN, str_max_len - str_o_len);
+                       memcpy(&str_o[str_o_len], crc, len);
+                       str_o_len += len;
+                       ext_o_len = ext_crc_len;
+               }
+               if (ext_o_len > 0) {
+                       memcpy(&str_o[str_o_len], ext, ext_o_len);
+                       str_o_len += ext_o_len;
+               }
+       }
+
+       return str_o_len;
 }
 
-static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
-                       int length)
+static int udf_name_to_CS0(uint8_t *ocu, int ocu_max_len,
+                          const uint8_t *str_i, int str_len,
+                          int (*conv_f)(const unsigned char *, int, wchar_t *))
 {
-       int len;
-       unsigned i, max_val;
-       uint16_t uni_char;
+       int i, len;
+       unsigned int max_val;
+       wchar_t uni_char;
        int u_len, u_ch;
 
-       memset(ocu, 0, sizeof(dstring) * length);
+       if (ocu_max_len <= 0)
+               return 0;
+
+       memset(ocu, 0, ocu_max_len);
        ocu[0] = 8;
-       max_val = 0xffU;
+       max_val = 0xff;
        u_ch = 1;
 
 try_again:
-       u_len = 0U;
-       for (i = 0U; i < uni->u_len; i++) {
+       u_len = 1;
+       for (i = 0; i < str_len; i++) {
                /* Name didn't fit? */
-               if (u_len + 1 + u_ch >= length)
+               if (u_len + u_ch > ocu_max_len)
                        return 0;
-               len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
+               len = conv_f(&str_i[i], str_len - i, &uni_char);
                if (!len)
                        continue;
                /* Invalid character, deal with it */
@@ -328,187 +320,65 @@ try_again:
                }
 
                if (uni_char > max_val) {
-                       max_val = 0xffffU;
-                       ocu[0] = (uint8_t)0x10U;
+                       max_val = 0xffff;
+                       ocu[0] = 0x10;
                        u_ch = 2;
                        goto try_again;
                }
 
-               if (max_val == 0xffffU)
-                       ocu[++u_len] = (uint8_t)(uni_char >> 8);
-               ocu[++u_len] = (uint8_t)(uni_char & 0xffU);
+               if (max_val == 0xffff)
+                       ocu[u_len++] = (uint8_t)(uni_char >> 8);
+               ocu[u_len++] = (uint8_t)(uni_char & 0xff);
                i += len - 1;
        }
 
-       ocu[length - 1] = (uint8_t)u_len + 1;
-       return u_len + 1;
+       return u_len;
 }
 
-int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
+int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len)
+{
+       return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len,
+                                udf_uni2char_utf8, 0);
+}
+
+int udf_get_filename(struct super_block *sb, const uint8_t *sname, int slen,
                     uint8_t *dname, int dlen)
 {
-       struct ustr *filename, *unifilename;
+       int (*conv_f)(wchar_t, unsigned char *, int);
        int ret;
 
        if (!slen)
                return -EIO;
 
-       filename = kmalloc(sizeof(struct ustr), GFP_NOFS);
-       if (!filename)
-               return -ENOMEM;
-
-       unifilename = kmalloc(sizeof(struct ustr), GFP_NOFS);
-       if (!unifilename) {
-               ret = -ENOMEM;
-               goto out1;
-       }
+       if (dlen <= 0)
+               return 0;
 
-       udf_build_ustr_exact(unifilename, sname, slen);
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
-               ret = udf_CS0toUTF8(filename, unifilename);
-               if (ret < 0) {
-                       udf_debug("Failed in udf_get_filename: sname = %s\n",
-                                 sname);
-                       goto out2;
-               }
+               conv_f = udf_uni2char_utf8;
        } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
-               ret = udf_CS0toNLS(UDF_SB(sb)->s_nls_map, filename,
-                                  unifilename);
-               if (ret < 0) {
-                       udf_debug("Failed in udf_get_filename: sname = %s\n",
-                                 sname);
-                       goto out2;
-               }
+               conv_f = UDF_SB(sb)->s_nls_map->uni2char;
        } else
                BUG();
 
-       ret = udf_translate_to_linux(dname, dlen,
-                                    filename->u_name, filename->u_len,
-                                    unifilename->u_name, unifilename->u_len);
+       ret = udf_name_from_CS0(dname, dlen, sname, slen, conv_f, 1);
        /* Zero length filename isn't valid... */
        if (ret == 0)
                ret = -EINVAL;
-out2:
-       kfree(unifilename);
-out1:
-       kfree(filename);
        return ret;
 }
 
-int udf_put_filename(struct super_block *sb, const uint8_t *sname,
-                    uint8_t *dname, int flen)
+int udf_put_filename(struct super_block *sb, const uint8_t *sname, int slen,
+                    uint8_t *dname, int dlen)
 {
-       struct ustr unifilename;
-       int namelen;
-
-       if (!udf_char_to_ustr(&unifilename, sname, flen))
-               return 0;
+       int (*conv_f)(const unsigned char *, int, wchar_t *);
 
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
-               namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN);
-               if (!namelen)
-                       return 0;
+               conv_f = udf_char2uni_utf8;
        } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
-               namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname,
-                                       &unifilename, UDF_NAME_LEN);
-               if (!namelen)
-                       return 0;
+               conv_f = UDF_SB(sb)->s_nls_map->char2uni;
        } else
-               return 0;
+               BUG();
 
-       return namelen;
+       return udf_name_to_CS0(dname, dlen, sname, slen, conv_f);
 }
 
-#define ILLEGAL_CHAR_MARK      '_'
-#define EXT_MARK               '.'
-#define CRC_MARK               '#'
-#define EXT_SIZE               5
-/* Number of chars we need to store generated CRC to make filename unique */
-#define CRC_LEN                        5
-
-static int udf_translate_to_linux(uint8_t *newName, int newLen,
-                                 uint8_t *udfName, int udfLen,
-                                 uint8_t *fidName, int fidNameLen)
-{
-       int index, newIndex = 0, needsCRC = 0;
-       int extIndex = 0, newExtIndex = 0, hasExt = 0;
-       unsigned short valueCRC;
-       uint8_t curr;
-
-       if (udfName[0] == '.' &&
-           (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
-               needsCRC = 1;
-               newIndex = udfLen;
-               memcpy(newName, udfName, udfLen);
-       } else {
-               for (index = 0; index < udfLen; index++) {
-                       curr = udfName[index];
-                       if (curr == '/' || curr == 0) {
-                               needsCRC = 1;
-                               curr = ILLEGAL_CHAR_MARK;
-                               while (index + 1 < udfLen &&
-                                               (udfName[index + 1] == '/' ||
-                                                udfName[index + 1] == 0))
-                                       index++;
-                       }
-                       if (curr == EXT_MARK &&
-                                       (udfLen - index - 1) <= EXT_SIZE) {
-                               if (udfLen == index + 1)
-                                       hasExt = 0;
-                               else {
-                                       hasExt = 1;
-                                       extIndex = index;
-                                       newExtIndex = newIndex;
-                               }
-                       }
-                       if (newIndex < newLen)
-                               newName[newIndex++] = curr;
-                       else
-                               needsCRC = 1;
-               }
-       }
-       if (needsCRC) {
-               uint8_t ext[EXT_SIZE];
-               int localExtIndex = 0;
-
-               if (hasExt) {
-                       int maxFilenameLen;
-                       for (index = 0;
-                            index < EXT_SIZE && extIndex + index + 1 < udfLen;
-                            index++) {
-                               curr = udfName[extIndex + index + 1];
-
-                               if (curr == '/' || curr == 0) {
-                                       needsCRC = 1;
-                                       curr = ILLEGAL_CHAR_MARK;
-                                       while (extIndex + index + 2 < udfLen &&
-                                             (index + 1 < EXT_SIZE &&
-                                               (udfName[extIndex + index + 2] == '/' ||
-                                                udfName[extIndex + index + 2] == 0)))
-                                               index++;
-                               }
-                               ext[localExtIndex++] = curr;
-                       }
-                       maxFilenameLen = newLen - CRC_LEN - localExtIndex;
-                       if (newIndex > maxFilenameLen)
-                               newIndex = maxFilenameLen;
-                       else
-                               newIndex = newExtIndex;
-               } else if (newIndex > newLen - CRC_LEN)
-                       newIndex = newLen - CRC_LEN;
-               newName[newIndex++] = CRC_MARK;
-               valueCRC = crc_itu_t(0, fidName, fidNameLen);
-               newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
-               newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8);
-               newName[newIndex++] = hex_asc_upper_hi(valueCRC);
-               newName[newIndex++] = hex_asc_upper_lo(valueCRC);
-
-               if (hasExt) {
-                       newName[newIndex++] = EXT_MARK;
-                       for (index = 0; index < localExtIndex; index++)
-                               newName[newIndex++] = ext[index];
-               }
-       }
-
-       return newIndex;
-}
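
As a rough illustration of the behaviour the udf hunks above fold into udf_name_from_CS0() with its translate flag: characters that are illegal in a Linux name are replaced with '_' and, when that happens, a '#'-prefixed hex suffix derived from the raw name keeps mangled names unique. The sketch below is a simplified userspace version under that assumption; crc16_toy() and the buffer handling are invented for the sketch and merely stand in for crc_itu_t() and the kernel's length bookkeeping (extension preservation is omitted).

/* Simplified sketch of the "translate" step; helpers here are hypothetical. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t crc16_toy(const uint8_t *p, size_t len)
{
        uint16_t crc = 0;
        while (len--)
                crc = (uint16_t)((crc << 5) ^ (crc >> 3) ^ *p++);
        return crc;
}

static size_t translate_name(char *dst, size_t dlen, const char *src)
{
        size_t i, out = 0;
        int mangled = 0;

        for (i = 0; src[i] && out + 6 < dlen; i++) {   /* keep room for "#XXXX" */
                char c = src[i];

                if (c == '/') {                        /* illegal in a Linux name */
                        c = '_';
                        mangled = 1;
                }
                dst[out++] = c;
        }
        if (mangled)
                out += snprintf(dst + out, dlen - out, "#%04X",
                                crc16_toy((const uint8_t *)src, strlen(src)));
        dst[out] = '\0';
        return out;
}

int main(void)
{
        char buf[64];

        translate_name(buf, sizeof(buf), "bad/name.txt");
        printf("%s\n", buf);   /* prints e.g. bad_name.txt#1A2B */
        return 0;
}
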
index 444626ddbd1b9ba2baca676b48a62d90299d515e..d9b42425291e37c6a4845c21dd0e1f61d8a76e86 100644 (file)
@@ -118,8 +118,6 @@ xfs_allocbt_free_block(
        xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
                              XFS_EXTENT_BUSY_SKIP_DISCARD);
        xfs_trans_agbtree_delta(cur->bc_tp, -1);
-
-       xfs_trans_binval(cur->bc_tp, bp);
        return 0;
 }
 
index 919756e3ba53591a9132849a6dc7c57771f1aebb..90928bbe693c03bcb5a74aecaac421ba3132bebe 100644 (file)
  * Small attribute lists are packed as tightly as possible so as
  * to fit into the literal area of the inode.
  */
-
-/*
- * Entries are packed toward the top as tight as possible.
- */
-typedef struct xfs_attr_shortform {
-       struct xfs_attr_sf_hdr {        /* constant-structure header block */
-               __be16  totsize;        /* total bytes in shortform list */
-               __u8    count;  /* count of active entries */
-       } hdr;
-       struct xfs_attr_sf_entry {
-               __uint8_t namelen;      /* actual length of name (no NULL) */
-               __uint8_t valuelen;     /* actual length of value (no NULL) */
-               __uint8_t flags;        /* flags bits (see xfs_attr_leaf.h) */
-               __uint8_t nameval[1];   /* name & value bytes concatenated */
-       } list[1];                      /* variable sized array */
-} xfs_attr_shortform_t;
 typedef struct xfs_attr_sf_hdr xfs_attr_sf_hdr_t;
 typedef struct xfs_attr_sf_entry xfs_attr_sf_entry_t;
 
index ef00156f4f9616178006df4ef6248b1abb2297ab..6a051662d8f9b2d0dfb184689ddc9130cd9bec74 100644 (file)
@@ -912,7 +912,7 @@ xfs_bmap_local_to_extents(
         * We don't want to deal with the case of keeping inode data inline yet.
         * So sending the data fork of a regular inode is invalid.
         */
-       ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
+       ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
 
@@ -1079,7 +1079,7 @@ xfs_bmap_add_attrfork_local(
        if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
                return 0;
 
-       if (S_ISDIR(ip->i_d.di_mode)) {
+       if (S_ISDIR(VFS_I(ip)->i_mode)) {
                memset(&dargs, 0, sizeof(dargs));
                dargs.geo = ip->i_mount->m_dir_geo;
                dargs.dp = ip;
@@ -1091,7 +1091,7 @@ xfs_bmap_add_attrfork_local(
                return xfs_dir2_sf_to_block(&dargs);
        }
 
-       if (S_ISLNK(ip->i_d.di_mode))
+       if (S_ISLNK(VFS_I(ip)->i_mode))
                return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
                                                 flags, XFS_DATA_FORK,
                                                 xfs_symlink_local_to_remote);
index 1637c37bfbaa1cb61ef69e48c52eb95716ecd649..e37508ae589b11ef4009129c82b3a3cce98f3ca6 100644 (file)
@@ -531,7 +531,6 @@ xfs_bmbt_free_block(
 
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
-       xfs_trans_binval(tp, bp);
        return 0;
 }
 
index a0eb18ce3ad38f205f5f3487ea4684937b7b287c..1f88e1ce770f35442f0161466632c68fe0e46153 100644 (file)
@@ -294,6 +294,21 @@ xfs_btree_sblock_verify_crc(
        return true;
 }
 
+static int
+xfs_btree_free_block(
+       struct xfs_btree_cur    *cur,
+       struct xfs_buf          *bp)
+{
+       int                     error;
+
+       error = cur->bc_ops->free_block(cur, bp);
+       if (!error) {
+               xfs_trans_binval(cur->bc_tp, bp);
+               XFS_BTREE_STATS_INC(cur, free);
+       }
+       return error;
+}
+
 /*
  * Delete the btree cursor.
  */
@@ -3209,6 +3224,7 @@ xfs_btree_kill_iroot(
        int                     level;
        int                     index;
        int                     numrecs;
+       int                     error;
 #ifdef DEBUG
        union xfs_btree_ptr     ptr;
        int                     i;
@@ -3272,8 +3288,6 @@ xfs_btree_kill_iroot(
        cpp = xfs_btree_ptr_addr(cur, 1, cblock);
 #ifdef DEBUG
        for (i = 0; i < numrecs; i++) {
-               int             error;
-
                error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
                if (error) {
                        XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
@@ -3283,8 +3297,11 @@ xfs_btree_kill_iroot(
 #endif
        xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
 
-       cur->bc_ops->free_block(cur, cbp);
-       XFS_BTREE_STATS_INC(cur, free);
+       error = xfs_btree_free_block(cur, cbp);
+       if (error) {
+               XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+               return error;
+       }
 
        cur->bc_bufs[level - 1] = NULL;
        be16_add_cpu(&block->bb_level, -1);
@@ -3317,14 +3334,12 @@ xfs_btree_kill_root(
         */
        cur->bc_ops->set_root(cur, newroot, -1);
 
-       error = cur->bc_ops->free_block(cur, bp);
+       error = xfs_btree_free_block(cur, bp);
        if (error) {
                XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
                return error;
        }
 
-       XFS_BTREE_STATS_INC(cur, free);
-
        cur->bc_bufs[level] = NULL;
        cur->bc_ra[level] = 0;
        cur->bc_nlevels--;
@@ -3830,10 +3845,9 @@ xfs_btree_delrec(
        }
 
        /* Free the deleted block. */
-       error = cur->bc_ops->free_block(cur, rbp);
+       error = xfs_btree_free_block(cur, rbp);
        if (error)
                goto error0;
-       XFS_BTREE_STATS_INC(cur, free);
 
        /*
         * If we joined with the left neighbor, set the buffer in the
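
The xfs_btree.c hunks above introduce xfs_btree_free_block() so that every caller stops open-coding the free, buffer invalidation, and statistics bump. A minimal standalone sketch of that consolidation pattern follows; the cursor type and helper names are invented for the sketch and only mirror the shape of the real change.

/* Sketch: wrap the repeated "free, then clean up on success" pair in one helper. */
#include <stdio.h>

struct cursor { int freed_blocks; };

static int backend_free(struct cursor *cur)  { (void)cur; return 0; }  /* may fail */
static void invalidate_buffer(struct cursor *cur) { (void)cur; }

static int free_block(struct cursor *cur)
{
        int error = backend_free(cur);

        if (!error) {                 /* only invalidate and count on success */
                invalidate_buffer(cur);
                cur->freed_blocks++;
        }
        return error;
}

int main(void)
{
        struct cursor cur = { 0 };

        if (!free_block(&cur))
                printf("freed %d block(s)\n", cur.freed_blocks);
        return 0;
}
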
index b14bbd6bb05fad090571bcada4e4867e35040b87..8d4d8bce41bf7873fec0fc8211801207a0a46494 100644 (file)
@@ -641,6 +641,22 @@ xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp)
  */
 #define XFS_ATTR_LEAF_MAPSIZE  3       /* how many freespace slots */
 
+/*
+ * Entries are packed toward the top as tight as possible.
+ */
+typedef struct xfs_attr_shortform {
+       struct xfs_attr_sf_hdr {        /* constant-structure header block */
+               __be16  totsize;        /* total bytes in shortform list */
+               __u8    count;  /* count of active entries */
+       } hdr;
+       struct xfs_attr_sf_entry {
+               __uint8_t namelen;      /* actual length of name (no NULL) */
+               __uint8_t valuelen;     /* actual length of value (no NULL) */
+               __uint8_t flags;        /* flags bits (see xfs_attr_leaf.h) */
+               __uint8_t nameval[1];   /* name & value bytes concatenated */
+       } list[1];                      /* variable sized array */
+} xfs_attr_shortform_t;
+
 typedef struct xfs_attr_leaf_map {     /* RLE map of free bytes */
        __be16  base;                     /* base of free region */
        __be16  size;                     /* length of free region */
index 2fb53a5c0a745259d2e18164e8505cb21d704724..af0f9d171f8a012758d778a0bd105e51448e5cf3 100644 (file)
@@ -176,7 +176,7 @@ xfs_dir_isempty(
 {
        xfs_dir2_sf_hdr_t       *sfp;
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        if (dp->i_d.di_size == 0)       /* might happen during shutdown. */
                return 1;
        if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
@@ -231,7 +231,7 @@ xfs_dir_init(
        struct xfs_da_args *args;
        int             error;
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino);
        if (error)
                return error;
@@ -266,7 +266,7 @@ xfs_dir_createname(
        int                     rval;
        int                     v;              /* type-checking value */
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        if (inum) {
                rval = xfs_dir_ino_validate(tp->t_mountp, inum);
                if (rval)
@@ -364,7 +364,7 @@ xfs_dir_lookup(
        int             v;              /* type-checking value */
        int             lock_mode;
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        XFS_STATS_INC(dp->i_mount, xs_dir_lookup);
 
        /*
@@ -443,7 +443,7 @@ xfs_dir_removename(
        int             rval;
        int             v;              /* type-checking value */
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        XFS_STATS_INC(dp->i_mount, xs_dir_remove);
 
        args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
@@ -505,7 +505,7 @@ xfs_dir_replace(
        int             rval;
        int             v;              /* type-checking value */
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
 
        rval = xfs_dir_ino_validate(tp->t_mountp, inum);
        if (rval)
index c679f3c05b63cb535de34e7de5543a75ac57faac..89c21d771e35edbc026eb7fe7cb373280774b162 100644 (file)
@@ -125,16 +125,8 @@ xfs_inobt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
 {
-       xfs_fsblock_t           fsbno;
-       int                     error;
-
-       fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp));
-       error = xfs_free_extent(cur->bc_tp, fsbno, 1);
-       if (error)
-               return error;
-
-       xfs_trans_binval(cur->bc_tp, bp);
-       return error;
+       return xfs_free_extent(cur->bc_tp,
+                       XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1);
 }
 
 STATIC int
index 1aabfda669b0bb7bb85dbc8d9889ffd452d17c79..9d9559eb2835a33621e568392fab2c1074022da3 100644 (file)
@@ -195,28 +195,50 @@ xfs_imap_to_bp(
 }
 
 void
-xfs_dinode_from_disk(
-       xfs_icdinode_t          *to,
-       xfs_dinode_t            *from)
+xfs_inode_from_disk(
+       struct xfs_inode        *ip,
+       struct xfs_dinode       *from)
 {
-       to->di_magic = be16_to_cpu(from->di_magic);
-       to->di_mode = be16_to_cpu(from->di_mode);
-       to->di_version = from ->di_version;
+       struct xfs_icdinode     *to = &ip->i_d;
+       struct inode            *inode = VFS_I(ip);
+
+
+       /*
+        * Convert v1 inodes immediately to v2 inode format as this is the
+        * minimum inode version format we support in the rest of the code.
+        */
+       to->di_version = from->di_version;
+       if (to->di_version == 1) {
+               set_nlink(inode, be16_to_cpu(from->di_onlink));
+               to->di_projid_lo = 0;
+               to->di_projid_hi = 0;
+               to->di_version = 2;
+       } else {
+               set_nlink(inode, be32_to_cpu(from->di_nlink));
+               to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
+               to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
+       }
+
        to->di_format = from->di_format;
-       to->di_onlink = be16_to_cpu(from->di_onlink);
        to->di_uid = be32_to_cpu(from->di_uid);
        to->di_gid = be32_to_cpu(from->di_gid);
-       to->di_nlink = be32_to_cpu(from->di_nlink);
-       to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
-       to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
-       memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
        to->di_flushiter = be16_to_cpu(from->di_flushiter);
-       to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
-       to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
-       to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
-       to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
-       to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
-       to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
+
+       /*
+        * Time is signed, so need to convert to signed 32 bit before
+        * storing in inode timestamp which may be 64 bit. Otherwise
+        * a time before epoch is converted to a time long after epoch
+        * on 64 bit systems.
+        */
+       inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
+       inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
+       inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
+       inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
+       inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
+       inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
+       inode->i_generation = be32_to_cpu(from->di_gen);
+       inode->i_mode = be16_to_cpu(from->di_mode);
+
        to->di_size = be64_to_cpu(from->di_size);
        to->di_nblocks = be64_to_cpu(from->di_nblocks);
        to->di_extsize = be32_to_cpu(from->di_extsize);
@@ -227,42 +249,96 @@ xfs_dinode_from_disk(
        to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
        to->di_dmstate  = be16_to_cpu(from->di_dmstate);
        to->di_flags    = be16_to_cpu(from->di_flags);
-       to->di_gen      = be32_to_cpu(from->di_gen);
 
        if (to->di_version == 3) {
-               to->di_changecount = be64_to_cpu(from->di_changecount);
+               inode->i_version = be64_to_cpu(from->di_changecount);
                to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
                to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
                to->di_flags2 = be64_to_cpu(from->di_flags2);
-               to->di_ino = be64_to_cpu(from->di_ino);
-               to->di_lsn = be64_to_cpu(from->di_lsn);
-               memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
-               uuid_copy(&to->di_uuid, &from->di_uuid);
        }
 }
 
 void
-xfs_dinode_to_disk(
-       xfs_dinode_t            *to,
-       xfs_icdinode_t          *from)
+xfs_inode_to_disk(
+       struct xfs_inode        *ip,
+       struct xfs_dinode       *to,
+       xfs_lsn_t               lsn)
+{
+       struct xfs_icdinode     *from = &ip->i_d;
+       struct inode            *inode = VFS_I(ip);
+
+       to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
+       to->di_onlink = 0;
+
+       to->di_version = from->di_version;
+       to->di_format = from->di_format;
+       to->di_uid = cpu_to_be32(from->di_uid);
+       to->di_gid = cpu_to_be32(from->di_gid);
+       to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
+       to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
+
+       memset(to->di_pad, 0, sizeof(to->di_pad));
+       to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
+       to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
+       to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
+       to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
+       to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
+       to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
+       to->di_nlink = cpu_to_be32(inode->i_nlink);
+       to->di_gen = cpu_to_be32(inode->i_generation);
+       to->di_mode = cpu_to_be16(inode->i_mode);
+
+       to->di_size = cpu_to_be64(from->di_size);
+       to->di_nblocks = cpu_to_be64(from->di_nblocks);
+       to->di_extsize = cpu_to_be32(from->di_extsize);
+       to->di_nextents = cpu_to_be32(from->di_nextents);
+       to->di_anextents = cpu_to_be16(from->di_anextents);
+       to->di_forkoff = from->di_forkoff;
+       to->di_aformat = from->di_aformat;
+       to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
+       to->di_dmstate = cpu_to_be16(from->di_dmstate);
+       to->di_flags = cpu_to_be16(from->di_flags);
+
+       if (from->di_version == 3) {
+               to->di_changecount = cpu_to_be64(inode->i_version);
+               to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
+               to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
+               to->di_flags2 = cpu_to_be64(from->di_flags2);
+
+               to->di_ino = cpu_to_be64(ip->i_ino);
+               to->di_lsn = cpu_to_be64(lsn);
+               memset(to->di_pad2, 0, sizeof(to->di_pad2));
+               uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
+               to->di_flushiter = 0;
+       } else {
+               to->di_flushiter = cpu_to_be16(from->di_flushiter);
+       }
+}
+
+void
+xfs_log_dinode_to_disk(
+       struct xfs_log_dinode   *from,
+       struct xfs_dinode       *to)
 {
        to->di_magic = cpu_to_be16(from->di_magic);
        to->di_mode = cpu_to_be16(from->di_mode);
-       to->di_version = from ->di_version;
+       to->di_version = from->di_version;
        to->di_format = from->di_format;
-       to->di_onlink = cpu_to_be16(from->di_onlink);
+       to->di_onlink = 0;
        to->di_uid = cpu_to_be32(from->di_uid);
        to->di_gid = cpu_to_be32(from->di_gid);
        to->di_nlink = cpu_to_be32(from->di_nlink);
        to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
        to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
        memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
+
        to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
        to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
        to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
        to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
        to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
        to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
+
        to->di_size = cpu_to_be64(from->di_size);
        to->di_nblocks = cpu_to_be64(from->di_nblocks);
        to->di_extsize = cpu_to_be32(from->di_extsize);
@@ -367,13 +443,10 @@ xfs_iread(
            !(mp->m_flags & XFS_MOUNT_IKEEP)) {
                /* initialise the on-disk inode core */
                memset(&ip->i_d, 0, sizeof(ip->i_d));
-               ip->i_d.di_magic = XFS_DINODE_MAGIC;
-               ip->i_d.di_gen = prandom_u32();
-               if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               VFS_I(ip)->i_generation = prandom_u32();
+               if (xfs_sb_version_hascrc(&mp->m_sb))
                        ip->i_d.di_version = 3;
-                       ip->i_d.di_ino = ip->i_ino;
-                       uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid);
-               } else
+               else
                        ip->i_d.di_version = 2;
                return 0;
        }
@@ -403,7 +476,7 @@ xfs_iread(
         * Otherwise, just get the truly permanent information.
         */
        if (dip->di_mode) {
-               xfs_dinode_from_disk(&ip->i_d, dip);
+               xfs_inode_from_disk(ip, dip);
                error = xfs_iformat_fork(ip, dip);
                if (error)  {
 #ifdef DEBUG
@@ -417,16 +490,10 @@ xfs_iread(
                 * Partial initialisation of the in-core inode. Just the bits
                 * that xfs_ialloc won't overwrite or relies on being correct.
                 */
-               ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
                ip->i_d.di_version = dip->di_version;
-               ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
+               VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
                ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
 
-               if (dip->di_version == 3) {
-                       ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
-                       uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
-               }
-
                /*
                 * Make sure to pull in the mode here as well in
                 * case the inode is released without being used.
@@ -434,25 +501,10 @@ xfs_iread(
                 * the inode is already free and not try to mess
                 * with the uninitialized part of it.
                 */
-               ip->i_d.di_mode = 0;
-       }
-
-       /*
-        * Automatically convert version 1 inode formats in memory to version 2
-        * inode format. If the inode is modified, it will get logged and
-        * rewritten as a version 2 inode. We can do this because we set the
-        * superblock feature bit for v2 inodes unconditionally during mount
-        * and it means the reast of the code can assume the inode version is 2
-        * or higher.
-        */
-       if (ip->i_d.di_version == 1) {
-               ip->i_d.di_version = 2;
-               memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
-               ip->i_d.di_nlink = ip->i_d.di_onlink;
-               ip->i_d.di_onlink = 0;
-               xfs_set_projid(ip, 0);
+               VFS_I(ip)->i_mode = 0;
        }
 
+       ASSERT(ip->i_d.di_version >= 2);
        ip->i_delayed_blks = 0;
 
        /*
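
The timestamp-conversion comment in the xfs_inode_from_disk() hunk above explains why the on-disk 32-bit seconds are cast to a signed int before being stored in a possibly 64-bit inode timestamp. A small standalone sketch of that point, with made-up variable names:

/* Sketch: pre-epoch times must sign-extend, not zero-extend, when widened. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t on_disk = 0xFFFFFFF0u;            /* -16 seconds, before the epoch */

        int64_t wrong = (int64_t)on_disk;          /* zero-extended: 4294967280 */
        int64_t right = (int64_t)(int32_t)on_disk; /* sign-extended: -16 */

        printf("wrong=%lld right=%lld\n", (long long)wrong, (long long)right);
        return 0;
}
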
index 9308c47f2a527dc08b75b66de5d064e0b13e0cfe..7c4dd321b2152915c2d9075222b4d757a08539cf 100644 (file)
 
 struct xfs_inode;
 struct xfs_dinode;
-struct xfs_icdinode;
+
+/*
+ * In memory representation of the XFS inode. This is held in the in-core struct
+ * xfs_inode and represents the current on disk values but the structure is not
+ * in on-disk format.  That is, this structure is always translated to on-disk
+ * format specific structures at the appropriate time.
+ */
+struct xfs_icdinode {
+       __int8_t        di_version;     /* inode version */
+       __int8_t        di_format;      /* format of di_c data */
+       __uint16_t      di_flushiter;   /* incremented on flush */
+       __uint32_t      di_uid;         /* owner's user id */
+       __uint32_t      di_gid;         /* owner's group id */
+       __uint16_t      di_projid_lo;   /* lower part of owner's project id */
+       __uint16_t      di_projid_hi;   /* higher part of owner's project id */
+       xfs_fsize_t     di_size;        /* number of bytes in file */
+       xfs_rfsblock_t  di_nblocks;     /* # of direct & btree blocks used */
+       xfs_extlen_t    di_extsize;     /* basic/minimum extent size for file */
+       xfs_extnum_t    di_nextents;    /* number of extents in data fork */
+       xfs_aextnum_t   di_anextents;   /* number of extents in attribute fork*/
+       __uint8_t       di_forkoff;     /* attr fork offs, <<3 for 64b align */
+       __int8_t        di_aformat;     /* format of attr fork's data */
+       __uint32_t      di_dmevmask;    /* DMIG event mask */
+       __uint16_t      di_dmstate;     /* DMIG state info */
+       __uint16_t      di_flags;       /* random flags, XFS_DIFLAG_... */
+
+       __uint64_t      di_flags2;      /* more random flags */
+
+       xfs_ictimestamp_t di_crtime;    /* time created */
+};
 
 /*
  * Inode location information.  Stored in the inode and passed to
@@ -38,8 +67,11 @@ int  xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
 int    xfs_iread(struct xfs_mount *, struct xfs_trans *,
                  struct xfs_inode *, uint);
 void   xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *);
-void   xfs_dinode_to_disk(struct xfs_dinode *to, struct xfs_icdinode *from);
-void   xfs_dinode_from_disk(struct xfs_icdinode *to, struct xfs_dinode *from);
+void   xfs_inode_to_disk(struct xfs_inode *ip, struct xfs_dinode *to,
+                         xfs_lsn_t lsn);
+void   xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
+void   xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
+                              struct xfs_dinode *to);
 
 #if defined(DEBUG)
 void   xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
index 0defbd02f62d58bb36e62f2cc4738cf862de4e1c..11faf7df14c8099e49759f51f0315dd5caec6632 100644 (file)
@@ -31,6 +31,7 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_attr_sf.h"
+#include "xfs_da_format.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -120,7 +121,7 @@ xfs_iformat_fork(
                return -EFSCORRUPTED;
        }
 
-       switch (ip->i_d.di_mode & S_IFMT) {
+       switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
index 2653146904153178d474172bcacfec55e7742907..d54a8018b079dd3f0c078e5fdf56cf48a151a545 100644 (file)
@@ -290,6 +290,7 @@ typedef struct xfs_inode_log_format_64 {
        __int32_t               ilf_boffset;    /* off of inode in buffer */
 } xfs_inode_log_format_64_t;
 
+
 /*
  * Flags for xfs_trans_log_inode flags field.
  */
@@ -360,15 +361,15 @@ typedef struct xfs_ictimestamp {
 } xfs_ictimestamp_t;
 
 /*
- * NOTE:  This structure must be kept identical to struct xfs_dinode
- *       except for the endianness annotations.
+ * Define the format of the inode core that is logged. This structure must be
+ * kept identical to struct xfs_dinode except for the endianness annotations.
  */
-typedef struct xfs_icdinode {
+struct xfs_log_dinode {
        __uint16_t      di_magic;       /* inode magic # = XFS_DINODE_MAGIC */
        __uint16_t      di_mode;        /* mode and type of file */
        __int8_t        di_version;     /* inode version */
        __int8_t        di_format;      /* format of di_c data */
-       __uint16_t      di_onlink;      /* old number of links to file */
+       __uint8_t       di_pad3[2];     /* unused in v2/3 inodes */
        __uint32_t      di_uid;         /* owner's user id */
        __uint32_t      di_gid;         /* owner's group id */
        __uint32_t      di_nlink;       /* number of links to file */
@@ -407,13 +408,13 @@ typedef struct xfs_icdinode {
        uuid_t          di_uuid;        /* UUID of the filesystem */
 
        /* structure must be padded to 64 bit alignment */
-} xfs_icdinode_t;
+};
 
-static inline uint xfs_icdinode_size(int version)
+static inline uint xfs_log_dinode_size(int version)
 {
        if (version == 3)
-               return sizeof(struct xfs_icdinode);
-       return offsetof(struct xfs_icdinode, di_next_unlinked);
+               return sizeof(struct xfs_log_dinode);
+       return offsetof(struct xfs_log_dinode, di_next_unlinked);
 }
 
 /*
@@ -495,6 +496,8 @@ enum xfs_blft {
        XFS_BLFT_ATTR_LEAF_BUF,
        XFS_BLFT_ATTR_RMT_BUF,
        XFS_BLFT_SB_BUF,
+       XFS_BLFT_RTBITMAP_BUF,
+       XFS_BLFT_RTSUMMARY_BUF,
        XFS_BLFT_MAX_BUF = (1 << XFS_BLFT_BITS),
 };
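
The new xfs_log_dinode_size() above sizes the logged inode core with offsetof() for the older version, so only the fields up to a marker member are logged, while version 3 logs the whole structure. The sketch below shows that sizing trick on a made-up structure; it is not the real xfs_log_dinode layout.

/* Sketch: partial-structure sizing via offsetof() versus sizeof(). */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_dinode {
        uint16_t magic;
        uint16_t mode;
        uint64_t size;
        uint32_t next_unlinked;     /* the v3-only tail starts here */
        uint64_t crc_and_friends;
};

static size_t demo_dinode_size(int version)
{
        if (version == 3)
                return sizeof(struct demo_dinode);
        return offsetof(struct demo_dinode, next_unlinked);
}

int main(void)
{
        printf("v2 logs %zu bytes, v3 logs %zu bytes\n",
               demo_dinode_size(2), demo_dinode_size(3));
        return 0;
}
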
 
index f51078f1e92ad4e29b6dbe4c9e4fe8f46f9b2be6..8eed51275bb39b4466f8457b3dc7ba0aa592a583 100644 (file)
@@ -37,7 +37,7 @@ typedef __uint16_t    xfs_qwarncnt_t;
 #define XFS_DQ_PROJ            0x0002          /* project quota */
 #define XFS_DQ_GROUP           0x0004          /* a group quota */
 #define XFS_DQ_DIRTY           0x0008          /* dquot is dirty */
-#define XFS_DQ_FREEING         0x0010          /* dquot is beeing torn down */
+#define XFS_DQ_FREEING         0x0010          /* dquot is being torn down */
 
 #define XFS_DQ_ALLTYPES                (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
 
@@ -116,6 +116,7 @@ typedef __uint16_t  xfs_qwarncnt_t;
 #define XFS_QMOPT_DQREPAIR     0x0001000 /* repair dquot if damaged */
 #define XFS_QMOPT_GQUOTA       0x0002000 /* group dquot requested */
 #define XFS_QMOPT_ENOSPC       0x0004000 /* enospc instead of edquot (prj) */
+#define XFS_QMOPT_DQNEXT       0x0008000 /* return next dquot >= this ID */
 
 /*
  * flags to xfs_trans_mod_dquot to indicate which field needs to be
index 9b59ffa1fc198d4934a575af40716837bc54c11b..951c044e24e40d024e5abf48057c0f0942111acc 100644 (file)
  * Realtime allocator bitmap functions shared with userspace.
  */
 
+/*
+ * Real time buffers need verifiers to avoid runtime warnings during IO.
+ * We don't have anything to verify, however, so these are just dummy
+ * operations.
+ */
+static void
+xfs_rtbuf_verify_read(
+       struct xfs_buf  *bp)
+{
+       return;
+}
+
+static void
+xfs_rtbuf_verify_write(
+       struct xfs_buf  *bp)
+{
+       return;
+}
+
+const struct xfs_buf_ops xfs_rtbuf_ops = {
+       .name = "rtbuf",
+       .verify_read = xfs_rtbuf_verify_read,
+       .verify_write = xfs_rtbuf_verify_write,
+};
+
 /*
  * Get a buffer for the bitmap or summary file block specified.
  * The buffer is returned read and locked.
@@ -68,9 +93,12 @@ xfs_rtbuf_get(
        ASSERT(map.br_startblock != NULLFSBLOCK);
        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
                                   XFS_FSB_TO_DADDR(mp, map.br_startblock),
-                                  mp->m_bsize, 0, &bp, NULL);
+                                  mp->m_bsize, 0, &bp, &xfs_rtbuf_ops);
        if (error)
                return error;
+
+       xfs_trans_buf_set_type(tp, bp, issum ? XFS_BLFT_RTSUMMARY_BUF
+                                            : XFS_BLFT_RTBITMAP_BUF);
        *bpp = bp;
        return 0;
 }
@@ -983,7 +1011,7 @@ xfs_rtfree_extent(
            mp->m_sb.sb_rextents) {
                if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
                        mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
-               *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
+               *(__uint64_t *)&VFS_I(mp->m_rbmip)->i_atime = 0;
                xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
        }
        return 0;
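
The realtime-bitmap hunk above installs xfs_rtbuf_ops whose verifiers are deliberate no-ops, purely to give the buffers a named ops table and avoid runtime warnings. A minimal userspace sketch of an ops table with no-op verifier hooks, using invented types that only echo the shape of the kernel structure:

/* Sketch: an ops table whose verify hooks intentionally do nothing. */
#include <stdio.h>

struct buf;
struct buf_ops {
        const char *name;
        void (*verify_read)(struct buf *bp);
        void (*verify_write)(struct buf *bp);
};

static void noop_verify(struct buf *bp) { (void)bp; }   /* nothing to check */

static const struct buf_ops rtbuf_like_ops = {
        .name         = "rtbuf",
        .verify_read  = noop_verify,
        .verify_write = noop_verify,
};

int main(void)
{
        printf("ops '%s' installed with no-op verifiers\n", rtbuf_like_ops.name);
        return 0;
}
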
index b25bb9a343f33f99ca2bf4392d696c59f80178b4..961e6475a3099bb9acf2c5df67f355f35ffbb3c7 100644 (file)
@@ -27,7 +27,6 @@ extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
 extern void    xfs_perag_put(struct xfs_perag *pag);
 extern int     xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
 
-extern void    xfs_sb_calc_crc(struct xfs_buf *bp);
 extern void    xfs_log_sb(struct xfs_trans *tp);
 extern int     xfs_sync_sb(struct xfs_mount *mp, bool wait);
 extern void    xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp);
index 15c3ceb845b91a31353a21123450c820fef26c49..81ac870834da9e63515553e3fa291318acd1e73a 100644 (file)
@@ -53,6 +53,7 @@ extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
 extern const struct xfs_buf_ops xfs_sb_buf_ops;
 extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
 extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+extern const struct xfs_buf_ops xfs_rtbuf_ops;
 
 /*
  * Transaction types.  Used to distinguish types of buffers. These never reach
index 379c089fb0514a5a34934b51b790181567a3384f..14ac9822b3036284692228ad57f1956029bc958b 100644 (file)
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
+/* flags for direct write completions */
+#define XFS_DIO_FLAG_UNWRITTEN (1 << 0)
+#define XFS_DIO_FLAG_APPEND    (1 << 1)
+
 void
 xfs_count_page_state(
        struct page             *page,
@@ -214,10 +218,12 @@ xfs_end_io(
        struct xfs_inode *ip = XFS_I(ioend->io_inode);
        int             error = 0;
 
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+       /*
+        * Set an error if the mount has shut down and proceed with end I/O
+        * processing so it can perform whatever cleanups are necessary.
+        */
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                ioend->io_error = -EIO;
-               goto done;
-       }
 
        /*
         * For unwritten extents we need to issue transactions to convert a
@@ -1238,27 +1244,8 @@ xfs_vm_releasepage(
 }
 
 /*
- * When we map a DIO buffer, we may need to attach an ioend that describes the
- * type of write IO we are doing. This passes to the completion function the
- * operations it needs to perform. If the mapping is for an overwrite wholly
- * within the EOF then we don't need an ioend and so we don't allocate one.
- * This avoids the unnecessary overhead of allocating and freeing ioends for
- * workloads that don't require transactions on IO completion.
- *
- * If we get multiple mappings in a single IO, we might be mapping different
- * types. But because the direct IO can only have a single private pointer, we
- * need to ensure that:
- *
- * a) i) the ioend spans the entire region of unwritten mappings; or
- *    ii) the ioend spans all the mappings that cross or are beyond EOF; and
- * b) if it contains unwritten extents, it is *permanently* marked as such
- *
- * We could do this by chaining ioends like buffered IO does, but we only
- * actually get one IO completion callback from the direct IO, and that spans
- * the entire IO regardless of how many mappings and IOs are needed to complete
- * the DIO. There is only going to be one reference to the ioend and its life
- * cycle is constrained by the DIO completion code. hence we don't need
- * reference counting here.
+ * When we map a DIO buffer, we may need to pass flags to
+ * xfs_end_io_direct_write to tell it what kind of write IO we are doing.
  *
  * Note that for DIO, an IO to the highest supported file block offset (i.e.
  * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
@@ -1266,68 +1253,26 @@ xfs_vm_releasepage(
  * extending the file size. We won't know for sure until IO completion is run
  * and the actual max write offset is communicated to the IO completion
  * routine.
- *
- * For DAX page faults, we are preparing to never see unwritten extents here,
- * nor should we ever extend the inode size. Hence we will soon have nothing to
- * do here for this case, ensuring we don't have to provide an IO completion
- * callback to free an ioend that we don't actually need for a fault into the
- * page at offset (2^63 - 1FSB) bytes.
  */
-
 static void
 xfs_map_direct(
        struct inode            *inode,
        struct buffer_head      *bh_result,
        struct xfs_bmbt_irec    *imap,
-       xfs_off_t               offset,
-       bool                    dax_fault)
+       xfs_off_t               offset)
 {
-       struct xfs_ioend        *ioend;
+       uintptr_t               *flags = (uintptr_t *)&bh_result->b_private;
        xfs_off_t               size = bh_result->b_size;
-       int                     type;
 
-       if (ISUNWRITTEN(imap))
-               type = XFS_IO_UNWRITTEN;
-       else
-               type = XFS_IO_OVERWRITE;
-
-       trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);
-
-       if (dax_fault) {
-               ASSERT(type == XFS_IO_OVERWRITE);
-               trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
-                                           imap);
-               return;
-       }
-
-       if (bh_result->b_private) {
-               ioend = bh_result->b_private;
-               ASSERT(ioend->io_size > 0);
-               ASSERT(offset >= ioend->io_offset);
-               if (offset + size > ioend->io_offset + ioend->io_size)
-                       ioend->io_size = offset - ioend->io_offset + size;
-
-               if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
-                       ioend->io_type = XFS_IO_UNWRITTEN;
-
-               trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
-                                             ioend->io_size, ioend->io_type,
-                                             imap);
-       } else if (type == XFS_IO_UNWRITTEN ||
-                  offset + size > i_size_read(inode) ||
-                  offset + size < 0) {
-               ioend = xfs_alloc_ioend(inode, type);
-               ioend->io_offset = offset;
-               ioend->io_size = size;
+       trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
+               ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, imap);
 
-               bh_result->b_private = ioend;
+       if (ISUNWRITTEN(imap)) {
+               *flags |= XFS_DIO_FLAG_UNWRITTEN;
+               set_buffer_defer_completion(bh_result);
+       } else if (offset + size > i_size_read(inode) || offset + size < 0) {
+               *flags |= XFS_DIO_FLAG_APPEND;
                set_buffer_defer_completion(bh_result);
-
-               trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
-                                          imap);
-       } else {
-               trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
-                                           imap);
        }
 }
 
@@ -1498,9 +1443,12 @@ __xfs_get_blocks(
                if (ISUNWRITTEN(&imap))
                        set_buffer_unwritten(bh_result);
                /* direct IO needs special help */
-               if (create && direct)
-                       xfs_map_direct(inode, bh_result, &imap, offset,
-                                      dax_fault);
+               if (create && direct) {
+                       if (dax_fault)
+                               ASSERT(!ISUNWRITTEN(&imap));
+                       else
+                               xfs_map_direct(inode, bh_result, &imap, offset);
+               }
        }
 
        /*
@@ -1570,42 +1518,50 @@ xfs_get_blocks_dax_fault(
        return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
 }
 
-static void
-__xfs_end_io_direct_write(
-       struct inode            *inode,
-       struct xfs_ioend        *ioend,
+/*
+ * Complete a direct I/O write request.
+ *
+ * xfs_map_direct passes us some flags in the private data to tell us what to
+ * do.  If no flags are set, then the write IO is an overwrite wholly within
+ * the existing allocated file size and so there is nothing for us to do.
+ *
+ * Note that in this case the completion can be called in interrupt context,
+ * whereas if we have flags set we will always be called in task context
+ * (i.e. from a workqueue).
+ */
+STATIC int
+xfs_end_io_direct_write(
+       struct kiocb            *iocb,
        loff_t                  offset,
-       ssize_t                 size)
+       ssize_t                 size,
+       void                    *private)
 {
-       struct xfs_mount        *mp = XFS_I(inode)->i_mount;
+       struct inode            *inode = file_inode(iocb->ki_filp);
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       uintptr_t               flags = (uintptr_t)private;
+       int                     error = 0;
 
-       if (XFS_FORCED_SHUTDOWN(mp) || ioend->io_error)
-               goto out_end_io;
+       trace_xfs_end_io_direct_write(ip, offset, size);
 
-       /*
-        * dio completion end_io functions are only called on writes if more
-        * than 0 bytes was written.
-        */
-       ASSERT(size > 0);
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
 
-       /*
-        * The ioend only maps whole blocks, while the IO may be sector aligned.
-        * Hence the ioend offset/size may not match the IO offset/size exactly.
-        * Because we don't map overwrites within EOF into the ioend, the offset
-        * may not match, but only if the endio spans EOF.  Either way, write
-        * the IO sizes into the ioend so that completion processing does the
-        * right thing.
-        */
-       ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
-       ioend->io_size = size;
-       ioend->io_offset = offset;
+       if (size <= 0)
+               return size;
 
        /*
-        * The ioend tells us whether we are doing unwritten extent conversion
+        * The flags tell us whether we are doing unwritten extent conversions
         * or an append transaction that updates the on-disk file size. These
         * cases are the only cases where we should *potentially* be needing
         * to update the VFS inode size.
-        *
+        */
+       if (flags == 0) {
+               ASSERT(offset + size <= i_size_read(inode));
+               return 0;
+       }
+
+       /*
         * We need to update the in-core inode size here so that we don't end up
         * with the on-disk inode size being outside the in-core inode size. We
         * have no other method of updating EOF for AIO, so always do it here
@@ -1616,91 +1572,56 @@ __xfs_end_io_direct_write(
         * here can result in EOF moving backwards and Bad Things Happen when
         * that occurs.
         */
-       spin_lock(&XFS_I(inode)->i_flags_lock);
+       spin_lock(&ip->i_flags_lock);
        if (offset + size > i_size_read(inode))
                i_size_write(inode, offset + size);
-       spin_unlock(&XFS_I(inode)->i_flags_lock);
+       spin_unlock(&ip->i_flags_lock);
 
-       /*
-        * If we are doing an append IO that needs to update the EOF on disk,
-        * do the transaction reserve now so we can use common end io
-        * processing. Stashing the error (if there is one) in the ioend will
-        * result in the ioend processing passing on the error if it is
-        * possible as we can't return it from here.
-        */
-       if (ioend->io_type == XFS_IO_OVERWRITE)
-               ioend->io_error = xfs_setfilesize_trans_alloc(ioend);
+       if (flags & XFS_DIO_FLAG_UNWRITTEN) {
+               trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
 
-out_end_io:
-       xfs_end_io(&ioend->io_work);
-       return;
-}
+               error = xfs_iomap_write_unwritten(ip, offset, size);
+       } else if (flags & XFS_DIO_FLAG_APPEND) {
+               struct xfs_trans *tp;
 
-/*
- * Complete a direct I/O write request.
- *
- * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
- * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
- * wholly within the EOF and so there is nothing for us to do. Note that in this
- * case the completion can be called in interrupt context, whereas if we have an
- * ioend we will always be called in task context (i.e. from a workqueue).
- */
-STATIC void
-xfs_end_io_direct_write(
-       struct kiocb            *iocb,
-       loff_t                  offset,
-       ssize_t                 size,
-       void                    *private)
-{
-       struct inode            *inode = file_inode(iocb->ki_filp);
-       struct xfs_ioend        *ioend = private;
+               trace_xfs_end_io_direct_write_append(ip, offset, size);
 
-       trace_xfs_gbmap_direct_endio(XFS_I(inode), offset, size,
-                                    ioend ? ioend->io_type : 0, NULL);
-
-       if (!ioend) {
-               ASSERT(offset + size <= i_size_read(inode));
-               return;
+               tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
+               if (error) {
+                       xfs_trans_cancel(tp);
+                       return error;
+               }
+               error = xfs_setfilesize(ip, tp, offset, size);
        }
 
-       __xfs_end_io_direct_write(inode, ioend, offset, size);
+       return error;
 }
 
-static inline ssize_t
-xfs_vm_do_dio(
-       struct inode            *inode,
+STATIC ssize_t
+xfs_vm_direct_IO(
        struct kiocb            *iocb,
        struct iov_iter         *iter,
-       loff_t                  offset,
-       void                    (*endio)(struct kiocb   *iocb,
-                                        loff_t         offset,
-                                        ssize_t        size,
-                                        void           *private),
-       int                     flags)
+       loff_t                  offset)
 {
+       struct inode            *inode = iocb->ki_filp->f_mapping->host;
+       dio_iodone_t            *endio = NULL;
+       int                     flags = 0;
        struct block_device     *bdev;
 
-       if (IS_DAX(inode))
+       if (iov_iter_rw(iter) == WRITE) {
+               endio = xfs_end_io_direct_write;
+               flags = DIO_ASYNC_EXTEND;
+       }
+
+       if (IS_DAX(inode)) {
                return dax_do_io(iocb, inode, iter, offset,
                                 xfs_get_blocks_direct, endio, 0);
+       }
 
        bdev = xfs_find_bdev_for_inode(inode);
        return  __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
-                                    xfs_get_blocks_direct, endio, NULL, flags);
-}
-
-STATIC ssize_t
-xfs_vm_direct_IO(
-       struct kiocb            *iocb,
-       struct iov_iter         *iter,
-       loff_t                  offset)
-{
-       struct inode            *inode = iocb->ki_filp->f_mapping->host;
-
-       if (iov_iter_rw(iter) == WRITE)
-               return xfs_vm_do_dio(inode, iocb, iter, offset,
-                                    xfs_end_io_direct_write, DIO_ASYNC_EXTEND);
-       return xfs_vm_do_dio(inode, iocb, iter, offset, NULL, 0);
+                       xfs_get_blocks_direct, endio, NULL, flags);
 }
 
 /*
@@ -1783,14 +1704,22 @@ xfs_vm_write_failed(
                if (block_start >= to)
                        break;
 
-               if (!buffer_delay(bh))
+               /*
+                * Process delalloc and unwritten buffers beyond EOF. We can
+                * encounter unwritten buffers in the event that a file has
+                * post-EOF unwritten extents and an extending write happens to
+                * fail (e.g., an unaligned write that also involves a delalloc
+                * to the same page).
+                */
+               if (!buffer_delay(bh) && !buffer_unwritten(bh))
                        continue;
 
                if (!buffer_new(bh) && block_offset < i_size_read(inode))
                        continue;
 
-               xfs_vm_kill_delalloc_range(inode, block_offset,
-                                          block_offset + bh->b_size);
+               if (buffer_delay(bh))
+                       xfs_vm_kill_delalloc_range(inode, block_offset,
+                                                  block_offset + bh->b_size);
 
                /*
                 * This buffer does not contain data anymore. make sure anyone
@@ -1801,6 +1730,7 @@ xfs_vm_write_failed(
                clear_buffer_mapped(bh);
                clear_buffer_new(bh);
                clear_buffer_dirty(bh);
+               clear_buffer_unwritten(bh);
        }
 
 }
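
The aops rework above drops the per-write ioend allocation and instead stashes a couple of flag bits directly in the completion's private void * by going through uintptr_t. The sketch below shows only that packing trick; the flag names mirror the ones added in the hunk, but the surrounding code is invented for illustration.

/* Sketch: carry small flag bits in a void * completion cookie via uintptr_t. */
#include <stdio.h>
#include <stdint.h>

#define DIO_FLAG_UNWRITTEN  (1u << 0)
#define DIO_FLAG_APPEND     (1u << 1)

static void *pack_flags(unsigned int flags)
{
        return (void *)(uintptr_t)flags;       /* small integers fit in a pointer */
}

static unsigned int unpack_flags(void *private)
{
        return (unsigned int)(uintptr_t)private;
}

int main(void)
{
        void *private = pack_flags(DIO_FLAG_UNWRITTEN | DIO_FLAG_APPEND);
        unsigned int flags = unpack_flags(private);

        printf("unwritten=%d append=%d\n",
               !!(flags & DIO_FLAG_UNWRITTEN), !!(flags & DIO_FLAG_APPEND));
        return 0;
}
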
index 45ec9e40150c3dc44bc2268c274472f108ad188b..fd7f51c39b3fe5782c2383feb30f6117d02cfc25 100644 (file)
@@ -202,10 +202,12 @@ xfs_bmap_rtalloc(
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
 
        /*
-        * Lock out other modifications to the RT bitmap inode.
+        * Lock out modifications to both the RT bitmap and summary inodes
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+       xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
 
        /*
         * If it's an allocation to an empty file at offset 0,
@@ -821,7 +823,7 @@ bool
 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
 {
        /* prealloc/delalloc exists only on regular files */
-       if (!S_ISREG(ip->i_d.di_mode))
+       if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;
 
        /*
@@ -1726,7 +1728,7 @@ xfs_swap_extents(
        xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
 
        /* Verify that both files have the same format */
-       if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
+       if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
                error = -EINVAL;
                goto out_unlock;
        }
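
The xfs_bmap_rtalloc() hunk above now locks both the realtime bitmap and summary inodes before allocating, always in the same order. As a loose analogy only, the userspace sketch below shows the fixed-order locking idea with pthread mutexes standing in for xfs_ilock(); none of these names come from the kernel code.

/* Sketch: take two locks in one fixed order so concurrent paths cannot deadlock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rbm_lock  = PTHREAD_MUTEX_INITIALIZER;  /* "bitmap"  */
static pthread_mutex_t rsum_lock = PTHREAD_MUTEX_INITIALIZER;  /* "summary" */

static void rt_alloc_locked(void)
{
        /* always bitmap first, then summary, to avoid ABBA deadlocks */
        pthread_mutex_lock(&rbm_lock);
        pthread_mutex_lock(&rsum_lock);

        printf("allocating with both locks held\n");

        pthread_mutex_unlock(&rsum_lock);
        pthread_mutex_unlock(&rbm_lock);
}

int main(void)
{
        rt_alloc_locked();
        return 0;
}
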
index 435c7de42e5f322a82845382ad7e1fa54dfe3d0b..9a2191b911377f94e38d81d57d5d037a7e19ae8b 100644 (file)
@@ -650,7 +650,7 @@ xfs_buf_read_map(
        if (bp) {
                trace_xfs_buf_read(bp, flags, _RET_IP_);
 
-               if (!XFS_BUF_ISDONE(bp)) {
+               if (!(bp->b_flags & XBF_DONE)) {
                        XFS_STATS_INC(target->bt_mount, xb_get_read);
                        bp->b_ops = ops;
                        _xfs_buf_read(bp, flags);
index c75721acd8679687ae403d9eb5ee463a3cb0dc17..4eb89bd4ee73b6f4265eb63b8238e9571150bf26 100644 (file)
@@ -302,6 +302,7 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
 
 /* Buffer Utility Routines */
 extern void *xfs_buf_offset(struct xfs_buf *, size_t);
+extern void xfs_buf_stale(struct xfs_buf *bp);
 
 /* Delayed Write Buffer Routines */
 extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
@@ -312,31 +313,6 @@ extern int xfs_buf_delwri_submit_nowait(struct list_head *);
 extern int xfs_buf_init(void);
 extern void xfs_buf_terminate(void);
 
-#define XFS_BUF_ZEROFLAGS(bp) \
-       ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
-                           XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
-                           XBF_WRITE_FAIL))
-
-void xfs_buf_stale(struct xfs_buf *bp);
-#define XFS_BUF_UNSTALE(bp)    ((bp)->b_flags &= ~XBF_STALE)
-#define XFS_BUF_ISSTALE(bp)    ((bp)->b_flags & XBF_STALE)
-
-#define XFS_BUF_DONE(bp)       ((bp)->b_flags |= XBF_DONE)
-#define XFS_BUF_UNDONE(bp)     ((bp)->b_flags &= ~XBF_DONE)
-#define XFS_BUF_ISDONE(bp)     ((bp)->b_flags & XBF_DONE)
-
-#define XFS_BUF_ASYNC(bp)      ((bp)->b_flags |= XBF_ASYNC)
-#define XFS_BUF_UNASYNC(bp)    ((bp)->b_flags &= ~XBF_ASYNC)
-#define XFS_BUF_ISASYNC(bp)    ((bp)->b_flags & XBF_ASYNC)
-
-#define XFS_BUF_READ(bp)       ((bp)->b_flags |= XBF_READ)
-#define XFS_BUF_UNREAD(bp)     ((bp)->b_flags &= ~XBF_READ)
-#define XFS_BUF_ISREAD(bp)     ((bp)->b_flags & XBF_READ)
-
-#define XFS_BUF_WRITE(bp)      ((bp)->b_flags |= XBF_WRITE)
-#define XFS_BUF_UNWRITE(bp)    ((bp)->b_flags &= ~XBF_WRITE)
-#define XFS_BUF_ISWRITE(bp)    ((bp)->b_flags & XBF_WRITE)
-
 /*
  * These macros use the IO block map rather than b_bn. b_bn is now really
  * just for the buffer cache index for cached buffers. As IO does not use b_bn
index 7e986da34f6cb40ad3aca9e9845f81a070dd2d4d..99e91a0e554ea6512ce5eb43cb8a338804f550ae 100644 (file)
@@ -431,7 +431,7 @@ xfs_buf_item_unpin(
        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
-               ASSERT(XFS_BUF_ISSTALE(bp));
+               ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 
                trace_xfs_buf_item_unpin_stale(bip);
@@ -493,7 +493,7 @@ xfs_buf_item_unpin(
                xfs_buf_hold(bp);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioerror(bp, -EIO);
-               XFS_BUF_UNDONE(bp);
+               bp->b_flags &= ~XBF_DONE;
                xfs_buf_stale(bp);
                xfs_buf_ioend(bp);
        }
@@ -1067,7 +1067,7 @@ xfs_buf_iodone_callbacks(
         */
        if (XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_stale(bp);
-               XFS_BUF_DONE(bp);
+               bp->b_flags |= XBF_DONE;
                trace_xfs_buf_item_iodone(bp, _RET_IP_);
                goto do_callbacks;
        }
@@ -1090,7 +1090,7 @@ xfs_buf_iodone_callbacks(
         * errors tend to affect the whole device and a failing log write
         * will make us give up.  But we really ought to do better here.
         */
-       if (XFS_BUF_ISASYNC(bp)) {
+       if (bp->b_flags & XBF_ASYNC) {
                ASSERT(bp->b_iodone != NULL);
 
                trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
@@ -1113,7 +1113,7 @@ xfs_buf_iodone_callbacks(
         * sure to return the error to the caller of xfs_bwrite().
         */
        xfs_buf_stale(bp);
-       XFS_BUF_DONE(bp);
+       bp->b_flags |= XBF_DONE;
 
        trace_xfs_buf_error_relse(bp, _RET_IP_);
 
index 642d55d100758b10fb3b9deec90de526707c3d98..93b3ab0c54350fbdd6e977e787d6b7c3911b792d 100644 (file)
@@ -665,7 +665,7 @@ xfs_readdir(
        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return -EIO;
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        XFS_STATS_INC(dp->i_mount, xs_dir_getdents);
 
        args.dp = dp;
index 9c44d38dcd1f8ac9a11525e1a4e651a8e298b404..316b2a1bdba5f6da82f1bad0dcbc0708151a59d2 100644 (file)
@@ -92,26 +92,28 @@ xfs_qm_adjust_dqlimits(
 {
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_disk_dquot   *d = &dq->q_core;
+       struct xfs_def_quota    *defq;
        int                     prealloc = 0;
 
        ASSERT(d->d_id);
+       defq = xfs_get_defquota(dq, q);
 
-       if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
-               d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
+       if (defq->bsoftlimit && !d->d_blk_softlimit) {
+               d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
                prealloc = 1;
        }
-       if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
-               d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
+       if (defq->bhardlimit && !d->d_blk_hardlimit) {
+               d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
                prealloc = 1;
        }
-       if (q->qi_isoftlimit && !d->d_ino_softlimit)
-               d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
-       if (q->qi_ihardlimit && !d->d_ino_hardlimit)
-               d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
-       if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
-               d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
-       if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
-               d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
+       if (defq->isoftlimit && !d->d_ino_softlimit)
+               d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
+       if (defq->ihardlimit && !d->d_ino_hardlimit)
+               d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
+       if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
+               d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
+       if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
+               d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
 
        if (prealloc)
                xfs_dquot_set_prealloc_limits(dq);
@@ -232,7 +234,8 @@ xfs_qm_init_dquot_blk(
 {
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        xfs_dqblk_t     *d;
-       int             curid, i;
+       xfs_dqid_t      curid;
+       int             i;
 
        ASSERT(tp);
        ASSERT(xfs_buf_islocked(bp));
@@ -243,7 +246,6 @@ xfs_qm_init_dquot_blk(
         * ID of the first dquot in the block - id's are zero based.
         */
        curid = id - (id % q->qi_dqperchunk);
-       ASSERT(curid >= 0);
        memset(d, 0, BBTOB(q->qi_dqchunklen));
        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
                d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
@@ -464,12 +466,13 @@ xfs_qm_dqtobp(
        struct xfs_bmbt_irec    map;
        int                     nmaps = 1, error;
        struct xfs_buf          *bp;
-       struct xfs_inode        *quotip = xfs_dq_to_quota_inode(dqp);
+       struct xfs_inode        *quotip;
        struct xfs_mount        *mp = dqp->q_mount;
        xfs_dqid_t              id = be32_to_cpu(dqp->q_core.d_id);
        struct xfs_trans        *tp = (tpp ? *tpp : NULL);
        uint                    lock_mode;
 
+       quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
        dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 
        lock_mode = xfs_ilock_data_map_shared(quotip);
@@ -684,6 +687,56 @@ error0:
        return error;
 }
 
+/*
+ * Advance to the next id in the current chunk, or if at the
+ * end of the chunk, skip ahead to first id in next allocated chunk
+ * using the SEEK_DATA interface.
+ */
+int
+xfs_dq_get_next_id(
+       xfs_mount_t             *mp,
+       uint                    type,
+       xfs_dqid_t              *id,
+       loff_t                  eof)
+{
+       struct xfs_inode        *quotip;
+       xfs_fsblock_t           start;
+       loff_t                  offset;
+       uint                    lock;
+       xfs_dqid_t              next_id;
+       int                     error = 0;
+
+       /* Simple advance */
+       next_id = *id + 1;
+
+       /* If new ID is within the current chunk, advancing it sufficed */
+       if (next_id % mp->m_quotainfo->qi_dqperchunk) {
+               *id = next_id;
+               return 0;
+       }
+
+       /* Nope, next_id is now past the current chunk, so find the next one */
+       start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
+
+       quotip = xfs_quota_inode(mp, type);
+       lock = xfs_ilock_data_map_shared(quotip);
+
+       offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
+                                     eof, SEEK_DATA);
+       if (offset < 0)
+               error = offset;
+
+       xfs_iunlock(quotip, lock);
+
+       /* -ENXIO is essentially "no more data" */
+       if (error)
+               return (error == -ENXIO ? -ENOENT: error);
+
+       /* Convert next data offset back to a quota id */
+       *id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
+       return 0;
+}
+
 /*
  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
  * a locked dquot, doing an allocation (if requested) as needed.
@@ -704,6 +757,7 @@ xfs_qm_dqget(
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
        struct xfs_dquot        *dqp;
+       loff_t                  eof = 0;
        int                     error;
 
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -731,6 +785,21 @@ xfs_qm_dqget(
        }
 #endif
 
+       /* Get the end of the quota file if we need it */
+       if (flags & XFS_QMOPT_DQNEXT) {
+               struct xfs_inode        *quotip;
+               xfs_fileoff_t           last;
+               uint                    lock_mode;
+
+               quotip = xfs_quota_inode(mp, type);
+               lock_mode = xfs_ilock_data_map_shared(quotip);
+               error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
+               xfs_iunlock(quotip, lock_mode);
+               if (error)
+                       return error;
+               eof = XFS_FSB_TO_B(mp, last);
+       }
+
 restart:
        mutex_lock(&qi->qi_tree_lock);
        dqp = radix_tree_lookup(tree, id);
@@ -744,6 +813,18 @@ restart:
                        goto restart;
                }
 
+               /* uninit / unused quota found in radix tree, keep looking  */
+               if (flags & XFS_QMOPT_DQNEXT) {
+                       if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+                               xfs_dqunlock(dqp);
+                               mutex_unlock(&qi->qi_tree_lock);
+                               error = xfs_dq_get_next_id(mp, type, &id, eof);
+                               if (error)
+                                       return error;
+                               goto restart;
+                       }
+               }
+
                dqp->q_nrefs++;
                mutex_unlock(&qi->qi_tree_lock);
 
@@ -770,6 +851,13 @@ restart:
        if (ip)
                xfs_ilock(ip, XFS_ILOCK_EXCL);
 
+       /* If we are asked to find next active id, keep looking */
+       if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
+               error = xfs_dq_get_next_id(mp, type, &id, eof);
+               if (!error)
+                       goto restart;
+       }
+
        if (error)
                return error;
 
@@ -820,6 +908,17 @@ restart:
        qi->qi_dquots++;
        mutex_unlock(&qi->qi_tree_lock);
 
+       /* If we are asked to find next active id, keep looking */
+       if (flags & XFS_QMOPT_DQNEXT) {
+               if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+                       xfs_qm_dqput(dqp);
+                       error = xfs_dq_get_next_id(mp, type, &id, eof);
+                       if (error)
+                               return error;
+                       goto restart;
+               }
+       }
+
  dqret:
        ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
        trace_xfs_dqget_miss(dqp);
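The new xfs_dq_get_next_id() above advances within the current quota chunk when it can; otherwise it converts the candidate id to a file offset, asks SEEK_DATA for the next allocated chunk, and maps the returned offset back to the first id in that chunk. The standalone sketch below reproduces only that id/offset arithmetic; the chunk geometry is made up here, whereas the real values come from mp->m_quotainfo.

#include <stdio.h>

/*
 * Invented geometry for illustration: one 4 KiB block per chunk, 30 dquot
 * records per chunk.
 */
#define BLOCK_SIZE   4096ULL
#define DQ_PER_CHUNK 30u

static unsigned long long id_to_offset(unsigned int id)
{
	/* byte offset of the chunk that would hold this id */
	return (unsigned long long)(id / DQ_PER_CHUNK) * BLOCK_SIZE;
}

static unsigned int offset_to_first_id(unsigned long long offset)
{
	/* first id stored in the chunk found at this byte offset */
	return (unsigned int)(offset / BLOCK_SIZE) * DQ_PER_CHUNK;
}

int main(void)
{
	unsigned int id = 29;		/* last slot of chunk 0 */
	unsigned int next = id + 1;

	if (next % DQ_PER_CHUNK) {
		printf("still in the same chunk, next id = %u\n", next);
	} else {
		/* pretend SEEK_DATA skipped two sparse chunks and found data here */
		unsigned long long found = 3 * BLOCK_SIZE;

		printf("seek from %llu, data at %llu, next id = %u\n",
		       id_to_offset(next), found, offset_to_first_id(found));
	}
	return 0;
}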
index 652cd3c5b58c1cac1562239c9c644a19dbe588b7..2816d42507bc8ab7cf00fecb775b11e19dfb6088 100644 (file)
@@ -152,7 +152,7 @@ xfs_nfs_get_inode(
                return ERR_PTR(error);
        }
 
-       if (ip->i_d.di_gen != generation) {
+       if (VFS_I(ip)->i_generation != generation) {
                IRELE(ip);
                return ERR_PTR(-ESTALE);
        }
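This hunk switches the NFS export code to the generation number kept in the VFS inode when deciding whether a file handle has gone stale. Below is a small, self-contained sketch of that check; the structures are invented placeholders, not the real xfs_inode or file handle layout.

#include <errno.h>
#include <stdio.h>

/* Invented stand-ins for an inode and the fields a file handle carries. */
struct demo_inode  { unsigned int i_generation; };
struct demo_handle { unsigned long ino; unsigned int generation; };

/* Return 0 if the handle still refers to this incarnation of the inode. */
static int check_handle(const struct demo_inode *inode,
			const struct demo_handle *h)
{
	if (inode->i_generation != h->generation)
		return -ESTALE;	/* inode number was reused since the handle was issued */
	return 0;
}

int main(void)
{
	struct demo_inode ino = { .i_generation = 7 };
	struct demo_handle good = { .ino = 42, .generation = 7 };
	struct demo_handle stale = { .ino = 42, .generation = 6 };

	printf("good: %d, stale: %d\n",
	       check_handle(&ino, &good), check_handle(&ino, &stale));
	return 0;
}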
index 52883ac3cf84c06761afcf0792bbafcf781d509b..ac0fd32de31e4e5455e43da208cdef4861710a21 100644 (file)
@@ -156,9 +156,9 @@ xfs_update_prealloc_flags(
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
        if (!(flags & XFS_PREALLOC_INVISIBLE)) {
-               ip->i_d.di_mode &= ~S_ISUID;
-               if (ip->i_d.di_mode & S_IXGRP)
-                       ip->i_d.di_mode &= ~S_ISGID;
+               VFS_I(ip)->i_mode &= ~S_ISUID;
+               if (VFS_I(ip)->i_mode & S_IXGRP)
+                       VFS_I(ip)->i_mode &= ~S_ISGID;
                xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        }
 
@@ -1337,31 +1337,31 @@ out:
        return found;
 }
 
-STATIC loff_t
-xfs_seek_hole_data(
-       struct file             *file,
+/*
+ * caller must lock inode with xfs_ilock_data_map_shared,
+ * can we craft an appropriate ASSERT?
+ *
+ * end is because the VFS-level lseek interface is defined such that any
+ * offset past i_size shall return -ENXIO, but we use this for quota code
+ * which does not maintain i_size, and we want to SEEK_DATA past i_size.
+ */
+loff_t
+__xfs_seek_hole_data(
+       struct inode            *inode,
        loff_t                  start,
+       loff_t                  end,
        int                     whence)
 {
-       struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        loff_t                  uninitialized_var(offset);
-       xfs_fsize_t             isize;
        xfs_fileoff_t           fsbno;
-       xfs_filblks_t           end;
-       uint                    lock;
+       xfs_filblks_t           lastbno;
        int                     error;
 
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return -EIO;
-
-       lock = xfs_ilock_data_map_shared(ip);
-
-       isize = i_size_read(inode);
-       if (start >= isize) {
+       if (start >= end) {
                error = -ENXIO;
-               goto out_unlock;
+               goto out_error;
        }
 
        /*
@@ -1369,22 +1369,22 @@ xfs_seek_hole_data(
         * by fsbno to the end block of the file.
         */
        fsbno = XFS_B_TO_FSBT(mp, start);
-       end = XFS_B_TO_FSB(mp, isize);
+       lastbno = XFS_B_TO_FSB(mp, end);
 
        for (;;) {
                struct xfs_bmbt_irec    map[2];
                int                     nmap = 2;
                unsigned int            i;
 
-               error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
+               error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
                                       XFS_BMAPI_ENTIRE);
                if (error)
-                       goto out_unlock;
+                       goto out_error;
 
                /* No extents at given offset, must be beyond EOF */
                if (nmap == 0) {
                        error = -ENXIO;
-                       goto out_unlock;
+                       goto out_error;
                }
 
                for (i = 0; i < nmap; i++) {
@@ -1426,7 +1426,7 @@ xfs_seek_hole_data(
                         * hole at the end of any file).
                         */
                        if (whence == SEEK_HOLE) {
-                               offset = isize;
+                               offset = end;
                                break;
                        }
                        /*
@@ -1434,7 +1434,7 @@ xfs_seek_hole_data(
                         */
                        ASSERT(whence == SEEK_DATA);
                        error = -ENXIO;
-                       goto out_unlock;
+                       goto out_error;
                }
 
                ASSERT(i > 1);
@@ -1445,14 +1445,14 @@ xfs_seek_hole_data(
                 */
                fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
                start = XFS_FSB_TO_B(mp, fsbno);
-               if (start >= isize) {
+               if (start >= end) {
                        if (whence == SEEK_HOLE) {
-                               offset = isize;
+                               offset = end;
                                break;
                        }
                        ASSERT(whence == SEEK_DATA);
                        error = -ENXIO;
-                       goto out_unlock;
+                       goto out_error;
                }
        }
 
@@ -1464,7 +1464,39 @@ out:
         * situation in particular.
         */
        if (whence == SEEK_HOLE)
-               offset = min_t(loff_t, offset, isize);
+               offset = min_t(loff_t, offset, end);
+
+       return offset;
+
+out_error:
+       return error;
+}
+
+STATIC loff_t
+xfs_seek_hole_data(
+       struct file             *file,
+       loff_t                  start,
+       int                     whence)
+{
+       struct inode            *inode = file->f_mapping->host;
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       uint                    lock;
+       loff_t                  offset, end;
+       int                     error = 0;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+
+       lock = xfs_ilock_data_map_shared(ip);
+
+       end = i_size_read(inode);
+       offset = __xfs_seek_hole_data(inode, start, end, whence);
+       if (offset < 0) {
+               error = offset;
+               goto out_unlock;
+       }
+
        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 
 out_unlock:
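The refactoring above splits the lookup into __xfs_seek_hole_data(), which works against an explicit end offset and expects the caller to hold the data map lock, and a thin xfs_seek_hole_data() wrapper that takes the lock and passes i_size, so the quota code can reuse the core and seek past i_size. The program below is only a userspace view of the SEEK_DATA/SEEK_HOLE semantics involved; it is not XFS code.

#define _GNU_SOURCE		/* for SEEK_DATA / SEEK_HOLE with glibc */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data, hole;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	data = lseek(fd, 0, SEEK_DATA);	/* fails with ENXIO past the last data */
	hole = lseek(fd, 0, SEEK_HOLE);	/* at worst the implicit hole at EOF */
	printf("first data at %lld, first hole at %lld\n",
	       (long long)data, (long long)hole);

	close(fd);
	return 0;
}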
index c4c130f9bfb64fec1d7d5dccb27963a236477ced..a51353a1f87f1a5e78064c0598f42397ece8f767 100644 (file)
@@ -151,7 +151,7 @@ xfs_filestream_pick_ag(
        xfs_agnumber_t          ag, max_ag = NULLAGNUMBER;
        int                     err, trylock, nscan;
 
-       ASSERT(S_ISDIR(ip->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(ip)->i_mode));
 
        /* 2% of an AG's blocks must be free for it to be chosen. */
        minfree = mp->m_sb.sb_agblocks / 50;
@@ -319,7 +319,7 @@ xfs_filestream_lookup_ag(
        xfs_agnumber_t          startag, ag = NULLAGNUMBER;
        struct xfs_mru_cache_elem *mru;
 
-       ASSERT(S_ISREG(ip->i_d.di_mode));
+       ASSERT(S_ISREG(VFS_I(ip)->i_mode));
 
        pip = xfs_filestream_get_parent(ip);
        if (!pip)
index 1b6a98b66886c76d1fab032673ec88f4cb11afa0..f32713f14f9a21c1b752e2e8eb889dea72411f8e 100644 (file)
@@ -25,6 +25,5 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
                                xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern int xfs_fs_log_dummy(struct xfs_mount *mp);
 
 #endif /* __XFS_FSOPS_H__ */
index d7a490f24ead08e3abf5019654ee5ee6e2e1eb7b..bf2d60749278602b5b4afcda09ede7d3dd89fd1e 100644 (file)
@@ -63,6 +63,9 @@ xfs_inode_alloc(
                return NULL;
        }
 
+       /* VFS doesn't initialise i_mode! */
+       VFS_I(ip)->i_mode = 0;
+
        XFS_STATS_INC(mp, vn_active);
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
@@ -79,7 +82,7 @@ xfs_inode_alloc(
        memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
        ip->i_flags = 0;
        ip->i_delayed_blks = 0;
-       memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
+       memset(&ip->i_d, 0, sizeof(ip->i_d));
 
        return ip;
 }
@@ -98,7 +101,7 @@ void
 xfs_inode_free(
        struct xfs_inode        *ip)
 {
-       switch (ip->i_d.di_mode & S_IFMT) {
+       switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
@@ -134,6 +137,34 @@ xfs_inode_free(
        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
 
+/*
+ * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
+ * part of the structure. This is made more complex by the fact we store
+ * information about the on-disk values in the VFS inode and so we can't just
+ * overwrite the values unconditionally. Hence we save the parameters we
+ * need to retain across reinitialisation, and rewrite them into the VFS inode
+ * after reinitialisation even if it fails.
+ */
+static int
+xfs_reinit_inode(
+       struct xfs_mount        *mp,
+       struct inode            *inode)
+{
+       int             error;
+       uint32_t        nlink = inode->i_nlink;
+       uint32_t        generation = inode->i_generation;
+       uint64_t        version = inode->i_version;
+       umode_t         mode = inode->i_mode;
+
+       error = inode_init_always(mp->m_super, inode);
+
+       set_nlink(inode, nlink);
+       inode->i_generation = generation;
+       inode->i_version = version;
+       inode->i_mode = mode;
+       return error;
+}
+
 /*
  * Check the validity of the inode we just found it the cache
  */
@@ -185,7 +216,7 @@ xfs_iget_cache_hit(
        /*
         * If lookup is racing with unlink return an error immediately.
         */
-       if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
+       if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
                error = -ENOENT;
                goto out_error;
        }
@@ -208,7 +239,7 @@ xfs_iget_cache_hit(
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
 
-               error = inode_init_always(mp->m_super, inode);
+               error = xfs_reinit_inode(mp, inode);
                if (error) {
                        /*
                         * Re-initializing the inode failed, and we are in deep
@@ -295,7 +326,7 @@ xfs_iget_cache_miss(
 
        trace_xfs_iget_miss(ip);
 
-       if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
+       if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
                error = -ENOENT;
                goto out_destroy;
        }
@@ -444,7 +475,7 @@ again:
         * If we have a real type for an on-disk inode, we can setup the inode
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
-       if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
+       if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
                xfs_setup_existing_inode(ip);
        return 0;
 
index ceba1a83cacccd649caf473ebcf2dfae984bb243..96f606deee313aed506b7e7ee229fc801ba5de80 100644 (file)
@@ -57,9 +57,9 @@ kmem_zone_t *xfs_inode_zone;
  */
 #define        XFS_ITRUNC_MAX_EXTENTS  2
 
-STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
-
-STATIC int xfs_iunlink_remove(xfs_trans_t *, xfs_inode_t *);
+STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
+STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
+STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
 
 /*
  * helper function to extract extent size hint from inode
@@ -766,6 +766,7 @@ xfs_ialloc(
        uint            flags;
        int             error;
        struct timespec tv;
+       struct inode    *inode;
 
        /*
         * Call the space management code to pick
@@ -791,6 +792,7 @@ xfs_ialloc(
        if (error)
                return error;
        ASSERT(ip != NULL);
+       inode = VFS_I(ip);
 
        /*
         * We always convert v1 inodes to v2 now - we only support filesystems
@@ -800,20 +802,16 @@ xfs_ialloc(
        if (ip->i_d.di_version == 1)
                ip->i_d.di_version = 2;
 
-       ip->i_d.di_mode = mode;
-       ip->i_d.di_onlink = 0;
-       ip->i_d.di_nlink = nlink;
-       ASSERT(ip->i_d.di_nlink == nlink);
+       inode->i_mode = mode;
+       set_nlink(inode, nlink);
        ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
        ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
        xfs_set_projid(ip, prid);
-       memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
 
        if (pip && XFS_INHERIT_GID(pip)) {
                ip->i_d.di_gid = pip->i_d.di_gid;
-               if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
-                       ip->i_d.di_mode |= S_ISGID;
-               }
+               if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
+                       inode->i_mode |= S_ISGID;
        }
 
        /*
@@ -822,38 +820,29 @@ xfs_ialloc(
         * (and only if the irix_sgid_inherit compatibility variable is set).
         */
        if ((irix_sgid_inherit) &&
-           (ip->i_d.di_mode & S_ISGID) &&
-           (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) {
-               ip->i_d.di_mode &= ~S_ISGID;
-       }
+           (inode->i_mode & S_ISGID) &&
+           (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
+               inode->i_mode &= ~S_ISGID;
 
        ip->i_d.di_size = 0;
        ip->i_d.di_nextents = 0;
        ASSERT(ip->i_d.di_nblocks == 0);
 
        tv = current_fs_time(mp->m_super);
-       ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
-       ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
-       ip->i_d.di_atime = ip->i_d.di_mtime;
-       ip->i_d.di_ctime = ip->i_d.di_mtime;
+       inode->i_mtime = tv;
+       inode->i_atime = tv;
+       inode->i_ctime = tv;
 
-       /*
-        * di_gen will have been taken care of in xfs_iread.
-        */
        ip->i_d.di_extsize = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_dmstate = 0;
        ip->i_d.di_flags = 0;
 
        if (ip->i_d.di_version == 3) {
-               ASSERT(ip->i_d.di_ino == ino);
-               ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid));
-               ip->i_d.di_crc = 0;
-               ip->i_d.di_changecount = 1;
-               ip->i_d.di_lsn = 0;
+               inode->i_version = 1;
                ip->i_d.di_flags2 = 0;
-               memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
-               ip->i_d.di_crtime = ip->i_d.di_mtime;
+               ip->i_d.di_crtime.t_sec = (__int32_t)tv.tv_sec;
+               ip->i_d.di_crtime.t_nsec = (__int32_t)tv.tv_nsec;
        }
 
 
@@ -1092,35 +1081,24 @@ xfs_dir_ialloc(
 }
 
 /*
- * Decrement the link count on an inode & log the change.
- * If this causes the link count to go to zero, initiate the
- * logging activity required to truncate a file.
+ * Decrement the link count on an inode & log the change.  If this causes the
+ * link count to go to zero, move the inode to AGI unlinked list so that it can
+ * be freed when the last active reference goes away via xfs_inactive().
  */
 int                            /* error */
 xfs_droplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
 {
-       int     error;
-
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 
-       ASSERT (ip->i_d.di_nlink > 0);
-       ip->i_d.di_nlink--;
        drop_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
-       error = 0;
-       if (ip->i_d.di_nlink == 0) {
-               /*
-                * We're dropping the last link to this file.
-                * Move the on-disk inode to the AGI unlinked list.
-                * From xfs_inactive() we will pull the inode from
-                * the list and free it.
-                */
-               error = xfs_iunlink(tp, ip);
-       }
-       return error;
+       if (VFS_I(ip)->i_nlink)
+               return 0;
+
+       return xfs_iunlink(tp, ip);
 }
 
 /*
@@ -1134,8 +1112,6 @@ xfs_bumplink(
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 
        ASSERT(ip->i_d.di_version > 1);
-       ASSERT(ip->i_d.di_nlink > 0 || (VFS_I(ip)->i_state & I_LINKABLE));
-       ip->i_d.di_nlink++;
        inc_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        return 0;
@@ -1393,7 +1369,6 @@ xfs_create_tmpfile(
         */
        xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 
-       ip->i_d.di_nlink--;
        error = xfs_iunlink(tp, ip);
        if (error)
                goto out_trans_cancel;
@@ -1444,7 +1419,7 @@ xfs_link(
 
        trace_xfs_link(tdp, target_name);
 
-       ASSERT(!S_ISDIR(sip->i_d.di_mode));
+       ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;
@@ -1492,7 +1467,10 @@ xfs_link(
 
        xfs_bmap_init(&free_list, &first_block);
 
-       if (sip->i_d.di_nlink == 0) {
+       /*
+        * Handle initial link state of O_TMPFILE inode
+        */
+       if (VFS_I(sip)->i_nlink == 0) {
                error = xfs_iunlink_remove(tp, sip);
                if (error)
                        goto error_return;
@@ -1648,7 +1626,7 @@ xfs_release(
        xfs_mount_t     *mp = ip->i_mount;
        int             error;
 
-       if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
+       if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
                return 0;
 
        /* If this is a read-only mount, don't do this (would generate I/O) */
@@ -1679,7 +1657,7 @@ xfs_release(
                }
        }
 
-       if (ip->i_d.di_nlink == 0)
+       if (VFS_I(ip)->i_nlink == 0)
                return 0;
 
        if (xfs_can_free_eofblocks(ip, false)) {
@@ -1883,7 +1861,7 @@ xfs_inactive(
         * If the inode is already free, then there can be nothing
         * to clean up here.
         */
-       if (ip->i_d.di_mode == 0) {
+       if (VFS_I(ip)->i_mode == 0) {
                ASSERT(ip->i_df.if_real_bytes == 0);
                ASSERT(ip->i_df.if_broot_bytes == 0);
                return;
@@ -1895,7 +1873,7 @@ xfs_inactive(
        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return;
 
-       if (ip->i_d.di_nlink != 0) {
+       if (VFS_I(ip)->i_nlink != 0) {
                /*
                 * force is true because we are evicting an inode from the
                 * cache. Post-eof blocks must be freed, lest we end up with
@@ -1907,7 +1885,7 @@ xfs_inactive(
                return;
        }
 
-       if (S_ISREG(ip->i_d.di_mode) &&
+       if (S_ISREG(VFS_I(ip)->i_mode) &&
            (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
             ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
                truncate = 1;
@@ -1916,7 +1894,7 @@ xfs_inactive(
        if (error)
                return;
 
-       if (S_ISLNK(ip->i_d.di_mode))
+       if (S_ISLNK(VFS_I(ip)->i_mode))
                error = xfs_inactive_symlink(ip);
        else if (truncate)
                error = xfs_inactive_truncate(ip);
@@ -1952,16 +1930,21 @@ xfs_inactive(
 }
 
 /*
- * This is called when the inode's link count goes to 0.
- * We place the on-disk inode on a list in the AGI.  It
- * will be pulled from this list when the inode is freed.
+ * This is called when the inode's link count goes to 0 or we are creating a
+ * tmpfile via O_TMPFILE. In the case of a tmpfile, @ignore_linkcount will be
+ * set to true as the link count is dropped to zero by the VFS after we've
+ * created the file successfully, so we have to add it to the unlinked list
+ * while the link count is non-zero.
+ *
+ * We place the on-disk inode on a list in the AGI.  It will be pulled from this
+ * list when the inode is freed.
  */
-int
+STATIC int
 xfs_iunlink(
-       xfs_trans_t     *tp,
-       xfs_inode_t     *ip)
+       struct xfs_trans *tp,
+       struct xfs_inode *ip)
 {
-       xfs_mount_t     *mp;
+       xfs_mount_t     *mp = tp->t_mountp;
        xfs_agi_t       *agi;
        xfs_dinode_t    *dip;
        xfs_buf_t       *agibp;
@@ -1971,10 +1954,7 @@ xfs_iunlink(
        int             offset;
        int             error;
 
-       ASSERT(ip->i_d.di_nlink == 0);
-       ASSERT(ip->i_d.di_mode != 0);
-
-       mp = tp->t_mountp;
+       ASSERT(VFS_I(ip)->i_mode != 0);
 
        /*
         * Get the agi buffer first.  It ensures lock ordering
@@ -2412,10 +2392,10 @@ xfs_ifree(
        struct xfs_icluster     xic = { 0 };
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-       ASSERT(ip->i_d.di_nlink == 0);
+       ASSERT(VFS_I(ip)->i_nlink == 0);
        ASSERT(ip->i_d.di_nextents == 0);
        ASSERT(ip->i_d.di_anextents == 0);
-       ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
+       ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
        ASSERT(ip->i_d.di_nblocks == 0);
 
        /*
@@ -2429,7 +2409,7 @@ xfs_ifree(
        if (error)
                return error;
 
-       ip->i_d.di_mode = 0;            /* mark incore inode as free */
+       VFS_I(ip)->i_mode = 0;          /* mark incore inode as free */
        ip->i_d.di_flags = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_forkoff = 0;         /* mark the attr fork not in use */
@@ -2439,7 +2419,7 @@ xfs_ifree(
         * Bump the generation count so no one will be confused
         * by reincarnations of this inode.
         */
-       ip->i_d.di_gen++;
+       VFS_I(ip)->i_generation++;
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
        if (xic.deleted)
@@ -2526,7 +2506,7 @@ xfs_remove(
 {
        xfs_mount_t             *mp = dp->i_mount;
        xfs_trans_t             *tp = NULL;
-       int                     is_dir = S_ISDIR(ip->i_d.di_mode);
+       int                     is_dir = S_ISDIR(VFS_I(ip)->i_mode);
        int                     error = 0;
        xfs_bmap_free_t         free_list;
        xfs_fsblock_t           first_block;
@@ -2580,8 +2560,8 @@ xfs_remove(
         * If we're removing a directory perform some additional validation.
         */
        if (is_dir) {
-               ASSERT(ip->i_d.di_nlink >= 2);
-               if (ip->i_d.di_nlink != 2) {
+               ASSERT(VFS_I(ip)->i_nlink >= 2);
+               if (VFS_I(ip)->i_nlink != 2) {
                        error = -ENOTEMPTY;
                        goto out_trans_cancel;
                }
@@ -2771,7 +2751,7 @@ xfs_cross_rename(
        if (dp1 != dp2) {
                dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
 
-               if (S_ISDIR(ip2->i_d.di_mode)) {
+               if (S_ISDIR(VFS_I(ip2)->i_mode)) {
                        error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
                                                dp1->i_ino, first_block,
                                                free_list, spaceres);
@@ -2779,7 +2759,7 @@ xfs_cross_rename(
                                goto out_trans_abort;
 
                        /* transfer ip2 ".." reference to dp1 */
-                       if (!S_ISDIR(ip1->i_d.di_mode)) {
+                       if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
                                error = xfs_droplink(tp, dp2);
                                if (error)
                                        goto out_trans_abort;
@@ -2798,7 +2778,7 @@ xfs_cross_rename(
                        ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
                }
 
-               if (S_ISDIR(ip1->i_d.di_mode)) {
+               if (S_ISDIR(VFS_I(ip1)->i_mode)) {
                        error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
                                                dp2->i_ino, first_block,
                                                free_list, spaceres);
@@ -2806,7 +2786,7 @@ xfs_cross_rename(
                                goto out_trans_abort;
 
                        /* transfer ip1 ".." reference to dp2 */
-                       if (!S_ISDIR(ip2->i_d.di_mode)) {
+                       if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
                                error = xfs_droplink(tp, dp1);
                                if (error)
                                        goto out_trans_abort;
@@ -2903,7 +2883,7 @@ xfs_rename(
        struct xfs_inode        *inodes[__XFS_SORT_INODES];
        int                     num_inodes = __XFS_SORT_INODES;
        bool                    new_parent = (src_dp != target_dp);
-       bool                    src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
+       bool                    src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
        int                     spaceres;
        int                     error;
 
@@ -3032,12 +3012,12 @@ xfs_rename(
                 * target and source are directories and that target can be
                 * destroyed, or that neither is a directory.
                 */
-               if (S_ISDIR(target_ip->i_d.di_mode)) {
+               if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
                        /*
                         * Make sure target dir is empty.
                         */
                        if (!(xfs_dir_isempty(target_ip)) ||
-                           (target_ip->i_d.di_nlink > 2)) {
+                           (VFS_I(target_ip)->i_nlink > 2)) {
                                error = -EEXIST;
                                goto out_trans_cancel;
                        }
@@ -3144,7 +3124,7 @@ xfs_rename(
         * intermediate state on disk.
         */
        if (wip) {
-               ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0);
+               ASSERT(VFS_I(wip)->i_nlink == 0);
                error = xfs_bumplink(tp, wip);
                if (error)
                        goto out_bmap_cancel;
@@ -3313,7 +3293,7 @@ cluster_corrupt_out:
                 * mark it as stale and brelse.
                 */
                if (bp->b_iodone) {
-                       XFS_BUF_UNDONE(bp);
+                       bp->b_flags &= ~XBF_DONE;
                        xfs_buf_stale(bp);
                        xfs_buf_ioerror(bp, -EIO);
                        xfs_buf_ioend(bp);
@@ -3462,14 +3442,7 @@ xfs_iflush_int(
                        __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
                goto corrupt_out;
        }
-       if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
-                               mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
-               xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
-                       "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
-                       __func__, ip->i_ino, ip, ip->i_d.di_magic);
-               goto corrupt_out;
-       }
-       if (S_ISREG(ip->i_d.di_mode)) {
+       if (S_ISREG(VFS_I(ip)->i_mode)) {
                if (XFS_TEST_ERROR(
                    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
@@ -3479,7 +3452,7 @@ xfs_iflush_int(
                                __func__, ip->i_ino, ip);
                        goto corrupt_out;
                }
-       } else if (S_ISDIR(ip->i_d.di_mode)) {
+       } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
                if (XFS_TEST_ERROR(
                    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
@@ -3523,12 +3496,11 @@ xfs_iflush_int(
                ip->i_d.di_flushiter++;
 
        /*
-        * Copy the dirty parts of the inode into the on-disk
-        * inode.  We always copy out the core of the inode,
-        * because if the inode is dirty at all the core must
-        * be.
+        * Copy the dirty parts of the inode into the on-disk inode.  We always
+        * copy out the core of the inode, because if the inode is dirty at all
+        * the core must be.
         */
-       xfs_dinode_to_disk(dip, &ip->i_d);
+       xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
 
        /* Wrap, we never let the log put out DI_MAX_FLUSH */
        if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
@@ -3580,10 +3552,6 @@ xfs_iflush_int(
         */
        xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 
-       /* update the lsn in the on disk inode if required */
-       if (ip->i_d.di_version == 3)
-               dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);
-
        /* generate the checksum. */
        xfs_dinode_calc_crc(mp, dip);
 
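Most of the xfs_inode.c hunks above are mechanical: mode, link count, timestamps and the generation number now live only in the embedded VFS inode, so every S_ISDIR(ip->i_d.di_mode)-style test becomes S_ISDIR(VFS_I(ip)->i_mode) and the duplicated icdinode fields go away. For reference, the small userspace program below exercises the same S_IS*() mode macros against stat(2) output; it is unrelated to the kernel internals beyond sharing those macros.

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	if (lstat(argv[1], &st) != 0) {
		perror(argv[1]);
		return 1;
	}

	/* S_IS*() test the file-type bits (S_IFMT) of the mode word. */
	if (S_ISREG(st.st_mode))
		printf("regular file, %lu link(s)\n", (unsigned long)st.st_nlink);
	else if (S_ISDIR(st.st_mode))
		printf("directory, %lu link(s)\n", (unsigned long)st.st_nlink);
	else if (S_ISLNK(st.st_mode))
		printf("symbolic link\n");
	else
		printf("other file type (mode %o)\n", st.st_mode & S_IFMT);
	return 0;
}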
index ca9e11989cbd4f330c6cb0d1a1bede113fd9c8b2..43e1d51b15eb84ca34e978166025b74d30e5b573 100644 (file)
@@ -63,7 +63,7 @@ typedef struct xfs_inode {
        unsigned long           i_flags;        /* see defined flags below */
        unsigned int            i_delayed_blks; /* count of delay alloc blks */
 
-       xfs_icdinode_t          i_d;            /* most of ondisk inode */
+       struct xfs_icdinode     i_d;            /* most of ondisk inode */
 
        /* VFS inode */
        struct inode            i_vnode;        /* embedded VFS inode */
@@ -88,7 +88,7 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
  */
 static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip)
 {
-       if (S_ISREG(ip->i_d.di_mode))
+       if (S_ISREG(VFS_I(ip)->i_mode))
                return i_size_read(VFS_I(ip));
        return ip->i_d.di_size;
 }
@@ -369,7 +369,7 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
  */
 #define XFS_INHERIT_GID(pip)   \
        (((pip)->i_mount->m_flags & XFS_MOUNT_GRPID) || \
-        ((pip)->i_d.di_mode & S_ISGID))
+        (VFS_I(pip)->i_mode & S_ISGID))
 
 int            xfs_release(struct xfs_inode *ip);
 void           xfs_inactive(struct xfs_inode *ip);
@@ -405,8 +405,6 @@ int         xfs_ifree(struct xfs_trans *, xfs_inode_t *,
                           struct xfs_bmap_free *);
 int            xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
                                      int, xfs_fsize_t);
-int            xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
-
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
 
 void           xfs_iunpin_wait(xfs_inode_t *);
@@ -437,6 +435,8 @@ int xfs_update_prealloc_flags(struct xfs_inode *ip,
 int    xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
                     xfs_fsize_t isize, bool *did_zeroing);
 int    xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
+loff_t __xfs_seek_hole_data(struct inode *inode, loff_t start,
+                            loff_t eof, int whence);
 
 
 /* from xfs_iops.c */
index d14b12b8cfefb90f8fe4c92a0033a41cbde2e552..c48b5b18d771fab685e23c03613a1c6e762efcb4 100644 (file)
@@ -135,7 +135,7 @@ xfs_inode_item_size(
 
        *nvecs += 2;
        *nbytes += sizeof(struct xfs_inode_log_format) +
-                  xfs_icdinode_size(ip->i_d.di_version);
+                  xfs_log_dinode_size(ip->i_d.di_version);
 
        xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
        if (XFS_IFORK_Q(ip))
@@ -322,6 +322,81 @@ xfs_inode_item_format_attr_fork(
        }
 }
 
+static void
+xfs_inode_to_log_dinode(
+       struct xfs_inode        *ip,
+       struct xfs_log_dinode   *to,
+       xfs_lsn_t               lsn)
+{
+       struct xfs_icdinode     *from = &ip->i_d;
+       struct inode            *inode = VFS_I(ip);
+
+       to->di_magic = XFS_DINODE_MAGIC;
+
+       to->di_version = from->di_version;
+       to->di_format = from->di_format;
+       to->di_uid = from->di_uid;
+       to->di_gid = from->di_gid;
+       to->di_projid_lo = from->di_projid_lo;
+       to->di_projid_hi = from->di_projid_hi;
+
+       memset(to->di_pad, 0, sizeof(to->di_pad));
+       memset(to->di_pad3, 0, sizeof(to->di_pad3));
+       to->di_atime.t_sec = inode->i_atime.tv_sec;
+       to->di_atime.t_nsec = inode->i_atime.tv_nsec;
+       to->di_mtime.t_sec = inode->i_mtime.tv_sec;
+       to->di_mtime.t_nsec = inode->i_mtime.tv_nsec;
+       to->di_ctime.t_sec = inode->i_ctime.tv_sec;
+       to->di_ctime.t_nsec = inode->i_ctime.tv_nsec;
+       to->di_nlink = inode->i_nlink;
+       to->di_gen = inode->i_generation;
+       to->di_mode = inode->i_mode;
+
+       to->di_size = from->di_size;
+       to->di_nblocks = from->di_nblocks;
+       to->di_extsize = from->di_extsize;
+       to->di_nextents = from->di_nextents;
+       to->di_anextents = from->di_anextents;
+       to->di_forkoff = from->di_forkoff;
+       to->di_aformat = from->di_aformat;
+       to->di_dmevmask = from->di_dmevmask;
+       to->di_dmstate = from->di_dmstate;
+       to->di_flags = from->di_flags;
+
+       if (from->di_version == 3) {
+               to->di_changecount = inode->i_version;
+               to->di_crtime.t_sec = from->di_crtime.t_sec;
+               to->di_crtime.t_nsec = from->di_crtime.t_nsec;
+               to->di_flags2 = from->di_flags2;
+
+               to->di_ino = ip->i_ino;
+               to->di_lsn = lsn;
+               memset(to->di_pad2, 0, sizeof(to->di_pad2));
+               uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
+               to->di_flushiter = 0;
+       } else {
+               to->di_flushiter = from->di_flushiter;
+       }
+}
+
+/*
+ * Format the inode core. Current timestamp data is only in the VFS inode
+ * fields, so we need to grab them from there. Hence rather than just copying
+ * the XFS inode core structure, format the fields directly into the iovec.
+ */
+static void
+xfs_inode_item_format_core(
+       struct xfs_inode        *ip,
+       struct xfs_log_vec      *lv,
+       struct xfs_log_iovec    **vecp)
+{
+       struct xfs_log_dinode   *dic;
+
+       dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
+       xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
+       xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_d.di_version));
+}
+
 /*
  * This is called to fill in the vector of log iovecs for the given inode
  * log item.  It fills the first item with an inode log format structure,
@@ -351,10 +426,7 @@ xfs_inode_item_format(
        ilf->ilf_size = 2; /* format + core */
        xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
 
-       xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE,
-                       &ip->i_d,
-                       xfs_icdinode_size(ip->i_d.di_version));
-
+       xfs_inode_item_format_core(ip, lv, &vecp);
        xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
        if (XFS_IFORK_Q(ip)) {
                xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
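The new xfs_inode_to_log_dinode() above gathers the inode core field by field, taking timestamps, link count, mode and generation from the VFS inode and the rest from the icdinode, because a straight memcpy() of ip->i_d no longer captures everything that must be logged. The sketch below shows the shape of that explicit marshalling with invented structures; it is not the real log dinode layout.

#include <stdio.h>
#include <time.h>

/* Invented structures: live state is split across two places ... */
struct demo_vfs_part  { long atime_sec; unsigned int nlink; unsigned int mode; };
struct demo_core_part { unsigned long long size; unsigned int uid; };

/* ... and the log record needs a snapshot of both. */
struct demo_log_rec {
	long atime_sec;
	unsigned int nlink;
	unsigned int mode;
	unsigned long long size;
	unsigned int uid;
};

static void demo_format_logrec(const struct demo_vfs_part *v,
			       const struct demo_core_part *c,
			       struct demo_log_rec *to)
{
	to->atime_sec = v->atime_sec;	/* from the VFS side */
	to->nlink = v->nlink;
	to->mode = v->mode;
	to->size = c->size;		/* from the filesystem-private side */
	to->uid = c->uid;
}

int main(void)
{
	struct demo_vfs_part v = { .atime_sec = (long)time(NULL),
				   .nlink = 1, .mode = 0100644 };
	struct demo_core_part c = { .size = 4096, .uid = 1000 };
	struct demo_log_rec rec;

	demo_format_logrec(&v, &c, &rec);
	printf("logged: size=%llu mode=%o nlink=%u\n",
	       rec.size, rec.mode, rec.nlink);
	return 0;
}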
index 478d04e07f9500d6ceed20231deb6b474161c708..81d6d62188037ba6f5f6ccc3dcc68fdeab2ec8c3 100644 (file)
@@ -114,7 +114,7 @@ xfs_find_handle(
                handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
                                        sizeof(handle.ha_fid.fid_len);
                handle.ha_fid.fid_pad = 0;
-               handle.ha_fid.fid_gen = ip->i_d.di_gen;
+               handle.ha_fid.fid_gen = inode->i_generation;
                handle.ha_fid.fid_ino = ip->i_ino;
 
                hsize = XFS_HSIZE(handle);
@@ -963,7 +963,7 @@ xfs_set_diflags(
                di_flags |= XFS_DIFLAG_NODEFRAG;
        if (xflags & FS_XFLAG_FILESTREAM)
                di_flags |= XFS_DIFLAG_FILESTREAM;
-       if (S_ISDIR(ip->i_d.di_mode)) {
+       if (S_ISDIR(VFS_I(ip)->i_mode)) {
                if (xflags & FS_XFLAG_RTINHERIT)
                        di_flags |= XFS_DIFLAG_RTINHERIT;
                if (xflags & FS_XFLAG_NOSYMLINKS)
@@ -972,7 +972,7 @@ xfs_set_diflags(
                        di_flags |= XFS_DIFLAG_EXTSZINHERIT;
                if (xflags & FS_XFLAG_PROJINHERIT)
                        di_flags |= XFS_DIFLAG_PROJINHERIT;
-       } else if (S_ISREG(ip->i_d.di_mode)) {
+       } else if (S_ISREG(VFS_I(ip)->i_mode)) {
                if (xflags & FS_XFLAG_REALTIME)
                        di_flags |= XFS_DIFLAG_REALTIME;
                if (xflags & FS_XFLAG_EXTSIZE)
@@ -1128,14 +1128,14 @@ xfs_ioctl_setattr_check_extsize(
 {
        struct xfs_mount        *mp = ip->i_mount;
 
-       if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(ip->i_d.di_mode))
+       if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(VFS_I(ip)->i_mode))
                return -EINVAL;
 
        if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
-           !S_ISDIR(ip->i_d.di_mode))
+           !S_ISDIR(VFS_I(ip)->i_mode))
                return -EINVAL;
 
-       if (S_ISREG(ip->i_d.di_mode) && ip->i_d.di_nextents &&
+       if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
            ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
                return -EINVAL;
 
@@ -1256,9 +1256,9 @@ xfs_ioctl_setattr(
         * successful return from chown()
         */
 
-       if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+       if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
            !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
-               ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
+               VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
 
        /* Change the ownerships and register project quota modifications */
        if (xfs_get_projid(ip) != fa->fsx_projid) {
index 76b71a1c6c323e2043aeab1e93fb5a66db9f39d0..0d38b1d2c420fcc75ffa0cbc28fe3f0db8de17b4 100644 (file)
@@ -459,8 +459,8 @@ xfs_vn_getattr(
 
        stat->size = XFS_ISIZE(ip);
        stat->dev = inode->i_sb->s_dev;
-       stat->mode = ip->i_d.di_mode;
-       stat->nlink = ip->i_d.di_nlink;
+       stat->mode = inode->i_mode;
+       stat->nlink = inode->i_nlink;
        stat->uid = inode->i_uid;
        stat->gid = inode->i_gid;
        stat->ino = ip->i_ino;
@@ -506,9 +506,6 @@ xfs_setattr_mode(
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-       ip->i_d.di_mode &= S_IFMT;
-       ip->i_d.di_mode |= mode & ~S_IFMT;
-
        inode->i_mode &= S_IFMT;
        inode->i_mode |= mode & ~S_IFMT;
 }
@@ -522,21 +519,12 @@ xfs_setattr_time(
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-       if (iattr->ia_valid & ATTR_ATIME) {
+       if (iattr->ia_valid & ATTR_ATIME)
                inode->i_atime = iattr->ia_atime;
-               ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
-               ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
-       }
-       if (iattr->ia_valid & ATTR_CTIME) {
+       if (iattr->ia_valid & ATTR_CTIME)
                inode->i_ctime = iattr->ia_ctime;
-               ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
-               ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
-       }
-       if (iattr->ia_valid & ATTR_MTIME) {
+       if (iattr->ia_valid & ATTR_MTIME)
                inode->i_mtime = iattr->ia_mtime;
-               ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
-               ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
-       }
 }
 
 int
@@ -661,9 +649,9 @@ xfs_setattr_nonsize(
                 * The set-user-ID and set-group-ID bits of a file will be
                 * cleared upon successful return from chown()
                 */
-               if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+               if ((inode->i_mode & (S_ISUID|S_ISGID)) &&
                    !capable(CAP_FSETID))
-                       ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
+                       inode->i_mode &= ~(S_ISUID|S_ISGID);
 
                /*
                 * Change the ownerships and register quota modifications
@@ -773,7 +761,7 @@ xfs_setattr_size(
 
        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
-       ASSERT(S_ISREG(ip->i_d.di_mode));
+       ASSERT(S_ISREG(inode->i_mode));
        ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
                ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
 
@@ -991,21 +979,13 @@ xfs_vn_update_time(
        }
 
        xfs_ilock(ip, XFS_ILOCK_EXCL);
-       if (flags & S_CTIME) {
+       if (flags & S_CTIME)
                inode->i_ctime = *now;
-               ip->i_d.di_ctime.t_sec = (__int32_t)now->tv_sec;
-               ip->i_d.di_ctime.t_nsec = (__int32_t)now->tv_nsec;
-       }
-       if (flags & S_MTIME) {
+       if (flags & S_MTIME)
                inode->i_mtime = *now;
-               ip->i_d.di_mtime.t_sec = (__int32_t)now->tv_sec;
-               ip->i_d.di_mtime.t_nsec = (__int32_t)now->tv_nsec;
-       }
-       if (flags & S_ATIME) {
+       if (flags & S_ATIME)
                inode->i_atime = *now;
-               ip->i_d.di_atime.t_sec = (__int32_t)now->tv_sec;
-               ip->i_d.di_atime.t_nsec = (__int32_t)now->tv_nsec;
-       }
+
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
        return xfs_trans_commit(tp);
@@ -1232,8 +1212,6 @@ xfs_setup_inode(
        /* make the inode look hashed for the writeback code */
        hlist_add_fake(&inode->i_hash);
 
-       inode->i_mode   = ip->i_d.di_mode;
-       set_nlink(inode, ip->i_d.di_nlink);
        inode->i_uid    = xfs_uid_to_kuid(ip->i_d.di_uid);
        inode->i_gid    = xfs_gid_to_kgid(ip->i_d.di_gid);
 
@@ -1249,14 +1227,7 @@ xfs_setup_inode(
                break;
        }
 
-       inode->i_generation = ip->i_d.di_gen;
        i_size_write(inode, ip->i_d.di_size);
-       inode->i_atime.tv_sec   = ip->i_d.di_atime.t_sec;
-       inode->i_atime.tv_nsec  = ip->i_d.di_atime.t_nsec;
-       inode->i_mtime.tv_sec   = ip->i_d.di_mtime.t_sec;
-       inode->i_mtime.tv_nsec  = ip->i_d.di_mtime.t_nsec;
-       inode->i_ctime.tv_sec   = ip->i_d.di_ctime.t_sec;
-       inode->i_ctime.tv_nsec  = ip->i_d.di_ctime.t_nsec;
        xfs_diflags_to_iflags(inode, ip);
 
        ip->d_ops = ip->i_mount->m_nondir_inode_ops;
index 930ebd86bebac3a300faf44fabe77aa28258cf60..ce73eb34620dbbf06570650a582163a98a0d8f92 100644 (file)
@@ -57,6 +57,7 @@ xfs_bulkstat_one_int(
 {
        struct xfs_icdinode     *dic;           /* dinode core info pointer */
        struct xfs_inode        *ip;            /* incore inode pointer */
+       struct inode            *inode;
        struct xfs_bstat        *buf;           /* return buffer */
        int                     error = 0;      /* error value */
 
@@ -77,30 +78,33 @@ xfs_bulkstat_one_int(
 
        ASSERT(ip != NULL);
        ASSERT(ip->i_imap.im_blkno != 0);
+       inode = VFS_I(ip);
 
        dic = &ip->i_d;
 
        /* xfs_iget returns the following without needing
         * further change.
         */
-       buf->bs_nlink = dic->di_nlink;
        buf->bs_projid_lo = dic->di_projid_lo;
        buf->bs_projid_hi = dic->di_projid_hi;
        buf->bs_ino = ino;
-       buf->bs_mode = dic->di_mode;
        buf->bs_uid = dic->di_uid;
        buf->bs_gid = dic->di_gid;
        buf->bs_size = dic->di_size;
-       buf->bs_atime.tv_sec = dic->di_atime.t_sec;
-       buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
-       buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
-       buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
-       buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
-       buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
+
+       buf->bs_nlink = inode->i_nlink;
+       buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
+       buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
+       buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
+       buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
+       buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
+       buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
+       buf->bs_gen = inode->i_generation;
+       buf->bs_mode = inode->i_mode;
+
        buf->bs_xflags = xfs_ip2xflags(ip);
        buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
        buf->bs_extents = dic->di_nextents;
-       buf->bs_gen = dic->di_gen;
        memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
        buf->bs_dmevmask = dic->di_dmevmask;
        buf->bs_dmstate = dic->di_dmstate;
index 9c9a1c9bcc7f0bf0090fa4ffdffdfa35d6068122..40b700d3f42638537cf90f7020dbb3d5affd2347 100644 (file)
@@ -1212,7 +1212,7 @@ xlog_iodone(xfs_buf_t *bp)
        }
 
        /* log I/O is always issued ASYNC */
-       ASSERT(XFS_BUF_ISASYNC(bp));
+       ASSERT(bp->b_flags & XBF_ASYNC);
        xlog_state_done_syncing(iclog, aborted);
 
        /*
@@ -1864,9 +1864,8 @@ xlog_sync(
 
        bp->b_io_length = BTOBB(count);
        bp->b_fspriv = iclog;
-       XFS_BUF_ZEROFLAGS(bp);
-       XFS_BUF_ASYNC(bp);
-       bp->b_flags |= XBF_SYNCIO;
+       bp->b_flags &= ~(XBF_FUA | XBF_FLUSH);
+       bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE);
 
        if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
                bp->b_flags |= XBF_FUA;
@@ -1893,12 +1892,11 @@ xlog_sync(
 
        /* account for log which doesn't start at block #0 */
        XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
+
        /*
         * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
         * is shutting down.
         */
-       XFS_BUF_WRITE(bp);
-
        error = xlog_bdstrat(bp);
        if (error) {
                xfs_buf_ioerror_alert(bp, "xlog_sync");
@@ -1910,9 +1908,8 @@ xlog_sync(
                xfs_buf_associate_memory(bp,
                                (char *)&iclog->ic_header + count, split);
                bp->b_fspriv = iclog;
-               XFS_BUF_ZEROFLAGS(bp);
-               XFS_BUF_ASYNC(bp);
-               bp->b_flags |= XBF_SYNCIO;
+               bp->b_flags &= ~(XBF_FUA | XBF_FLUSH);
+               bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE);
                if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
                        bp->b_flags |= XBF_FUA;
 
@@ -1921,7 +1918,6 @@ xlog_sync(
 
                /* account for internal log which doesn't start at block #0 */
                XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
-               XFS_BUF_WRITE(bp);
                error = xlog_bdstrat(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp, "xlog_sync (split)");
@@ -3979,7 +3975,7 @@ xfs_log_force_umount(
            log->l_flags & XLOG_ACTIVE_RECOVERY) {
                mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
                if (mp->m_sb_bp)
-                       XFS_BUF_DONE(mp->m_sb_bp);
+                       mp->m_sb_bp->b_flags |= XBF_DONE;
                return 0;
        }
 
@@ -4009,7 +4005,7 @@ xfs_log_force_umount(
        spin_lock(&log->l_icloglock);
        mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
        if (mp->m_sb_bp)
-               XFS_BUF_DONE(mp->m_sb_bp);
+               mp->m_sb_bp->b_flags |= XBF_DONE;
 
        /*
         * Mark the log and the iclogs with IO error flags to prevent any
index da37beb76f6e67faf90a658d55f49a06c6fc4152..1dc0e1488d4c272aa0b59d4ac65a1ecc68845113 100644 (file)
@@ -190,7 +190,7 @@ xlog_bread_noalign(
        ASSERT(nbblks <= bp->b_length);
 
        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
-       XFS_BUF_READ(bp);
+       bp->b_flags |= XBF_READ;
        bp->b_io_length = nbblks;
        bp->b_error = 0;
 
@@ -275,7 +275,6 @@ xlog_bwrite(
        ASSERT(nbblks <= bp->b_length);
 
        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
-       XFS_BUF_ZEROFLAGS(bp);
        xfs_buf_hold(bp);
        xfs_buf_lock(bp);
        bp->b_io_length = nbblks;
@@ -2473,6 +2472,13 @@ xlog_recover_validate_buf_type(
                }
                bp->b_ops = &xfs_sb_buf_ops;
                break;
+#ifdef CONFIG_XFS_RT
+       case XFS_BLFT_RTBITMAP_BUF:
+       case XFS_BLFT_RTSUMMARY_BUF:
+               /* no magic numbers for verification of RT buffers */
+               bp->b_ops = &xfs_rtbuf_ops;
+               break;
+#endif /* CONFIG_XFS_RT */
        default:
                xfs_warn(mp, "Unknown buffer type %d!",
                         xfs_blft_from_flags(buf_f));
@@ -2793,7 +2799,7 @@ xfs_recover_inode_owner_change(
                return -ENOMEM;
 
        /* instantiate the inode */
-       xfs_dinode_from_disk(&ip->i_d, dip);
+       xfs_inode_from_disk(ip, dip);
        ASSERT(ip->i_d.di_version >= 3);
 
        error = xfs_iformat_fork(ip, dip);
@@ -2839,7 +2845,7 @@ xlog_recover_inode_pass2(
        int                     error;
        int                     attr_index;
        uint                    fields;
-       xfs_icdinode_t          *dicp;
+       struct xfs_log_dinode   *ldip;
        uint                    isize;
        int                     need_free = 0;
 
@@ -2892,8 +2898,8 @@ xlog_recover_inode_pass2(
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       dicp = item->ri_buf[1].i_addr;
-       if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
+       ldip = item->ri_buf[1].i_addr;
+       if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
                xfs_alert(mp,
                        "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
                        __func__, item, in_f->ilf_ino);
@@ -2929,13 +2935,13 @@ xlog_recover_inode_pass2(
         * to skip replay when the on disk inode is newer than the log one
         */
        if (!xfs_sb_version_hascrc(&mp->m_sb) &&
-           dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+           ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
                /*
                 * Deal with the wrap case, DI_MAX_FLUSH is less
                 * than smaller numbers
                 */
                if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
-                   dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+                   ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
                        /* do nothing */
                } else {
                        trace_xfs_log_recover_inode_skip(log, in_f);
@@ -2945,13 +2951,13 @@ xlog_recover_inode_pass2(
        }
 
        /* Take the opportunity to reset the flush iteration count */
-       dicp->di_flushiter = 0;
+       ldip->di_flushiter = 0;
 
-       if (unlikely(S_ISREG(dicp->di_mode))) {
-               if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
-                   (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
+       if (unlikely(S_ISREG(ldip->di_mode))) {
+               if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+                   (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
                        XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
-                                        XFS_ERRLEVEL_LOW, mp, dicp);
+                                        XFS_ERRLEVEL_LOW, mp, ldip);
                        xfs_alert(mp,
                "%s: Bad regular inode log record, rec ptr 0x%p, "
                "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
@@ -2959,12 +2965,12 @@ xlog_recover_inode_pass2(
                        error = -EFSCORRUPTED;
                        goto out_release;
                }
-       } else if (unlikely(S_ISDIR(dicp->di_mode))) {
-               if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
-                   (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
-                   (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
+       } else if (unlikely(S_ISDIR(ldip->di_mode))) {
+               if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+                   (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
+                   (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
                        XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
-                                            XFS_ERRLEVEL_LOW, mp, dicp);
+                                            XFS_ERRLEVEL_LOW, mp, ldip);
                        xfs_alert(mp,
                "%s: Bad dir inode log record, rec ptr 0x%p, "
                "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
@@ -2973,32 +2979,32 @@ xlog_recover_inode_pass2(
                        goto out_release;
                }
        }
-       if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
+       if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
        "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
        "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
                        __func__, item, dip, bp, in_f->ilf_ino,
-                       dicp->di_nextents + dicp->di_anextents,
-                       dicp->di_nblocks);
+                       ldip->di_nextents + ldip->di_anextents,
+                       ldip->di_nblocks);
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
+       if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
        "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
        "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
-                       item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
+                       item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       isize = xfs_icdinode_size(dicp->di_version);
+       isize = xfs_log_dinode_size(ldip->di_version);
        if (unlikely(item->ri_buf[1].i_len > isize)) {
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
                        "%s: Bad inode log record length %d, rec ptr 0x%p",
                        __func__, item->ri_buf[1].i_len, item);
@@ -3006,8 +3012,8 @@ xlog_recover_inode_pass2(
                goto out_release;
        }
 
-       /* The core is in in-core format */
-       xfs_dinode_to_disk(dip, dicp);
+       /* recover the log dinode into the on disk inode */
+       xfs_log_dinode_to_disk(ldip, dip);
 
        /* the rest is in on-disk format */
        if (item->ri_buf[1].i_len > isize) {
@@ -4337,8 +4343,8 @@ xlog_recover_process_one_iunlink(
        if (error)
                goto fail_iput;
 
-       ASSERT(ip->i_d.di_nlink == 0);
-       ASSERT(ip->i_d.di_mode != 0);
+       ASSERT(VFS_I(ip)->i_nlink == 0);
+       ASSERT(VFS_I(ip)->i_mode != 0);
 
        /* setup for the next pass */
        agino = be32_to_cpu(dip->di_next_unlinked);
@@ -4491,7 +4497,7 @@ xlog_recover_process(
         * know precisely what failed.
         */
        if (pass == XLOG_RECOVER_CRCPASS) {
-               if (rhead->h_crc && crc != le32_to_cpu(rhead->h_crc))
+               if (rhead->h_crc && crc != rhead->h_crc)
                        return -EFSBADCRC;
                return 0;
        }
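The comparison above no longer byte-swaps h_crc, which only works if the recomputed crc is itself carried in on-disk (little-endian) form; comparing two values kept in the same byte order gives the same answer as comparing their CPU-order conversions. A minimal user-space sketch of that equivalence, assuming nothing beyond <endian.h>:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cpu_crc  = 0x1badc0deu;	/* CRC in CPU byte order */
	uint32_t disk_crc = htole32(cpu_crc);	/* as stored on disk (__le32) */
	uint32_t calc_crc = htole32(cpu_crc);	/* recomputed, kept in LE form */

	/* Comparing the LE-encoded values directly matches the result of
	 * converting both back to CPU order first. */
	printf("raw compare: %d, converted compare: %d\n",
	       calc_crc == disk_crc,
	       le32toh(calc_crc) == le32toh(disk_crc));
	return 0;
}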
@@ -4502,7 +4508,7 @@ xlog_recover_process(
         * zero CRC check prevents warnings from being emitted when upgrading
         * the kernel from one that does not add CRCs by default.
         */
-       if (crc != le32_to_cpu(rhead->h_crc)) {
+       if (crc != rhead->h_crc) {
                if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
                        xfs_alert(log->l_mp,
                "log record CRC mismatch: found 0x%x, expected 0x%x.",
@@ -4926,10 +4932,9 @@ xlog_do_recover(
         * updates, re-read in the superblock and reverify it.
         */
        bp = xfs_getsb(log->l_mp, 0);
-       XFS_BUF_UNDONE(bp);
-       ASSERT(!(XFS_BUF_ISWRITE(bp)));
-       XFS_BUF_READ(bp);
-       XFS_BUF_UNASYNC(bp);
+       bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
+       ASSERT(!(bp->b_flags & XBF_WRITE));
+       bp->b_flags |= XBF_READ;
        bp->b_ops = &xfs_sb_buf_ops;
 
        error = xfs_buf_submit_wait(bp);
index bb753b359bee188b13023caf4597582b402cd31d..986290c4b7ab9fc753772ffe2d56015ef94e4643 100644 (file)
@@ -865,7 +865,7 @@ xfs_mountfs(
 
        ASSERT(rip != NULL);
 
-       if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
+       if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) {
                xfs_warn(mp, "corrupted root inode %llu: not a directory",
                        (unsigned long long)rip->i_ino);
                xfs_iunlock(rip, XFS_ILOCK_EXCL);
@@ -1284,7 +1284,7 @@ xfs_getsb(
        }
 
        xfs_buf_hold(bp);
-       ASSERT(XFS_BUF_ISDONE(bp));
+       ASSERT(bp->b_flags & XBF_DONE);
        return bp;
 }
 
index b57098481c10a2a55a05bf6e75e6e43f7e224401..a4e03ab50342532ef394cd6ca0f34402a52c3153 100644 (file)
@@ -327,7 +327,6 @@ extern int  xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
                                 bool reserved);
 extern int     xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
 
-extern int     xfs_mount_log_sb(xfs_mount_t *);
 extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
 extern int     xfs_readsb(xfs_mount_t *, int);
 extern void    xfs_freesb(xfs_mount_t *);
index 532ab79d38fe376c14a5463a97195b59a61d8f84..be125e1758c1a5e4df36cfb8ec6e3e3643adc534 100644 (file)
@@ -560,6 +560,37 @@ xfs_qm_shrink_count(
        return list_lru_shrink_count(&qi->qi_lru, sc);
 }
 
+STATIC void
+xfs_qm_set_defquota(
+       xfs_mount_t     *mp,
+       uint            type,
+       xfs_quotainfo_t *qinf)
+{
+       xfs_dquot_t             *dqp;
+       struct xfs_def_quota    *defq;
+       int                     error;
+
+       error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);
+
+       if (!error) {
+               xfs_disk_dquot_t        *ddqp = &dqp->q_core;
+
+               defq = xfs_get_defquota(dqp, qinf);
+
+               /*
+                * Timers and warnings have already been set; just set the
+                * default limits for this quota type.
+                */
+               defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
+               defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
+               defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
+               defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
+               defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
+               defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
+               xfs_qm_dqdestroy(dqp);
+       }
+}
+
 /*
  * This initializes all the quota information that's kept in the
  * mount structure
@@ -606,19 +637,19 @@ xfs_qm_init_quotainfo(
         * We try to get the limits from the superuser's limits fields.
         * This is quite hacky, but it is standard quota practice.
         *
-        * We look at the USR dquot with id == 0 first, but if user quotas
-        * are not enabled we goto the GRP dquot with id == 0.
-        * We don't really care to keep separate default limits for user
-        * and group quotas, at least not at this point.
-        *
         * Since we may not have done a quotacheck by this point, just read
         * the dquot without attaching it to any hashtables or lists.
+        *
+        * Timers and warnings are globally set by the first timer found in
+        * user/group/proj quota types, otherwise a default value is used.
+        * This should be split into different fields per quota type.
         */
        error = xfs_qm_dqread(mp, 0,
                        XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
                         (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
                          XFS_DQ_PROJ),
                        XFS_QMOPT_DOWARN, &dqp);
+
        if (!error) {
                xfs_disk_dquot_t        *ddqp = &dqp->q_core;
 
@@ -639,13 +670,6 @@ xfs_qm_init_quotainfo(
                        be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
                        be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
-               qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
-               qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
-               qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
-               qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
-               qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
-               qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
-
                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -656,6 +680,13 @@ xfs_qm_init_quotainfo(
                qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
        }
 
+       if (XFS_IS_UQUOTA_RUNNING(mp))
+               xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
+       if (XFS_IS_GQUOTA_RUNNING(mp))
+               xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
+       if (XFS_IS_PQUOTA_RUNNING(mp))
+               xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
+
        qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
        qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
        qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
index 996a04064894cf4c07ffa28d058150243ffd86a4..2975a822e9f044cbb1ff66ec217d29ab29bf9f22 100644 (file)
@@ -53,6 +53,15 @@ extern struct kmem_zone      *xfs_qm_dqtrxzone;
  */
 #define XFS_DQUOT_CLUSTER_SIZE_FSB     (xfs_filblks_t)1
 
+struct xfs_def_quota {
+       xfs_qcnt_t       bhardlimit;     /* default data blk hard limit */
+       xfs_qcnt_t       bsoftlimit;     /* default data blk soft limit */
+       xfs_qcnt_t       ihardlimit;     /* default inode count hard limit */
+       xfs_qcnt_t       isoftlimit;     /* default inode count soft limit */
+       xfs_qcnt_t       rtbhardlimit;   /* default realtime blk hard limit */
+       xfs_qcnt_t       rtbsoftlimit;   /* default realtime blk soft limit */
+};
+
 /*
  * Various quota information for individual filesystems.
  * The mount structure keeps a pointer to this.
@@ -76,12 +85,9 @@ typedef struct xfs_quotainfo {
        struct mutex     qi_quotaofflock;/* to serialize quotaoff */
        xfs_filblks_t    qi_dqchunklen;  /* # BBs in a chunk of dqs */
        uint             qi_dqperchunk;  /* # ondisk dqs in above chunk */
-       xfs_qcnt_t       qi_bhardlimit;  /* default data blk hard limit */
-       xfs_qcnt_t       qi_bsoftlimit;  /* default data blk soft limit */
-       xfs_qcnt_t       qi_ihardlimit;  /* default inode count hard limit */
-       xfs_qcnt_t       qi_isoftlimit;  /* default inode count soft limit */
-       xfs_qcnt_t       qi_rtbhardlimit;/* default realtime blk hard limit */
-       xfs_qcnt_t       qi_rtbsoftlimit;/* default realtime blk soft limit */
+       struct xfs_def_quota    qi_usr_default;
+       struct xfs_def_quota    qi_grp_default;
+       struct xfs_def_quota    qi_prj_default;
        struct shrinker  qi_shrinker;
 } xfs_quotainfo_t;
 
@@ -104,15 +110,15 @@ xfs_dquot_tree(
 }
 
 static inline struct xfs_inode *
-xfs_dq_to_quota_inode(struct xfs_dquot *dqp)
+xfs_quota_inode(xfs_mount_t *mp, uint dq_flags)
 {
-       switch (dqp->dq_flags & XFS_DQ_ALLTYPES) {
+       switch (dq_flags & XFS_DQ_ALLTYPES) {
        case XFS_DQ_USER:
-               return dqp->q_mount->m_quotainfo->qi_uquotaip;
+               return mp->m_quotainfo->qi_uquotaip;
        case XFS_DQ_GROUP:
-               return dqp->q_mount->m_quotainfo->qi_gquotaip;
+               return mp->m_quotainfo->qi_gquotaip;
        case XFS_DQ_PROJ:
-               return dqp->q_mount->m_quotainfo->qi_pquotaip;
+               return mp->m_quotainfo->qi_pquotaip;
        default:
                ASSERT(0);
        }
@@ -164,11 +170,27 @@ extern void               xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
 
 /* quota ops */
 extern int             xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
-extern int             xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
-                                       uint, struct qc_dqblk *);
+extern int             xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t *,
+                                       uint, struct qc_dqblk *, uint);
 extern int             xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
                                        struct qc_dqblk *);
 extern int             xfs_qm_scall_quotaon(struct xfs_mount *, uint);
 extern int             xfs_qm_scall_quotaoff(struct xfs_mount *, uint);
 
+static inline struct xfs_def_quota *
+xfs_get_defquota(struct xfs_dquot *dqp, struct xfs_quotainfo *qi)
+{
+       struct xfs_def_quota *defq;
+
+       if (XFS_QM_ISUDQ(dqp))
+               defq = &qi->qi_usr_default;
+       else if (XFS_QM_ISGDQ(dqp))
+               defq = &qi->qi_grp_default;
+       else {
+               ASSERT(XFS_QM_ISPDQ(dqp));
+               defq = &qi->qi_prj_default;
+       }
+       return defq;
+}
+
 #endif /* __XFS_QM_H__ */
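The single set of qi_*limit fields becomes one struct xfs_def_quota per quota type, selected through xfs_get_defquota(); the xfs_trans_dqresv() hunk further down uses the selected defaults as a fallback whenever a dquot carries no explicit limit of its own. A stand-alone sketch of that fallback pattern (the names below are illustrative, not the XFS ones):

#include <stdint.h>
#include <stdio.h>

enum quota_type { QT_USER, QT_GROUP, QT_PROJ, QT_MAX };

struct def_quota {			/* per-type defaults, like xfs_def_quota */
	uint64_t bhardlimit;
	uint64_t bsoftlimit;
};

/* effective limit: the dquot's own limit if set, else the per-type default */
static uint64_t effective_limit(uint64_t dquot_limit,
				const struct def_quota *defq, int hard)
{
	if (dquot_limit)
		return dquot_limit;
	return hard ? defq->bhardlimit : defq->bsoftlimit;
}

int main(void)
{
	struct def_quota defaults[QT_MAX] = {
		[QT_USER]  = { .bhardlimit = 1000, .bsoftlimit = 800 },
		[QT_GROUP] = { .bhardlimit = 5000, .bsoftlimit = 4000 },
		[QT_PROJ]  = { .bhardlimit = 0,    .bsoftlimit = 0 },
	};

	/* a group dquot with no explicit hard limit falls back to 5000 */
	printf("%llu\n", (unsigned long long)
	       effective_limit(0, &defaults[QT_GROUP], 1));
	return 0;
}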
index 3640c6e896af70eb2e910a31786cb7ac2298f847..f4d0e0a8f517c65913b8d45f383450384576b39e 100644 (file)
@@ -404,6 +404,7 @@ xfs_qm_scall_setqlim(
        struct xfs_disk_dquot   *ddq;
        struct xfs_dquot        *dqp;
        struct xfs_trans        *tp;
+       struct xfs_def_quota    *defq;
        int                     error;
        xfs_qcnt_t              hard, soft;
 
@@ -431,6 +432,8 @@ xfs_qm_scall_setqlim(
                ASSERT(error != -ENOENT);
                goto out_unlock;
        }
+
+       defq = xfs_get_defquota(dqp, q);
        xfs_dqunlock(dqp);
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
@@ -458,8 +461,8 @@ xfs_qm_scall_setqlim(
                ddq->d_blk_softlimit = cpu_to_be64(soft);
                xfs_dquot_set_prealloc_limits(dqp);
                if (id == 0) {
-                       q->qi_bhardlimit = hard;
-                       q->qi_bsoftlimit = soft;
+                       defq->bhardlimit = hard;
+                       defq->bsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
@@ -474,8 +477,8 @@ xfs_qm_scall_setqlim(
                ddq->d_rtb_hardlimit = cpu_to_be64(hard);
                ddq->d_rtb_softlimit = cpu_to_be64(soft);
                if (id == 0) {
-                       q->qi_rtbhardlimit = hard;
-                       q->qi_rtbsoftlimit = soft;
+                       defq->rtbhardlimit = hard;
+                       defq->rtbsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
@@ -491,8 +494,8 @@ xfs_qm_scall_setqlim(
                ddq->d_ino_hardlimit = cpu_to_be64(hard);
                ddq->d_ino_softlimit = cpu_to_be64(soft);
                if (id == 0) {
-                       q->qi_ihardlimit = hard;
-                       q->qi_isoftlimit = soft;
+                       defq->ihardlimit = hard;
+                       defq->isoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
@@ -635,9 +638,10 @@ out:
 int
 xfs_qm_scall_getquota(
        struct xfs_mount        *mp,
-       xfs_dqid_t              id,
+       xfs_dqid_t              *id,
        uint                    type,
-       struct qc_dqblk         *dst)
+       struct qc_dqblk         *dst,
+       uint                    dqget_flags)
 {
        struct xfs_dquot        *dqp;
        int                     error;
@@ -647,7 +651,7 @@ xfs_qm_scall_getquota(
         * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
         * exist, we'll get ENOENT back.
         */
-       error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
+       error = xfs_qm_dqget(mp, NULL, *id, type, dqget_flags, &dqp);
        if (error)
                return error;
 
@@ -660,6 +664,9 @@ xfs_qm_scall_getquota(
                goto out_put;
        }
 
+       /* Fill in the ID we actually read from disk */
+       *id = be32_to_cpu(dqp->q_core.d_id);
+
        memset(dst, 0, sizeof(*dst));
        dst->d_spc_hardlimit =
                XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
@@ -701,7 +708,7 @@ xfs_qm_scall_getquota(
        if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
             (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
             (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
-           id != 0) {
+           *id != 0) {
                if ((dst->d_space > dst->d_spc_softlimit) &&
                    (dst->d_spc_softlimit > 0)) {
                        ASSERT(dst->d_spc_timer != 0);
index 7795e0d01382a60798b4e83f35ba6db725fe8779..f82d79a8c694a8f32427b8d0e2923dbde1670d88 100644 (file)
@@ -231,14 +231,45 @@ xfs_fs_get_dqblk(
        struct qc_dqblk         *qdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
+       xfs_dqid_t              id;
 
        if (!XFS_IS_QUOTA_RUNNING(mp))
                return -ENOSYS;
        if (!XFS_IS_QUOTA_ON(mp))
                return -ESRCH;
 
-       return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
-                                     xfs_quota_type(qid.type), qdq);
+       id = from_kqid(&init_user_ns, qid);
+       return xfs_qm_scall_getquota(mp, &id,
+                                     xfs_quota_type(qid.type), qdq, 0);
+}
+
+/* Return quota info for active quota >= this qid */
+STATIC int
+xfs_fs_get_nextdqblk(
+       struct super_block      *sb,
+       struct kqid             *qid,
+       struct qc_dqblk         *qdq)
+{
+       int                     ret;
+       struct xfs_mount        *mp = XFS_M(sb);
+       xfs_dqid_t              id;
+
+       if (!XFS_IS_QUOTA_RUNNING(mp))
+               return -ENOSYS;
+       if (!XFS_IS_QUOTA_ON(mp))
+               return -ESRCH;
+
+       id = from_kqid(&init_user_ns, *qid);
+       ret = xfs_qm_scall_getquota(mp, &id,
+                                   xfs_quota_type(qid->type), qdq,
+                                   XFS_QMOPT_DQNEXT);
+       if (ret)
+               return ret;
+
+       /* ID may be different, so convert back what we got */
+       *qid = make_kqid(current_user_ns(), qid->type, id);
+       return 0;
+
 }
 
 STATIC int
@@ -267,5 +298,6 @@ const struct quotactl_ops xfs_quotactl_operations = {
        .quota_disable          = xfs_quota_disable,
        .rm_xquota              = xfs_fs_rm_xquota,
        .get_dqblk              = xfs_fs_get_dqblk,
+       .get_nextdqblk          = xfs_fs_get_nextdqblk,
        .set_dqblk              = xfs_fs_set_dqblk,
 };
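With ->get_nextdqblk wired up, userspace can walk only the quota IDs that actually exist instead of probing every possible ID. A rough sketch of such a walk, assuming the generic Q_GETNEXTQUOTA quotactl command and struct if_nextdqblk that this callback is meant to back, as exposed by a reasonably recent glibc <sys/quota.h>; older toolchains may need the kernel's UAPI <linux/quota.h> directly:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/quota.h>

/* Walk all existing user quotas on `dev`, starting from ID 0. */
static void dump_user_quotas(const char *dev)
{
	struct if_nextdqblk nq;
	uint32_t id = 0;

	for (;;) {
		memset(&nq, 0, sizeof(nq));
		if (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), dev, id,
			     (caddr_t)&nq) < 0) {
			if (errno != ENOENT && errno != ESRCH)
				perror("quotactl");
			break;			/* no more active quotas */
		}
		printf("id %u: %llu bytes used\n",
		       (unsigned)nq.dqb_id,
		       (unsigned long long)nq.dqb_curspace);
		id = nq.dqb_id + 1;		/* continue after the one we got */
	}
}

int main(int argc, char **argv)
{
	if (argc > 1)
		dump_user_quotas(argv[1]);
	return 0;
}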
index be02a68b2fe292e077c84862f93271dd049c3359..abf44435d04a3f4b898e21a00e45ee8ae607738a 100644 (file)
@@ -1272,7 +1272,7 @@ xfs_rtpick_extent(
 
        ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
 
-       seqp = (__uint64_t *)&mp->m_rbmip->i_d.di_atime;
+       seqp = (__uint64_t *)&VFS_I(mp->m_rbmip)->i_atime;
        if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
                mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
                *seqp = 0;
index 391d797cb53fee0a9196d3e392dfd8fa95d5d33a..c8d58426008ed7ef49096097904ed13653a8cfe9 100644 (file)
@@ -1296,11 +1296,7 @@ DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
 DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
 DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
 DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct_new);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct_update);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct_none);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct_endio);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
        TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1340,6 +1336,9 @@ DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
 DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
 DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
 DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
+DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write);
+DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_unwritten);
+DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_append);
 
 DECLARE_EVENT_CLASS(xfs_itrunc_class,
        TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
index 4f18fd92ca13b21d8fd68e955e082d9db9a61195..d6c9c3e9e02b2c45f2cd57074cbbcdebde1e804a 100644 (file)
@@ -497,6 +497,7 @@ xfsaild(
        long            tout = 0;       /* milliseconds */
 
        current->flags |= PF_MEMALLOC;
+       set_freezable();
 
        while (!kthread_should_stop()) {
                if (tout && tout <= 20)
@@ -519,14 +520,14 @@ xfsaild(
                if (!xfs_ail_min(ailp) &&
                    ailp->xa_target == ailp->xa_target_prev) {
                        spin_unlock(&ailp->xa_lock);
-                       schedule();
+                       freezable_schedule();
                        tout = 0;
                        continue;
                }
                spin_unlock(&ailp->xa_lock);
 
                if (tout)
-                       schedule_timeout(msecs_to_jiffies(tout));
+                       freezable_schedule_timeout(msecs_to_jiffies(tout));
 
                __set_current_state(TASK_RUNNING);
 
index 75798412859a7ba2f47b01945c54b7ee82ff4e7e..8ee29ca132dc13c0f302fa470cfaffc788cb9938 100644 (file)
@@ -155,7 +155,7 @@ xfs_trans_get_buf_map(
                ASSERT(xfs_buf_islocked(bp));
                if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
                        xfs_buf_stale(bp);
-                       XFS_BUF_DONE(bp);
+                       bp->b_flags |= XBF_DONE;
                }
 
                ASSERT(bp->b_transp == tp);
@@ -518,7 +518,7 @@ xfs_trans_log_buf(xfs_trans_t       *tp,
         * inside the b_bdstrat callback so that this won't get written to
         * disk.
         */
-       XFS_BUF_DONE(bp);
+       bp->b_flags |= XBF_DONE;
 
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        bp->b_iodone = xfs_buf_iodone_callbacks;
@@ -534,8 +534,8 @@ xfs_trans_log_buf(xfs_trans_t       *tp,
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                bip->bli_flags &= ~XFS_BLI_STALE;
-               ASSERT(XFS_BUF_ISSTALE(bp));
-               XFS_BUF_UNSTALE(bp);
+               ASSERT(bp->b_flags & XBF_STALE);
+               bp->b_flags &= ~XBF_STALE;
                bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }
 
@@ -600,7 +600,7 @@ xfs_trans_binval(
                 * If the buffer is already invalidated, then
                 * just return.
                 */
-               ASSERT(XFS_BUF_ISSTALE(bp));
+               ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
index 995170194df040b5b3e02cab80b5942ee92cb2ad..c3d547211d16001ad686c543fd960996b2321c75 100644 (file)
@@ -609,17 +609,20 @@ xfs_trans_dqresv(
        xfs_qcnt_t      total_count;
        xfs_qcnt_t      *resbcountp;
        xfs_quotainfo_t *q = mp->m_quotainfo;
+       struct xfs_def_quota    *defq;
 
 
        xfs_dqlock(dqp);
 
+       defq = xfs_get_defquota(dqp, q);
+
        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
                if (!hardlimit)
-                       hardlimit = q->qi_bhardlimit;
+                       hardlimit = defq->bhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
                if (!softlimit)
-                       softlimit = q->qi_bsoftlimit;
+                       softlimit = defq->bsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_btimer);
                warns = be16_to_cpu(dqp->q_core.d_bwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
@@ -628,10 +631,10 @@ xfs_trans_dqresv(
                ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
                hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
                if (!hardlimit)
-                       hardlimit = q->qi_rtbhardlimit;
+                       hardlimit = defq->rtbhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
                if (!softlimit)
-                       softlimit = q->qi_rtbsoftlimit;
+                       softlimit = defq->rtbsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
                warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
@@ -672,10 +675,10 @@ xfs_trans_dqresv(
                        warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
                        hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
                        if (!hardlimit)
-                               hardlimit = q->qi_ihardlimit;
+                               hardlimit = defq->ihardlimit;
                        softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
                        if (!softlimit)
-                               softlimit = q->qi_isoftlimit;
+                               softlimit = defq->isoftlimit;
 
                        if (hardlimit && total_count > hardlimit) {
                                xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
index b97f1df910abb0bd60ac5850fbe549237806d078..11a3af08b5c7ea1e40dcd348606414e147e6a0cd 100644 (file)
@@ -75,18 +75,10 @@ xfs_trans_ichgtime(
 
        tv = current_fs_time(inode->i_sb);
 
-       if ((flags & XFS_ICHGTIME_MOD) &&
-           !timespec_equal(&inode->i_mtime, &tv)) {
+       if (flags & XFS_ICHGTIME_MOD)
                inode->i_mtime = tv;
-               ip->i_d.di_mtime.t_sec = tv.tv_sec;
-               ip->i_d.di_mtime.t_nsec = tv.tv_nsec;
-       }
-       if ((flags & XFS_ICHGTIME_CHG) &&
-           !timespec_equal(&inode->i_ctime, &tv)) {
+       if (flags & XFS_ICHGTIME_CHG)
                inode->i_ctime = tv;
-               ip->i_d.di_ctime.t_sec = tv.tv_sec;
-               ip->i_d.di_ctime.t_nsec = tv.tv_nsec;
-       }
 }
 
 /*
@@ -125,7 +117,7 @@ xfs_trans_log_inode(
         */
        if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) &&
            IS_I_VERSION(VFS_I(ip))) {
-               ip->i_d.di_changecount = ++VFS_I(ip)->i_version;
+               VFS_I(ip)->i_version++;
                flags |= XFS_ILOG_CORE;
        }
 
index 717a29810473894aaf2f22e6e487f84087628df5..dad8af3ebeb5405c144db72cea488d7f5ea5fd39 100644 (file)
@@ -133,6 +133,5 @@ extern int acpi_get_psd_map(struct cpudata **);
 /* Methods to interact with the PCC mailbox controller. */
 extern struct mbox_chan *
        pcc_mbox_request_channel(struct mbox_client *, unsigned int);
-extern int mbox_send_message(struct mbox_chan *chan, void *mssg);
 
 #endif /* _CPPC_ACPI_H*/
index 1cbb8338edf391bd83c4d1b0bc0dff2cbbe56e75..827e4d3bbc7a46ef59222651a8020234addc82cb 100644 (file)
@@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 #endif
 
 /* Return a pointer with offset calculated */
-#define __set_fixmap_offset(idx, phys, flags)                \
-({                                                           \
-       unsigned long addr;                                   \
-       __set_fixmap(idx, phys, flags);                       \
-       addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
-       addr;                                                 \
+#define __set_fixmap_offset(idx, phys, flags)                          \
+({                                                                     \
+       unsigned long ________addr;                                     \
+       __set_fixmap(idx, phys, flags);                                 \
+       ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1));   \
+       ________addr;                                                   \
 })
 
 #define set_fixmap_offset(idx, phys) \
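The underscore-heavy local name guards against expansion-time shadowing: with the old spelling, passing the macro an expression that itself mentions a variable named addr would silently bind to the macro's own addr instead of the caller's. A small user-space reproduction of that pitfall, nothing kernel-specific assumed:

#include <stdio.h>

/* BROKEN: the statement expression declares its own 'addr' */
#define OFFSET_BAD(x)  ({ unsigned long addr = 0x1000; addr + (x); })

/* OK: an unlikely-to-collide internal name, like the fixmap fix */
#define OFFSET_GOOD(x) ({ unsigned long ________addr = 0x1000; ________addr + (x); })

int main(void)
{
	unsigned long addr = 0x40;

	/* OFFSET_BAD(addr) expands to
	 * ({ unsigned long addr = 0x1000; addr + (addr); })
	 * -> 0x2000, not the expected 0x1040 */
	printf("caller addr: %#lx\n", addr);
	printf("bad:  %#lx\n", OFFSET_BAD(addr));
	printf("good: %#lx\n", OFFSET_GOOD(addr));
	return 0;
}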
diff --git a/include/asm-generic/pci-bridge.h b/include/asm-generic/pci-bridge.h
deleted file mode 100644 (file)
index 20db2e5..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_GENERIC_PCI_BRIDGE_H
-#define _ASM_GENERIC_PCI_BRIDGE_H
-
-#ifdef __KERNEL__
-
-enum {
-       /* Force re-assigning all resources (ignore firmware
-        * setup completely)
-        */
-       PCI_REASSIGN_ALL_RSRC   = 0x00000001,
-
-       /* Re-assign all bus numbers */
-       PCI_REASSIGN_ALL_BUS    = 0x00000002,
-
-       /* Do not try to assign, just use existing setup */
-       PCI_PROBE_ONLY          = 0x00000004,
-
-       /* Don't bother with ISA alignment unless the bridge has
-        * ISA forwarding enabled
-        */
-       PCI_CAN_SKIP_ISA_ALIGN  = 0x00000008,
-
-       /* Enable domain numbers in /proc */
-       PCI_ENABLE_PROC_DOMAINS = 0x00000010,
-       /* ... except for domain 0 */
-       PCI_COMPAT_DOMAIN_0     = 0x00000020,
-
-       /* PCIe downstream ports are bridges that normally lead to only a
-        * device 0, but if this is set, we scan all possible devices, not
-        * just device 0.
-        */
-       PCI_SCAN_ALL_PCIE_DEVS  = 0x00000040,
-};
-
-#ifdef CONFIG_PCI
-extern unsigned int pci_flags;
-
-static inline void pci_set_flags(int flags)
-{
-       pci_flags = flags;
-}
-
-static inline void pci_add_flags(int flags)
-{
-       pci_flags |= flags;
-}
-
-static inline void pci_clear_flags(int flags)
-{
-       pci_flags &= ~flags;
-}
-
-static inline int pci_has_flag(int flag)
-{
-       return pci_flags & flag;
-}
-#else
-static inline void pci_set_flags(int flags) { }
-static inline void pci_add_flags(int flags) { }
-static inline void pci_clear_flags(int flags) { }
-static inline int pci_has_flag(int flag)
-{
-       return 0;
-}
-#endif /* CONFIG_PCI */
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_PCI_BRIDGE_H */
index 0b3c0d39ef753053bb26c1b9fb4979e706240a58..c370b261c72004dcafa3dd036920b7b5fe3d01a2 100644 (file)
@@ -239,6 +239,14 @@ extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);
 #endif
 
+#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                                          unsigned long address, pmd_t *pmdp)
+{
+
+}
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t pte_a, pte_t pte_b)
 {
index 89d008dc08e2e2c6caa1d1d5eeb1ac3bdedd2474..fe5efada9d68237c3863ad8f9bb972da6f610819 100644 (file)
@@ -42,6 +42,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool async);
 
+bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
+                                          struct drm_atomic_state *old_state,
+                                          struct drm_crtc *crtc);
+
 void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                                        struct drm_atomic_state *old_state);
 
index 7bfb063029d83fe1374e78a174f4a638bc39bc49..461a0558bca4d8d16e81b88e0286e3fa6251ab79 100644 (file)
 
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 
+static inline bool drm_arch_can_wc_memory(void)
+{
+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+       return false;
+#else
+       return true;
+#endif
+}
+
 #endif
index 24ab1787b771936fdf1894aabd6271f65a771993..fdb47051d5492a4db126cc525667bd181343d2cd 100644 (file)
@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
 /**
  * struct drm_dp_mst_port - MST port
  * @kref: reference count for this port.
- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
- * @guid: guid for DP 1.2 device on this port.
  * @port_num: port number
  * @input: if this port is an input port.
  * @mcs: message capability status - DP 1.2 spec.
@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
 struct drm_dp_mst_port {
        struct kref kref;
 
-       /* if dpcd 1.2 device is on this port - its GUID info */
-       bool guid_valid;
-       u8 guid[16];
-
        u8 port_num;
        bool input;
        bool mcs;
@@ -110,10 +104,12 @@ struct drm_dp_mst_port {
  * @tx_slots: transmission slots for this device.
  * @last_seqno: last sequence number used to talk to this.
  * @link_address_sent: if a link address message has been sent to this device yet.
+ * @guid: guid for DP 1.2 branch device. Ports under this branch can be
+ * identified by port number.
  *
  * This structure represents an MST branch device, there is one
- * primary branch device at the root, along with any others connected
- * to downstream ports
+ * primary branch device at the root, along with any other branches connected
+ * to downstream ports of parent branches.
  */
 struct drm_dp_mst_branch {
        struct kref kref;
@@ -132,6 +128,9 @@ struct drm_dp_mst_branch {
        struct drm_dp_sideband_msg_tx *tx_slots[2];
        int last_seqno;
        bool link_address_sent;
+
+       /* global unique identifier to identify branch devices */
+       u8 guid[16];
 };
 
 
@@ -406,11 +405,9 @@ struct drm_dp_payload {
  * @conn_base_id: DRM connector ID this mgr is connected to.
  * @down_rep_recv: msg receiver state for down replies.
  * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, guid, dpcd.
+ * @lock: protects mst state, primary, dpcd.
  * @mst_state: if this manager is enabled for an MST capable port.
  * @mst_primary: pointer to the primary branch device.
- * @guid_valid: GUID valid for the primary branch device.
- * @guid: GUID for primary port.
  * @dpcd: cache of DPCD for primary port.
  * @pbn_div: PBN to slots divisor.
  *
@@ -432,13 +429,11 @@ struct drm_dp_mst_topology_mgr {
        struct drm_dp_sideband_msg_rx up_req_recv;
 
        /* pointer to info about the initial MST device */
-       struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+       struct mutex lock; /* protects mst_state + primary + dpcd */
 
        bool mst_state;
        struct drm_dp_mst_branch *mst_primary;
-       /* primary MST device GUID */
-       bool guid_valid;
-       u8 guid[16];
+
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
        u8 sink_count;
        int pbn_div;
index d639049a613dfdb264650f8ee938836e6ed28e10..553210c02ee0f655fd28b8ae64d370e7cee2fc70 100644 (file)
@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
 #define DRM_FIXED_ONE          (1ULL << DRM_FIXED_POINT)
 #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
 #define DRM_FIXED_DIGITS_MASK  (~DRM_FIXED_DECIMAL_MASK)
+#define DRM_FIXED_EPSILON      1LL
+#define DRM_FIXED_ALMOST_ONE   (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
 
 static inline s64 drm_int2fixp(int a)
 {
        return ((s64)a) << DRM_FIXED_POINT;
 }
 
-static inline int drm_fixp2int(int64_t a)
+static inline int drm_fixp2int(s64 a)
 {
        return ((s64)a) >> DRM_FIXED_POINT;
 }
 
-static inline unsigned drm_fixp_msbset(int64_t a)
+static inline int drm_fixp2int_ceil(s64 a)
+{
+       if (a > 0)
+               return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+       else
+               return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+}
+
+static inline unsigned drm_fixp_msbset(s64 a)
 {
        unsigned shift, sign = (a >> 63) & 1;
 
@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
        return result;
 }
 
+static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
+{
+       s64 res;
+       bool a_neg = a < 0;
+       bool b_neg = b < 0;
+       u64 a_abs = a_neg ? -a : a;
+       u64 b_abs = b_neg ? -b : b;
+       u64 rem;
+
+       /* determine integer part */
+       u64 res_abs  = div64_u64_rem(a_abs, b_abs, &rem);
+
+       /* determine fractional part */
+       {
+               u32 i = DRM_FIXED_POINT;
+
+               do {
+                       rem <<= 1;
+                       res_abs <<= 1;
+                       if (rem >= b_abs) {
+                               res_abs |= 1;
+                               rem -= b_abs;
+                       }
+               } while (--i != 0);
+       }
+
+       /* round up LSB */
+       {
+               u64 summand = (rem << 1) >= b_abs;
+
+               res_abs += summand;
+       }
+
+       res = (s64) res_abs;
+       if (a_neg ^ b_neg)
+               res = -res;
+       return res;
+}
+
 static inline s64 drm_fixp_exp(s64 x)
 {
        s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
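drm_fixp_from_fraction() builds a signed fixed-point value by doing the integer division first and then generating the fractional bits one at a time with a shift-and-subtract loop, rounding the final bit; drm_fixp2int_ceil() then rounds up by adding "almost one" before truncating. A user-space model of the same division loop, checked against plain floating point, assuming DRM_FIXED_POINT is 32 as in drm_fixed.h:

#include <stdint.h>
#include <stdio.h>

#define FIXED_POINT 32

/* same shift-and-subtract scheme as drm_fixp_from_fraction() */
static int64_t fixp_from_fraction(int64_t a, int64_t b)
{
	int a_neg = a < 0, b_neg = b < 0;
	uint64_t a_abs = a_neg ? -a : a;
	uint64_t b_abs = b_neg ? -b : b;
	uint64_t res = a_abs / b_abs;		/* integer part */
	uint64_t rem = a_abs % b_abs;
	int i;

	for (i = 0; i < FIXED_POINT; i++) {	/* fractional bits, MSB first */
		rem <<= 1;
		res <<= 1;
		if (rem >= b_abs) {
			res |= 1;
			rem -= b_abs;
		}
	}
	res += (rem << 1) >= b_abs;		/* round the last bit */

	return (a_neg ^ b_neg) ? -(int64_t)res : (int64_t)res;
}

int main(void)
{
	int64_t f = fixp_from_fraction(2, 3);

	printf("2/3 ~ %.10f (fixed) vs %.10f (double)\n",
	       (double)f / (double)(1ULL << FIXED_POINT), 2.0 / 3.0);
	return 0;
}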
index 1579e07f96a312db9e456fb5b2c8064f1e1f26af..efcbc594fe82e6837a73d575c215b826dcddca6d 100644 (file)
 #define R8A7793_CLK_SCU_ALL            17
 #define R8A7793_CLK_SCU_DVC1           18
 #define R8A7793_CLK_SCU_DVC0           19
+#define R8A7793_CLK_SCU_CTU1_MIX1      20
+#define R8A7793_CLK_SCU_CTU0_MIX0      21
 #define R8A7793_CLK_SCU_SRC9           22
 #define R8A7793_CLK_SCU_SRC8           23
 #define R8A7793_CLK_SCU_SRC7           24
index 8df77a7c030b0647abbf81e3a592c488f273638c..4f53e70f68ee5b32ee69cd825e370ba3f65d9ce8 100644 (file)
@@ -55,6 +55,7 @@
 #define SCLK_TIMER6            90
 #define SCLK_JTAG              91
 #define SCLK_SMC               92
+#define SCLK_TSADC             93
 
 #define DCLK_LCDC0             190
 #define DCLK_LCDC1             191
index 6f45aea49e4ff049a51394f6782a803e32c389eb..0a05b0d36ae74d6c4e4a7e810e442ab2ff2b09bf 100644 (file)
 /* 104 */
 /* 105 */
 #define TEGRA210_CLK_D_AUDIO 106
-/* 107 ( affects abp -> ape) */
+#define TEGRA210_CLK_APB2APE 107
 /* 108 */
 /* 109 */
 /* 110 */
diff --git a/include/dt-bindings/power/rk3368-power.h b/include/dt-bindings/power/rk3368-power.h
new file mode 100644 (file)
index 0000000..93633d5
--- /dev/null
@@ -0,0 +1,28 @@
+#ifndef __DT_BINDINGS_POWER_RK3368_POWER_H__
+#define __DT_BINDINGS_POWER_RK3368_POWER_H__
+
+/* VD_CORE */
+#define RK3368_PD_A53_L0       0
+#define RK3368_PD_A53_L1       1
+#define RK3368_PD_A53_L2       2
+#define RK3368_PD_A53_L3       3
+#define RK3368_PD_SCU_L                4
+#define RK3368_PD_A53_B0       5
+#define RK3368_PD_A53_B1       6
+#define RK3368_PD_A53_B2       7
+#define RK3368_PD_A53_B3       8
+#define RK3368_PD_SCU_B                9
+
+/* VD_LOGIC */
+#define RK3368_PD_BUS          10
+#define RK3368_PD_PERI         11
+#define RK3368_PD_VIO          12
+#define RK3368_PD_ALIVE                13
+#define RK3368_PD_VIDEO                14
+#define RK3368_PD_GPU_0                15
+#define RK3368_PD_GPU_1                16
+
+/* VD_PMU */
+#define RK3368_PD_PMU          17
+
+#endif
index 3feb1b2d75d87b83febb605a86720b06a8132d3f..0367c63f596019b5b94d66f315d426e6ab8a4cc4 100644 (file)
@@ -151,6 +151,8 @@ struct bcma_host_ops {
 #define BCMA_CORE_PCIE2                        0x83C   /* PCI Express Gen2 */
 #define BCMA_CORE_USB30_DEV            0x83D
 #define BCMA_CORE_ARM_CR4              0x83E
+#define BCMA_CORE_GCI                  0x840
+#define BCMA_CORE_CMEM                 0x846   /* CNDS DDR2/3 memory controller */
 #define BCMA_CORE_ARM_CA7              0x847
 #define BCMA_CORE_SYS_MEM              0x849
 #define BCMA_CORE_DEFAULT              0xFFF
@@ -199,6 +201,7 @@ struct bcma_host_ops {
 #define  BCMA_PKG_ID_BCM4707   1
 #define  BCMA_PKG_ID_BCM4708   2
 #define  BCMA_PKG_ID_BCM4709   0
+#define BCMA_CHIP_ID_BCM47094  53030
 #define BCMA_CHIP_ID_BCM53018  53018
 
 /* Board types (on PCI usually equals to the subsystem dev id) */
index db51a6ffb7d6839ba1ffd08ffd072faed79031dc..700d0c6f7480e46f972a7f30756cbb3320f3dd6c 100644 (file)
 #define         BCMA_CC_CLKDIV_JTAG_SHIFT      8
 #define         BCMA_CC_CLKDIV_UART            0x000000FF
 #define BCMA_CC_CAP_EXT                        0x00AC          /* Capabilities */
+#define  BCMA_CC_CAP_EXT_SECI_PRESENT  0x00000001
+#define  BCMA_CC_CAP_EXT_GSIO_PRESENT  0x00000002
+#define  BCMA_CC_CAP_EXT_GCI_PRESENT   0x00000004
+#define  BCMA_CC_CAP_EXT_SECI_PUART_PRESENT            0x00000008    /* UART present */
+#define  BCMA_CC_CAP_EXT_AOB_PRESENT   0x00000040
 #define BCMA_CC_PLLONDELAY             0x00B0          /* Rev >= 4 only */
 #define BCMA_CC_FREFSELDELAY           0x00B4          /* Rev >= 4 only */
 #define BCMA_CC_SLOWCLKCTL             0x00B8          /* 6 <= Rev <= 9 only */
 #define BCMA_CC_PMU_RES_REQTS          0x0640 /* PMU res req timer sel */
 #define BCMA_CC_PMU_RES_REQT           0x0644 /* PMU res req timer */
 #define BCMA_CC_PMU_RES_REQM           0x0648 /* PMU res req mask */
-#define BCMA_CC_CHIPCTL_ADDR           0x0650
-#define BCMA_CC_CHIPCTL_DATA           0x0654
-#define BCMA_CC_REGCTL_ADDR            0x0658
-#define BCMA_CC_REGCTL_DATA            0x065C
-#define BCMA_CC_PLLCTL_ADDR            0x0660
-#define BCMA_CC_PLLCTL_DATA            0x0664
+#define BCMA_CC_PMU_CHIPCTL_ADDR       0x0650
+#define BCMA_CC_PMU_CHIPCTL_DATA       0x0654
+#define BCMA_CC_PMU_REGCTL_ADDR                0x0658
+#define BCMA_CC_PMU_REGCTL_DATA                0x065C
+#define BCMA_CC_PMU_PLLCTL_ADDR                0x0660
+#define BCMA_CC_PMU_PLLCTL_DATA                0x0664
 #define BCMA_CC_PMU_STRAPOPT           0x0668 /* (corerev >= 28) */
 #define BCMA_CC_PMU_XTAL_FREQ          0x066C /* (pmurev >= 10) */
 #define  BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK     0x00001FFF
  * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
  */
 struct bcma_chipcommon_pmu {
+       struct bcma_device *core;       /* Can be a separate core or just the ChipCommon one */
        u8 rev;                 /* PMU revision */
        u32 crystalfreq;        /* The active crystal frequency (in kHz) */
 };
@@ -660,6 +666,19 @@ struct bcma_drv_cc_b {
 #define bcma_cc_maskset32(cc, offset, mask, set) \
        bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
 
+/* PMU registers access */
+#define bcma_pmu_read32(cc, offset) \
+       bcma_read32((cc)->pmu.core, offset)
+#define bcma_pmu_write32(cc, offset, val) \
+       bcma_write32((cc)->pmu.core, offset, val)
+
+#define bcma_pmu_mask32(cc, offset, mask) \
+       bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask))
+#define bcma_pmu_set32(cc, offset, set) \
+       bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set))
+#define bcma_pmu_maskset32(cc, offset, mask, set) \
+       bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set))
+
 extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
 
 extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
index 83d1926c61e4567b881bfbc26b75b802c428cbc3..90ee6ab24bc53badebf5ab0f6362de1d4ad2062d 100644 (file)
@@ -151,6 +151,7 @@ struct bpf_array {
        union {
                char value[0] __aligned(8);
                void *ptrs[0] __aligned(8);
+               void __percpu *pptrs[0] __aligned(8);
        };
 };
 #define MAX_TAIL_CALL_CNT 32
@@ -182,6 +183,29 @@ int bpf_prog_new_fd(struct bpf_prog *prog);
 int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
 int bpf_obj_get_user(const char __user *pathname);
 
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
+                          u64 flags);
+int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+                           u64 flags);
+
+/* memcpy used with 8-byte aligned pointers and sizes that are a multiple of 8; it is
+ * forced to use 'long' read/writes to try to atomically copy long counters.
+ * Best-effort only.  No barriers here, since it _will_ race with concurrent
+ * updates from BPF programs. Called from bpf syscall and mostly used with
+ * size 8 or 16 bytes, so ask compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+       const long *lsrc = src;
+       long *ldst = dst;
+
+       size /= sizeof(long);
+       while (size--)
+               *ldst++ = *lsrc++;
+}
+
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
 #else
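bpf_long_memcpy() trades a plain memcpy() for word-sized loads and stores so that each 8-byte counter is copied in a single access: the value seen may be stale if a BPF program updates it concurrently, but on the 64-bit targets this is aimed at it is not a torn mix of two writes. The same idea in ordinary user-space C (illustrative only; real concurrent code would want C11 atomics or READ_ONCE-style annotations):

#include <stdint.h>
#include <stdio.h>

/* copy 8-byte-aligned, multiple-of-8-sized data one long at a time */
static void long_memcpy(void *dst, const void *src, uint32_t size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}

int main(void)
{
	uint64_t percpu_counters[4] = { 1, 2, 3, 4 };
	uint64_t snapshot[4];

	long_memcpy(snapshot, percpu_counters, sizeof(snapshot));
	printf("%llu %llu %llu %llu\n",
	       (unsigned long long)snapshot[0], (unsigned long long)snapshot[1],
	       (unsigned long long)snapshot[2], (unsigned long long)snapshot[3]);
	return 0;
}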
index f89b31d45cc894814d409a19e161a29c2c41e832..c1ef6f14e7be7f1317e34a0deac61fcb80511eef 100644 (file)
 #define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
 // duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
 #define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49)  /* overlap w/ above */
+#define CEPH_FEATURE_MON_METADATA (1ULL<<50)
+#define CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT (1ULL<<51) /* can sort objs bitwise */
+#define CEPH_FEATURE_OSD_PROXY_WRITE_FEATURES (1ULL<<52)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 (1ULL<<53)
+#define CEPH_FEATURE_OSD_HITSET_GMT (1ULL<<54)
+#define CEPH_FEATURE_HAMMER_0_94_4 (1ULL<<55)
+#define CEPH_FEATURE_NEW_OSDOP_ENCODING   (1ULL<<56) /* New, v7 encoding */
+#define CEPH_FEATURE_MON_STATEFUL_SUB (1ULL<<57) /* stateful mon subscription */
+#define CEPH_FEATURE_MON_ROUTE_OSDMAP (1ULL<<57) /* peon sends osdmaps */
+#define CEPH_FEATURE_CRUSH_TUNABLES5   (1ULL<<58) /* chooseleaf stable mode */
+// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
+#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING   (1ULL<<58) /* New, v7 encoding */
 
 /*
  * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -108,7 +120,9 @@ static inline u64 ceph_sanitize_features(u64 features)
         CEPH_FEATURE_CRUSH_TUNABLES3 |         \
         CEPH_FEATURE_OSD_PRIMARY_AFFINITY |    \
         CEPH_FEATURE_MSGR_KEEPALIVE2 |         \
-        CEPH_FEATURE_CRUSH_V4)
+        CEPH_FEATURE_CRUSH_V4 |                \
+        CEPH_FEATURE_CRUSH_TUNABLES5 |         \
+        CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT   \
        (CEPH_FEATURE_NOSRCADDR |        \
index 7f540f7f588d8c8461af975a5ebd21a08e6cf14b..789471dba6fb30f15c752fbca46ddbefe5bfe850 100644 (file)
@@ -127,6 +127,12 @@ struct cgroup_subsys_state {
         */
        u64 serial_nr;
 
+       /*
+        * Incremented by online self and children.  Used to guarantee that
+        * parents are not offlined before their children.
+        */
+       atomic_t online_cnt;
+
        /* percpu_ref killing and RCU release */
        struct rcu_head rcu_head;
        struct work_struct destroy_work;
index bda5ec0b4b4dc26a8431756e468241aad6bdd030..fccf7f44139dd67c2bae6db1799e2517bcd9cd00 100644 (file)
@@ -37,7 +37,7 @@ struct cleancache_ops {
        void (*invalidate_fs)(int);
 };
 
-extern int cleancache_register_ops(struct cleancache_ops *ops);
+extern int cleancache_register_ops(const struct cleancache_ops *ops);
 extern void __cleancache_init_fs(struct super_block *);
 extern void __cleancache_init_shared_fs(struct super_block *);
 extern int  __cleancache_get_page(struct page *);
@@ -48,14 +48,14 @@ extern void __cleancache_invalidate_fs(struct super_block *);
 
 #ifdef CONFIG_CLEANCACHE
 #define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled(struct page *page)
-{
-       return page->mapping->host->i_sb->cleancache_poolid >= 0;
-}
 static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
 {
        return mapping->host->i_sb->cleancache_poolid >= 0;
 }
+static inline bool cleancache_fs_enabled(struct page *page)
+{
+       return cleancache_fs_enabled_mapping(page->mapping);
+}
 #else
 #define cleancache_enabled (0)
 #define cleancache_fs_enabled(_page) (0)
@@ -89,11 +89,9 @@ static inline void cleancache_init_shared_fs(struct super_block *sb)
 
 static inline int cleancache_get_page(struct page *page)
 {
-       int ret = -1;
-
        if (cleancache_enabled && cleancache_fs_enabled(page))
-               ret = __cleancache_get_page(page);
-       return ret;
+               return __cleancache_get_page(page);
+       return -1;
 }
 
 static inline void cleancache_put_page(struct page *page)
index d1e49d52b6407979e19e9c1fc788a53bdb48e1ae..de179993e039d41d7e9034b5744be40954f81c09 100644 (file)
@@ -10,3 +10,8 @@
 #undef uninitialized_var
 #define uninitialized_var(x) x = *(&(x))
 #endif
+
+/* same as gcc, this was present in clang-2.6 so we can assume it works
+ * with any version that can compile the kernel
+ */
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
index 88a4215125bce6be0ac47ef1fda2948eead30c27..d0bf555b6bbfb2344d199eeadf338c5183f72852 100644 (file)
@@ -464,29 +464,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 
-/* CPUFREQ DEFAULT GOVERNOR */
-/*
- * Performance governor is fallback governor if any other gov failed to auto
- * load due latency restrictions
- */
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
-extern struct cpufreq_governor cpufreq_gov_performance;
-#endif
-#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_performance)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
-extern struct cpufreq_governor cpufreq_gov_powersave;
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_powersave)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
-extern struct cpufreq_governor cpufreq_gov_userspace;
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_userspace)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
-extern struct cpufreq_governor cpufreq_gov_ondemand;
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_ondemand)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
-extern struct cpufreq_governor cpufreq_gov_conservative;
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_conservative)
-#endif
+struct cpufreq_governor *cpufreq_default_governor(void);
+struct cpufreq_governor *cpufreq_fallback_governor(void);
 
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
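Instead of the removed #ifdef ladder in the header, each governor now answers for itself; presumably a governor that can be the build-time default provides cpufreq_default_governor() in its own file, and the performance governor keeps its role as the fallback when another governor fails to load due to latency restrictions, roughly along these lines (a sketch of the expected shape, not copied from the cpufreq patches):

#include <linux/cpufreq.h>

/* e.g. in cpufreq_performance.c, next to the governor object defined there */

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &cpufreq_gov_performance;
}
#endif

struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return &cpufreq_gov_performance;
}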
index 85a868ccb4931d374a1ee9fb4e4036bb84399561..fea160ee5803fd121d0493f622e240b4c35da480 100644 (file)
@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
        task_unlock(current);
 }
 
+extern void cpuset_post_attach_flush(void);
+
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
        return false;
 }
 
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
index 48b49305716bd728e69477db6b430c34e7781746..be8f12b8f1950499380c10de27ab6928df25fd8e 100644 (file)
@@ -59,7 +59,8 @@ enum {
        CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
        CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
        CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
-       CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
+       CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
+       CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
 };
 
 /*
@@ -205,6 +206,11 @@ struct crush_map {
         * mappings line up a bit better with previous mappings. */
        __u8 chooseleaf_vary_r;
 
+       /* if true, it makes chooseleaf firstn return stable results (if
+        * no local retry) so that data migrations would be optimal when some
+        * device fails. */
+       __u8 chooseleaf_stable;
+
 #ifndef __KERNEL__
        /*
         * version 0 (original) of straw_calc has various flaws.  version 1
index 8204c3dc3800f37839513d7f2c0567d6a41f87a7..818e45078929b94af5780a8c91f0e84d82da8e0c 100644 (file)
@@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
                dax_iodone_t);
 int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
                dax_iodone_t);
+
+#ifdef CONFIG_FS_DAX
+struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+#else
+static inline struct page *read_dax_sector(struct block_device *bdev,
+               sector_t n)
+{
+       return ERR_PTR(-ENXIO);
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
                                unsigned int flags, get_block_t, dax_iodone_t);
index 251a2090a55444cec55ce4f04510b6ef83a69cfb..e0ee0b3000b2da107c975137165fc989777d8a58 100644 (file)
@@ -19,6 +19,8 @@
 
 int devpts_new_index(struct inode *ptmx_inode);
 void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
 /* mknod in devpts */
 struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
                void *priv);
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
 /* Dummy stubs in the no-pty case */
 static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
 static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
 static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
                dev_t device, int index, void *priv)
 {
index 16a1cad30c337369e62efab4a808397853d7b26a..0a9a0ba1998b01b97304322151cd24a42e2f8e57 100644 (file)
@@ -401,6 +401,7 @@ enum dma_residue_granularity {
  *     since the enum dma_transfer_direction is not defined as bits for each
  *     type of direction, the dma controller should fill (1 << <TYPE>) and same
  *     should be checked by controller as well
+ * @max_burst: max burst capability per-transfer
  * @cmd_pause: true, if pause and thereby resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
  * @residue_granularity: granularity of the reported transfer residue
@@ -411,6 +412,7 @@ struct dma_slave_caps {
        u32 src_addr_widths;
        u32 dst_addr_widths;
        u32 directions;
+       u32 max_burst;
        bool cmd_pause;
        bool cmd_terminate;
        enum dma_residue_granularity residue_granularity;
@@ -654,6 +656,7 @@ struct dma_filter {
  *     the enum dma_transfer_direction is not defined as bits for
  *     each type of direction, the dma controller should fill (1 <<
  *     <TYPE>) and same should be checked by controller as well
+ * @max_burst: max burst capability per-transfer
  * @residue_granularity: granularity of the transfer residue reported
  *     by tx_status
  * @device_alloc_chan_resources: allocate resources and return the
@@ -712,6 +715,7 @@ struct dma_device {
        u32 src_addr_widths;
        u32 dst_addr_widths;
        u32 directions;
+       u32 max_burst;
        bool descriptor_reuse;
        enum dma_residue_granularity residue_granularity;
 
index ff8b55359648c9cbb91d91cf2c123a5911b806b9..0de21e9359760d5b19640b5a532398f8a800b7fa 100644 (file)
@@ -15,6 +15,7 @@
 #define QTREE_DEL_REWRITE 6
 
 struct dquot;
+struct kqid;
 
 /* Operations */
 struct qtree_fmt_operations {
@@ -52,5 +53,6 @@ static inline int qtree_depth(struct qtree_mem_dqinfo *info)
                entries *= epb;
        return i;
 }
+int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid);
 
 #endif /* _LINUX_DQBLK_QTREE_H */
index e59c3be921069635c90da255087b12978bf70c9a..f43e6a01a0236ed30674f778d48c6c78a8c2e938 100644 (file)
@@ -21,7 +21,7 @@
 #define F2FS_BLKSIZE                   4096    /* support only 4KB block */
 #define F2FS_BLKSIZE_BITS              12      /* bits for F2FS_BLKSIZE */
 #define F2FS_MAX_EXTENSION             64      /* # of extension entries */
-#define F2FS_BLK_ALIGN(x)      (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+#define F2FS_BLK_ALIGN(x)      (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
 
 #define NULL_ADDR              ((block_t)0)    /* used as block_t addresses */
 #define NEW_ADDR               ((block_t)-1)   /* used as block_t addresses */
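
The rewritten F2FS_BLK_ALIGN() keeps the same round-up semantics: since F2FS_BLKSIZE is exactly 1 << F2FS_BLKSIZE_BITS, shifting right by 12 gives the same result as dividing by 4096, e.g. F2FS_BLK_ALIGN(5000) = (5000 + 4095) >> 12 = 2 blocks either way; the shift form simply avoids an integer division.
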
@@ -170,12 +170,12 @@ struct f2fs_extent {
 #define F2FS_INLINE_XATTR_ADDRS        50      /* 200 bytes for inline xattrs */
 #define DEF_ADDRS_PER_INODE    923     /* Address Pointers in an Inode */
 #define DEF_NIDS_PER_INODE     5       /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(fi)    addrs_per_inode(fi)
+#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
 #define ADDRS_PER_BLOCK                1018    /* Address Pointers in a Direct Block */
 #define NIDS_PER_BLOCK         1018    /* Node IDs in an Indirect Block */
 
-#define ADDRS_PER_PAGE(page, fi)       \
-       (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(page, inode)    \
+       (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
 
 #define        NODE_DIR1_BLOCK         (DEF_ADDRS_PER_INODE + 1)
 #define        NODE_DIR2_BLOCK         (DEF_ADDRS_PER_INODE + 2)
@@ -345,7 +345,7 @@ struct f2fs_summary {
 
 struct summary_footer {
        unsigned char entry_type;       /* SUM_TYPE_XXX */
-       __u32 check_sum;                /* summary checksum */
+       __le32 check_sum;               /* summary checksum */
 } __packed;
 
 #define SUM_JOURNAL_SIZE       (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
@@ -358,6 +358,12 @@ struct summary_footer {
                                sizeof(struct sit_journal_entry))
 #define SIT_JOURNAL_RESERVED   ((SUM_JOURNAL_SIZE - 2) %\
                                sizeof(struct sit_journal_entry))
+
+/* Reserved area should make the size of f2fs_extra_info equal to
+ * that of nat_journal and sit_journal.
+ */
+#define EXTRA_INFO_RESERVED    (SUM_JOURNAL_SIZE - 2 - 8)
+
 /*
  * frequently updated NAT/SIT entries can be stored in the spare area in
  * summary blocks
@@ -387,6 +393,11 @@ struct sit_journal {
        __u8 reserved[SIT_JOURNAL_RESERVED];
 } __packed;
 
+struct f2fs_extra_info {
+       __le64 kbytes_written;
+       __u8 reserved[EXTRA_INFO_RESERVED];
+} __packed;
+
 /* 4KB-sized summary block structure */
 struct f2fs_summary_block {
        struct f2fs_summary entries[ENTRIES_IN_SUM];
@@ -394,10 +405,11 @@ struct f2fs_summary_block {
                __le16 n_nats;
                __le16 n_sits;
        };
-       /* spare area is used by NAT or SIT journals */
+       /* spare area is used by NAT or SIT journals or extra info */
        union {
                struct nat_journal nat_j;
                struct sit_journal sit_j;
+               struct f2fs_extra_info info;
        };
        struct summary_footer footer;
 } __packed;
index 1a2046275cdf0353e68a9cf38c5fe8762a62fdc2..a401dc8ad85d3174c1c0ca992f55b7b30d6e3358 100644 (file)
@@ -70,7 +70,7 @@ extern int sysctl_protected_hardlinks;
 struct buffer_head;
 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);
-typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
                        ssize_t bytes, void *private);
 typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
 
@@ -484,9 +484,6 @@ struct block_device {
        int                     bd_fsfreeze_count;
        /* Mutex for freeze */
        struct mutex            bd_fsfreeze_mutex;
-#ifdef CONFIG_FS_DAX
-       int                     bd_map_count;
-#endif
 };
 
 /*
@@ -2907,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options);
 
 static inline bool io_is_direct(struct file *filp)
 {
-       return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+       return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
 }
 
 static inline int iocb_flags(struct file *file)
index 0639dcc9819523a002de1149da5629d24bce039b..81de7123959d96d97dd57d41d76289b3a8185920 100644 (file)
@@ -165,7 +165,6 @@ struct ftrace_ops {
        ftrace_func_t                   saved_func;
        int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
-       int                             nr_trampolines;
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
        struct ftrace_ops_hash          old_hash;
index 28ad5f6494b018a2c49493920059de62fcacdb8e..af1f2b24bbe4172f6055407229f0670ee90dc534 100644 (file)
@@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_CMA
-
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
                              unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif
 
+#ifdef CONFIG_CMA
 /* CMA stuff */
 extern void init_cma_reserved_pageblock(struct page *page);
-
 #endif
 
 #endif /* __LINUX_GFP_H */
index 76dd4f0da5ca9907572bea19f4b0d0dbe8ad06eb..2ead22dd74a00896d24fdb937242e8ba36a77cfe 100644 (file)
@@ -87,7 +87,8 @@ enum hrtimer_restart {
  * @function:  timer expiry callback function
  * @base:      pointer to the timer base (per cpu and per clock)
  * @state:     state information (See bit values above)
- * @start_pid: timer statistics field to store the pid of the task which
+ * @is_rel:    Set if the timer was armed relative
+ * @start_pid:  timer statistics field to store the pid of the task which
  *             started the timer
  * @start_site:        timer statistics field to store the site where the timer
  *             was started
@@ -101,7 +102,8 @@ struct hrtimer {
        ktime_t                         _softexpires;
        enum hrtimer_restart            (*function)(struct hrtimer *);
        struct hrtimer_clock_base       *base;
-       unsigned long                   state;
+       u8                              state;
+       u8                              is_rel;
 #ifdef CONFIG_TIMER_STATS
        int                             start_pid;
        void                            *start_site;
@@ -321,6 +323,27 @@ static inline void clock_was_set_delayed(void) { }
 
 #endif
 
+static inline ktime_t
+__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
+{
+       ktime_t rem = ktime_sub(timer->node.expires, now);
+
+       /*
+        * Adjust relative timers for the extra we added in
+        * hrtimer_start_range_ns() to prevent short timeouts.
+        */
+       if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
+               rem.tv64 -= hrtimer_resolution;
+       return rem;
+}
+
+static inline ktime_t
+hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
+{
+       return __hrtimer_expires_remaining_adjusted(timer,
+                                                   timer->base->get_time());
+}
+
 extern void clock_was_set(void);
 #ifdef CONFIG_TIMERFD
 extern void timerfd_clock_was_set(void);
@@ -390,7 +413,12 @@ static inline void hrtimer_restart(struct hrtimer *timer)
 }
 
 /* Query timers: */
-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+
+static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+{
+       return __hrtimer_get_remaining(timer, false);
+}
 
 extern u64 hrtimer_get_next_event(void);
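
With this split, hrtimer_get_remaining() keeps its historical behaviour while callers that echo a relative timeout back to user space can ask for the adjusted value. A short sketch, assuming timer points at an armed hrtimer:

/* Kernel-internal arithmetic: raw remaining time, as before. */
ktime_t rem = hrtimer_get_remaining(timer);

/* Reporting back to user space: subtract the CONFIG_TIME_LOW_RES slack
 * that hrtimer_start_range_ns() added when the timer was armed. */
ktime_t rep = __hrtimer_get_remaining(timer, true);
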
 
index 452c0b0d2f3219dc0f11bec80878e5ea0b8e8803..3b1f6cef95136fa381d06eb5ccb409dde3e030a0 100644 (file)
@@ -163,6 +163,14 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
 /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
 #define IEEE80211_MAX_FRAME_LEN                2352
 
+/* Maximal size of an A-MSDU */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839         3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935         7935
+
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895                3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991                7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454       11454
+
 #define IEEE80211_MAX_SSID_LEN         32
 
 #define IEEE80211_MAX_MESH_ID_LEN      32
@@ -843,6 +851,8 @@ enum ieee80211_vht_opmode_bits {
 };
 
 #define WLAN_SA_QUERY_TR_ID_LEN 2
+#define WLAN_MEMBERSHIP_LEN 8
+#define WLAN_USER_POSITION_LEN 16
 
 /**
  * struct ieee80211_tpc_report_ie
@@ -989,6 +999,11 @@ struct ieee80211_mgmt {
                                        u8 action_code;
                                        u8 operating_mode;
                                } __packed vht_opmode_notif;
+                               struct {
+                                       u8 action_code;
+                                       u8 membership[WLAN_MEMBERSHIP_LEN];
+                                       u8 position[WLAN_USER_POSITION_LEN];
+                               } __packed vht_group_notif;
                                struct {
                                        u8 action_code;
                                        u8 dialog_token;
@@ -1498,6 +1513,7 @@ struct ieee80211_vht_operation {
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895                 0x00000000
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991                 0x00000001
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454                        0x00000002
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK                                0x00000003
 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ               0x00000004
 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ      0x00000008
 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK                 0x0000000C
@@ -2079,6 +2095,16 @@ enum ieee80211_tdls_actioncode {
 #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED    BIT(5)
 #define WLAN_EXT_CAPA8_OPMODE_NOTIF    BIT(6)
 
+/* Defines the maximal number of MSDUs in an A-MSDU. */
+#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB   BIT(7)
+#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB   BIT(0)
+
+/*
+ * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY
+ * information element
+ */
+#define WLAN_EXT_CAPA9_FTM_INITIATOR   BIT(7)
+
 /* TDLS specific payload type in the LLC/SNAP header */
 #define WLAN_TDLS_SNAP_RFTYPE  0x2
 
index b84e49c3a738fc4c2e726c31d78b85296e68a5b5..174f43f43affc1dcfd7a4a67a0efcf06547607fb 100644 (file)
@@ -24,6 +24,7 @@ struct team_pcpu_stats {
        struct u64_stats_sync   syncp;
        u32                     rx_dropped;
        u32                     tx_dropped;
+       u32                     rx_nohandler;
 };
 
 struct team;
index f28dff313b07c3bb072dffc2235b27d5d0eaa09a..a5c539fa5d2bc03ba233f4d11de1b64d839990ab 100644 (file)
@@ -133,8 +133,9 @@ struct iommu_dm_region {
 
 /**
  * struct iommu_ops - iommu ops and capabilities
- * @domain_init: init iommu domain
- * @domain_destroy: destroy iommu domain
+ * @capable: check capability
+ * @domain_alloc: allocate iommu domain
+ * @domain_free: free iommu domain
  * @attach_dev: attach device to an iommu domain
  * @detach_dev: detach device from an iommu domain
  * @map: map a physically contiguous memory region to an iommu domain
@@ -144,8 +145,15 @@ struct iommu_dm_region {
  * @iova_to_phys: translate iova to physical address
  * @add_device: add device to iommu grouping
  * @remove_device: remove device from iommu grouping
+ * @device_group: find iommu group for a particular device
  * @domain_get_attr: Query domain attributes
  * @domain_set_attr: Change domain attributes
+ * @get_dm_regions: Request list of direct mapping requirements for a device
+ * @put_dm_regions: Free list of direct mapping requirements for a device
+ * @domain_window_enable: Configure and enable a particular window for a domain
+ * @domain_window_disable: Disable a particular window for a domain
+ * @domain_set_windows: Set the number of windows for a domain
+ * @domain_get_windows: Return the number of windows for a domain
  * @of_xlate: add OF master IDs to iommu grouping
  * @pgsize_bitmap: bitmap of supported page sizes
  * @priv: per-instance data private to the iommu driver
@@ -182,9 +190,9 @@ struct iommu_ops {
        int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
                                    phys_addr_t paddr, u64 size, int prot);
        void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
-       /* Set the numer of window per domain */
+       /* Set the number of windows per domain */
        int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
-       /* Get the numer of window per domain */
+       /* Get the number of windows per domain */
        u32 (*domain_get_windows)(struct iommu_domain *domain);
 
 #ifdef CONFIG_OF_IOMMU
index f64622ad02c196185be2eb79269e782d5772fcd0..04579d9fbce4f3b5c0a8484116500ee3034b93f7 100644 (file)
@@ -70,6 +70,7 @@ struct irq_fwspec {
  */
 enum irq_domain_bus_token {
        DOMAIN_BUS_ANY          = 0,
+       DOMAIN_BUS_WIRED,
        DOMAIN_BUS_PCI_MSI,
        DOMAIN_BUS_PLATFORM_MSI,
        DOMAIN_BUS_NEXUS,
index 851821bfd55321dce527f4b32e03d1534994678a..bec2abbd7ab28485cbf32bfefa7430b6a47c81e4 100644 (file)
@@ -526,6 +526,7 @@ enum ata_lpm_policy {
 enum ata_lpm_hints {
        ATA_LPM_EMPTY           = (1 << 0), /* port empty/probing */
        ATA_LPM_HIPM            = (1 << 1), /* may use HIPM */
+       ATA_LPM_WAKE_ONLY       = (1 << 2), /* only wake up link */
 };
 
 /* forward declarations */
index 9ae48d4aeb5ec7d6fafa9528c79f65f62a091ed3..792c8981e63365b06a03e7feda7e647eafb533c0 100644 (file)
@@ -51,7 +51,7 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
        /* default hierarchy stats */
-       MEMCG_SOCK,
+       MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
        MEMCG_NR_STAT,
 };
 
index f1cd22f2df1ac50438e7d70bb0df85c44580b8d3..516e149443397dcf712e4e62aca6fb3c78e542b6 100644 (file)
@@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp);
 #endif
 
 #ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK       VM_GROWSUP
 #else
-#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK       VM_GROWSDOWN
 #endif
 
+#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+
 /*
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
@@ -1341,8 +1343,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
                !vma_growsup(vma->vm_next, addr);
 }
 
-extern struct task_struct *task_of_stack(struct task_struct *task,
-                               struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
index d3ebb9d21a5334d26e85bc865d318535f5864569..624b78b848b89fae506faa1f4f48ce86297ceace 100644 (file)
@@ -424,9 +424,9 @@ struct mm_struct {
        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
-       unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
-       unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
-       unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
+       unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+       unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+       unsigned long stack_vm;         /* VM_STACK */
        unsigned long def_flags;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
index 33bb1b19273e3ad165382f4c7f0f3cd8f8c0b407..7b6c2cfee390e939e13b0f14c92224bcb3aafcf4 100644 (file)
@@ -682,6 +682,12 @@ typedef struct pglist_data {
         */
        unsigned long first_deferred_pfn;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       spinlock_t split_queue_lock;
+       struct list_head split_queue;
+       unsigned long split_queue_len;
+#endif
 } pg_data_t;
 
 #define node_present_pages(nid)        (NODE_DATA(nid)->node_present_pages)
index 4560d8f1545d2cec7e6da19da460fff26151c105..2bb0c308570672e7105b14f85a4215b0f9500207 100644 (file)
@@ -324,6 +324,12 @@ struct module_layout {
 #define __module_layout_align
 #endif
 
+struct mod_kallsyms {
+       Elf_Sym *symtab;
+       unsigned int num_symtab;
+       char *strtab;
+};
+
 struct module {
        enum module_state state;
 
@@ -405,15 +411,10 @@ struct module {
 #endif
 
 #ifdef CONFIG_KALLSYMS
-       /*
-        * We keep the symbol and string tables for kallsyms.
-        * The core_* fields below are temporary, loader-only (they
-        * could really be discarded after module init).
-        */
-       Elf_Sym *symtab, *core_symtab;
-       unsigned int num_symtab, core_num_syms;
-       char *strtab, *core_strtab;
-
+       /* Protected by RCU and/or module_mutex: use rcu_dereference() */
+       struct mod_kallsyms *kallsyms;
+       struct mod_kallsyms core_kallsyms;
+       
        /* Section attributes */
        struct module_sect_attrs *sect_attrs;
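
Since mod->kallsyms can now be swapped from the loader-only copy to core_kallsyms after init, readers have to fetch the pointer under RCU as the comment above says. A hedged sketch of the reader side; the loop body is illustrative:

struct mod_kallsyms *kallsyms;
unsigned int i;

rcu_read_lock();
kallsyms = rcu_dereference(mod->kallsyms);
for (i = 0; i < kallsyms->num_symtab; i++) {
        /* ... look at kallsyms->symtab[i] / kallsyms->strtab ... */
}
rcu_read_unlock();
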
 
index 36bb6a503f196ea04e4184c1445aa142db906607..3bf8f954b642581c271a7236e8e1b46f6820b015 100644 (file)
@@ -166,7 +166,6 @@ struct bbm_info {
 };
 
 /* OneNAND BBT interface */
-extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
 extern int onenand_default_bbt(struct mtd_info *mtd);
 
 #endif /* __LINUX_MTD_BBM_H */
index 02cd5f9b79b875ed03e5b9042df1cf98eff60709..8255118be0f0a508fdee0b6a83ba148e0d837bb9 100644 (file)
@@ -44,7 +44,6 @@ struct INFTLrecord {
        unsigned int nb_blocks;         /* number of physical blocks */
        unsigned int nb_boot_blocks;    /* number of blocks used by the bios */
        struct erase_info instr;
-       struct nand_ecclayout oobinfo;
 };
 
 int INFTL_mount(struct INFTLrecord *s);
index bdd68e22b5a59d891765d2b5f852a7fabb57d141..7604f4be33865deeee79c50251a2a71e4aebfe38 100644 (file)
@@ -168,6 +168,12 @@ typedef enum {
 /* Device supports subpage reads */
 #define NAND_SUBPAGE_READ      0x00001000
 
+/*
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
+ * patterns.
+ */
+#define NAND_NEED_SCRAMBLING   0x00002000
+
 /* Options valid for Samsung large page devices */
 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
 
@@ -896,7 +902,6 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
  * @chip_delay:                R/B delay value in us
  * @options:           Option flags, e.g. 16bit buswidth
  * @bbt_options:       BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @ecclayout:         ECC layout info structure
  * @part_probe_types:  NULL-terminated array of probe types
  */
 struct platform_nand_chip {
@@ -904,7 +909,6 @@ struct platform_nand_chip {
        int chip_offset;
        int nr_partitions;
        struct mtd_partition *partitions;
-       struct nand_ecclayout *ecclayout;
        int chip_delay;
        unsigned int options;
        unsigned int bbt_options;
index b059629e22bc6901f9f17007672b0ee2f24191ec..044daa02b8ff94fde7531d4e9c4ebe548d5d49ef 100644 (file)
@@ -50,7 +50,6 @@ struct NFTLrecord {
         unsigned int nb_blocks;                /* number of physical blocks */
         unsigned int nb_boot_blocks;   /* number of blocks used by the bios */
         struct erase_info instr;
-       struct nand_ecclayout oobinfo;
 };
 
 int NFTL_mount(struct NFTLrecord *s);
index 5ac140dcb789499e51b951fa45fb34378d636407..219f53c30cb3cd4a9a2fdd1c2a2d3ca343f28bca 100644 (file)
@@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n)
        clear_bit(NAPI_STATE_NPSVC, &n->state);
 }
 
-#ifdef CONFIG_SMP
 /**
  *     napi_synchronize - wait until NAPI is not running
  *     @n: napi context
@@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n)
  */
 static inline void napi_synchronize(const struct napi_struct *n)
 {
-       while (test_bit(NAPI_STATE_SCHED, &n->state))
-               msleep(1);
+       if (IS_ENABLED(CONFIG_SMP))
+               while (test_bit(NAPI_STATE_SCHED, &n->state))
+                       msleep(1);
+       else
+               barrier();
 }
-#else
-# define napi_synchronize(n)   barrier()
-#endif
 
 enum netdev_queue_state_t {
        __QUEUE_STATE_DRV_XOFF,
@@ -1398,6 +1397,8 @@ enum netdev_priv_flags {
  *                     do not use this in drivers
  *     @tx_dropped:    Dropped packets by core network,
  *                     do not use this in drivers
+ *     @rx_nohandler:  nohandler dropped packets by core network on
+ *                     inactive devices, do not use this in drivers
  *
  *     @wireless_handlers:     List of functions to handle Wireless Extensions,
  *                             instead of ioctl,
@@ -1612,6 +1613,7 @@ struct net_device {
 
        atomic_long_t           rx_dropped;
        atomic_long_t           tx_dropped;
+       atomic_long_t           rx_nohandler;
 
 #ifdef CONFIG_WIRELESS_EXT
        const struct iw_handler_def *   wireless_handlers;
@@ -3742,7 +3744,7 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
 
 /* RSS keys are 40 or 52 bytes long */
 #define NETDEV_RSS_KEY_LEN 52
-extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 void netdev_rss_key_fill(void *buffer, size_t len);
 
 int dev_get_nest_level(struct net_device *dev,
index 48e0320cd6432463b86cc3d7684d5ecd4ab48651..67300f8e5f2f0248e8651cbe17e5c14e1110ca2c 100644 (file)
@@ -550,9 +550,7 @@ extern int  nfs_readpage_async(struct nfs_open_context *, struct inode *,
 
 static inline loff_t nfs_size_to_loff_t(__u64 size)
 {
-       if (size > (__u64) OFFSET_MAX - 1)
-               return OFFSET_MAX - 1;
-       return (loff_t) size;
+       return min_t(u64, size, OFFSET_MAX);
 }
 
 static inline ino_t
index dd10626a615fb160587ecf09f860de36ba583c19..dc6e39696b64c8e925601f598b9cdab5684f1970 100644 (file)
@@ -929,7 +929,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
        return num;
 }
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && !defined(MODULE)
 #define _OF_DECLARE(table, name, compat, fn, fn_type)                  \
        static const struct of_device_id __of_table_##name              \
                __used __section(__##table##_of_table)                  \
index 27df4a6585daedcc6a74865bf048cfd06a0593ba..3d371c15075316670f37ee2ac284d9befc9f9de8 100644 (file)
@@ -746,9 +746,26 @@ struct pci_driver {
        .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
        .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
 
+enum {
+       PCI_REASSIGN_ALL_RSRC   = 0x00000001,   /* ignore firmware setup */
+       PCI_REASSIGN_ALL_BUS    = 0x00000002,   /* reassign all bus numbers */
+       PCI_PROBE_ONLY          = 0x00000004,   /* use existing setup */
+       PCI_CAN_SKIP_ISA_ALIGN  = 0x00000008,   /* don't do ISA alignment */
+       PCI_ENABLE_PROC_DOMAINS = 0x00000010,   /* enable domains in /proc */
+       PCI_COMPAT_DOMAIN_0     = 0x00000020,   /* ... except domain 0 */
+       PCI_SCAN_ALL_PCIE_DEVS  = 0x00000040,   /* scan all, not just dev 0 */
+};
+
 /* these external functions are only available when PCI support is enabled */
 #ifdef CONFIG_PCI
 
+extern unsigned int pci_flags;
+
+static inline void pci_set_flags(int flags) { pci_flags = flags; }
+static inline void pci_add_flags(int flags) { pci_flags |= flags; }
+static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
+static inline int pci_has_flag(int flag) { return pci_flags & flag; }
+
 void pcie_bus_configure_settings(struct pci_bus *bus);
 
 enum pcie_bus_config_types {
@@ -1405,6 +1422,11 @@ void pci_register_set_vga_state(arch_set_vga_state_t func);
 
 #else /* CONFIG_PCI is not enabled */
 
+static inline void pci_set_flags(int flags) { }
+static inline void pci_add_flags(int flags) { }
+static inline void pci_clear_flags(int flags) { }
+static inline int pci_has_flag(int flag) { return 0; }
+
 /*
  *  If the system does not have PCI, clearly these return errors.  Define
  *  these as simple inline functions to avoid hair in drivers.
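
With the flag words and the pci_set_flags()/pci_has_flag() helpers now in the generic header (and stubbed out without CONFIG_PCI), arch setup code and the PCI core can share one probe policy. A minimal sketch, with the arch hook name invented for illustration:

#include <linux/pci.h>

/* Arch setup: keep the firmware's resource assignment untouched. */
static void __init sketch_arch_pci_setup(void)
{
        pci_add_flags(PCI_PROBE_ONLY);
}

/* Core/driver side: only reassign resources when the arch allows it. */
static bool sketch_may_reassign_resources(void)
{
        return !pci_has_flag(PCI_PROBE_ONLY);
}
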
index f9828a48f16addab4737683f3c8345ab87e52a0a..b35a61a481fa0460b3b32591e6dd52f2fa9077e7 100644 (file)
@@ -634,9 +634,6 @@ struct perf_event_context {
        int                             nr_cgroups;      /* cgroup evts */
        void                            *task_ctx_data; /* pmu specific data */
        struct rcu_head                 rcu_head;
-
-       struct delayed_work             orphans_remove;
-       bool                            orphans_remove_sched;
 };
 
 /*
@@ -729,7 +726,7 @@ extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
-extern struct perf_event *perf_event_get(unsigned int fd);
+extern struct file *perf_event_get(unsigned int fd);
 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
@@ -1044,7 +1041,7 @@ extern void perf_swevent_put_recursion_context(int rctx);
 extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
-extern int __perf_event_disable(void *info);
+extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
@@ -1070,7 +1067,7 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
 static inline void perf_event_delayed_put(struct task_struct *task)    { }
-static inline struct perf_event *perf_event_get(unsigned int fd)       { return ERR_PTR(-EINVAL); }
+static inline struct file *perf_event_get(unsigned int fd)     { return ERR_PTR(-EINVAL); }
 static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 {
        return ERR_PTR(-EINVAL);
index 0703b5360d31b5fa599a541bce9881941a170281..37448ab5fb5c2eddc80b97125ee80ecff4bc1b58 100644 (file)
@@ -29,7 +29,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
        return __pfn_to_pfn_t(pfn, 0);
 }
 
-extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
 
 static inline bool pfn_t_has_page(pfn_t pfn)
 {
@@ -48,7 +48,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn)
        return NULL;
 }
 
-static inline dma_addr_t pfn_t_to_phys(pfn_t pfn)
+static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
 {
        return PFN_PHYS(pfn_t_to_pfn(pfn));
 }
index 36bb92172f4795ff806275f8777d5ecf0bd93b1a..c55e42ee57fa0c9ba3083d6bf03b04fae275d13c 100644 (file)
@@ -40,7 +40,6 @@ struct s3c2410_nand_set {
        char                    *name;
        int                     *nr_map;
        struct mtd_partition    *partitions;
-       struct nand_ecclayout   *ecc_layout;
 };
 
 struct s3c2410_platform_nand {
index 95403d2ccaf56b70207fa09e65c592d882fb30fd..cccaf4a29e9f02c9a60b65f73a523a69efa5af3a 100644 (file)
@@ -34,6 +34,8 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
 
 int dev_pm_opp_get_opp_count(struct device *dev);
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -60,6 +62,9 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
 void dev_pm_opp_put_supported_hw(struct device *dev);
 int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
 void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_set_regulator(struct device *dev, const char *name);
+void dev_pm_opp_put_regulator(struct device *dev);
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
 #else
 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
@@ -86,6 +91,16 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
        return 0;
 }
 
+static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+       return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+       return 0;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 {
        return NULL;
@@ -151,6 +166,18 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 
 static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
 
+static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+       return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+       return -EINVAL;
+}
+
 #endif         /* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
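
Together, dev_pm_opp_set_regulator() and dev_pm_opp_set_rate() let a cpufreq-style driver hand the clock-plus-supply sequencing to the OPP core. A hedged sketch of a consumer; the "vdd" supply name and the split between probe-time and transition-time calls are assumptions:

#include <linux/pm_opp.h>

/* Probe time: tell the OPP core which regulator feeds this device. */
static int sketch_opp_init(struct device *dev)
{
        return dev_pm_opp_set_regulator(dev, "vdd");
}

/* Per transition: the core orders the clk and regulator updates. */
static int sketch_opp_transition(struct device *dev, unsigned long hz)
{
        return dev_pm_opp_set_rate(dev, hz);
}
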
index 12c4865457adc3d0412c573b710feec7d3d6fd81..393efe2edf9afb9d8a38d1a16201e78c64881026 100644 (file)
@@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu);
 bool psci_power_state_loses_context(u32 state);
 bool psci_power_state_is_valid(u32 state);
 
+int psci_cpu_init_idle(unsigned int cpu);
+int psci_cpu_suspend_enter(unsigned long index);
+
 struct psci_operations {
        int (*cpu_suspend)(u32 state, unsigned long entry_point);
        int (*cpu_off)(u32 state);
index 9e12000914b3451107abdde161e1ea64b7cfeb5e..1e36898edbda783215113a857c60003d736abd1c 100644 (file)
@@ -29,6 +29,12 @@ extern bool qcom_scm_hdcp_available(void);
 extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
                u32 *resp);
 
+extern bool qcom_scm_pas_supported(u32 peripheral);
+extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size);
+extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int qcom_scm_pas_shutdown(u32 peripheral);
+
 #define QCOM_SCM_CPU_PWR_DOWN_L2_ON    0x0
 #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF   0x1
 
index b2505acfd3c078c70e733f7d9e826cfc4b6c9524..9dfb6bce8c9eb08f0c45a8d1b76f86b0b489b30e 100644 (file)
@@ -306,6 +306,7 @@ struct quota_format_ops {
        int (*read_dqblk)(struct dquot *dquot);         /* Read structure for one user */
        int (*commit_dqblk)(struct dquot *dquot);       /* Write structure for one user */
        int (*release_dqblk)(struct dquot *dquot);      /* Called when last reference to dquot is being dropped */
+       int (*get_next_id)(struct super_block *sb, struct kqid *qid);   /* Get next ID with existing structure in the quota file */
 };
 
 /* Operations working with dquots */
@@ -321,6 +322,8 @@ struct dquot_operations {
         * quota code only */
        qsize_t *(*get_reserved_space) (struct inode *);
        int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
+       /* Get next ID with active quota structure */
+       int (*get_next_id) (struct super_block *sb, struct kqid *qid);
 };
 
 struct path;
@@ -425,6 +428,8 @@ struct quotactl_ops {
        int (*quota_sync)(struct super_block *, int);
        int (*set_info)(struct super_block *, int, struct qc_info *);
        int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+       int (*get_nextdqblk)(struct super_block *, struct kqid *,
+                            struct qc_dqblk *);
        int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
        int (*get_state)(struct super_block *, struct qc_state *);
        int (*rm_xquota)(struct super_block *, unsigned int);
index 7a57c28eb5e708d0346760818533d7b6f8665096..f00fa86ac9660ad79e243f9c9e590bf854dcb48d 100644 (file)
@@ -82,6 +82,7 @@ int dquot_commit(struct dquot *dquot);
 int dquot_acquire(struct dquot *dquot);
 int dquot_release(struct dquot *dquot);
 int dquot_commit_info(struct super_block *sb, int type);
+int dquot_get_next_id(struct super_block *sb, struct kqid *qid);
 int dquot_mark_dquot_dirty(struct dquot *dquot);
 
 int dquot_file_open(struct inode *inode, struct file *file);
@@ -99,6 +100,8 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state);
 int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
 int dquot_get_dqblk(struct super_block *sb, struct kqid id,
                struct qc_dqblk *di);
+int dquot_get_next_dqblk(struct super_block *sb, struct kqid *id,
+               struct qc_dqblk *di);
 int dquot_set_dqblk(struct super_block *sb, struct kqid id,
                struct qc_dqblk *di);
 
index 7c88ad156a293c0bedcea771b58f3113909b6532..f54be708220760f9a367f4a29ca1cbe7cfb7fba2 100644 (file)
@@ -378,13 +378,29 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
 void **radix_tree_next_chunk(struct radix_tree_root *root,
                             struct radix_tree_iter *iter, unsigned flags);
 
+/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter:      iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true.  If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+       iter->next_index = iter->index;
+       return NULL;
+}
+
 /**
  * radix_tree_chunk_size - get current chunk size
  *
  * @iter:      pointer to radix tree iterator
  * Returns:    current chunk size
  */
-static __always_inline unsigned
+static __always_inline long
 radix_tree_chunk_size(struct radix_tree_iter *iter)
 {
        return iter->next_index - iter->index;
@@ -418,9 +434,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
                        return slot + offset + 1;
                }
        } else {
-               unsigned size = radix_tree_chunk_size(iter) - 1;
+               long size = radix_tree_chunk_size(iter);
 
-               while (size--) {
+               while (--size > 0) {
                        slot++;
                        iter->index++;
                        if (likely(*slot))
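
radix_tree_iter_retry() slots into the existing radix_tree_for_each_* pattern: when a lockless walk races with a modification and radix_tree_deref_retry() fires, resetting next_index makes the iterator re-read the current chunk. A sketch of the pattern, assuming tree is a struct radix_tree_root populated elsewhere:

void **slot;
struct radix_tree_iter iter;

rcu_read_lock();
radix_tree_for_each_slot(slot, &tree, &iter, 0) {
        void *entry = radix_tree_deref_slot(slot);

        if (radix_tree_deref_retry(entry)) {
                /* Raced with an insert/delete: redo this chunk. */
                slot = radix_tree_iter_retry(&iter);
                continue;
        }
        /* ... use entry ... */
}
rcu_read_unlock();
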
index a7a06d1dcf9cb17ee65d1befe1ef6ac62d882d16..a0118d5929a9c9ed17df3b75a8b599472ff1f205 100644 (file)
@@ -152,6 +152,8 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
 
 # define jiffies       raid6_jiffies()
 # define printk        printf
+# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
+# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
 # define GFP_KERNEL    0
 # define __get_free_pages(x, y)        ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
                                                     PROT_READ|PROT_WRITE,   \
index d9010789b4e8215cc74a2ffc12e83f3cfd9b73cd..7af625f6d226a4d16266107282668fdb9bfc3bdc 100644 (file)
@@ -104,7 +104,8 @@ int __must_check rfkill_register(struct rfkill *rfkill);
  *
  * Pause polling -- say transmitter is off for other reasons.
  * NOTE: not necessary for suspend/resume -- in that case the
- * core stops polling anyway
+ * core stops polling anyway (but will also correctly handle
+ * the case of polling having been paused before suspend).
  */
 void rfkill_pause_polling(struct rfkill *rfkill);
 
index bdf597c4f0be82965911266ed1668d06510492cb..a07f42bedda32687c94ed574f5a606b19912b042 100644 (file)
@@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
                __put_anon_vma(anon_vma);
 }
 
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-       struct anon_vma *anon_vma = vma->anon_vma;
-       if (anon_vma)
-               down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-       struct anon_vma *anon_vma = vma->anon_vma;
-       if (anon_vma)
-               up_write(&anon_vma->root->rwsem);
-}
-
 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
        down_write(&anon_vma->root->rwsem);
index 11f935c1a090419d6cda938aa925bfa79de3616b..4ce9ff7086f4897a67f6037d88df21012cf28413 100644 (file)
@@ -299,6 +299,7 @@ struct sk_buff;
 #else
 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
+extern int sysctl_max_skb_frags;
 
 typedef struct skb_frag_struct skb_frag_t;
 
index d0cb6d189a0a02bd28459c7b143229563f23c4e4..bd51c8a9d80733aaf393adc4a57c7c5f73ade535 100644 (file)
@@ -26,6 +26,8 @@ struct qcom_smd_device {
        struct qcom_smd_channel *channel;
 };
 
+typedef int (*qcom_smd_cb_t)(struct qcom_smd_device *, const void *, size_t);
+
 /**
  * struct qcom_smd_driver - smd driver struct
  * @driver:    underlying device driver
@@ -42,7 +44,7 @@ struct qcom_smd_driver {
 
        int (*probe)(struct qcom_smd_device *dev);
        void (*remove)(struct qcom_smd_device *dev);
-       int (*callback)(struct qcom_smd_device *, const void *, size_t);
+       qcom_smd_cb_t callback;
 };
 
 int qcom_smd_driver_register(struct qcom_smd_driver *drv);
@@ -54,4 +56,8 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
 
 int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
 
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev,
+                                              const char *name,
+                                              qcom_smd_cb_t cb);
+
 #endif
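
The qcom_smd_cb_t typedef plus qcom_smd_open_channel() let an SMD driver attach a second channel next to the one it was probed with. A hedged sketch; the "aux" channel name, the callback body, and the ERR_PTR-style error return are assumptions, not facts from this header:

static int sketch_aux_cb(struct qcom_smd_device *sdev,
                         const void *data, size_t len)
{
        /* Consume payload from the extra channel (illustrative). */
        return 0;
}

static int sketch_probe(struct qcom_smd_device *sdev)
{
        struct qcom_smd_channel *aux;

        aux = qcom_smd_open_channel(sdev, "aux", sketch_aux_cb);
        if (IS_ERR(aux))
                return PTR_ERR(aux);
        return 0;
}
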
index f35e1512fcaa1bb723da9f5825a2bc8406071fca..7b88697929e9ef6f729ce9fd1d9e512d352f8b94 100644 (file)
@@ -1,12 +1,17 @@
 #ifndef __QCOM_SMEM_STATE__
 #define __QCOM_SMEM_STATE__
 
+#include <linux/errno.h>
+
+struct device_node;
 struct qcom_smem_state;
 
 struct qcom_smem_state_ops {
        int (*update_bits)(void *, u32, u32);
 };
 
+#ifdef CONFIG_QCOM_SMEM_STATE
+
 struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
 void qcom_smem_state_put(struct qcom_smem_state *);
 
@@ -15,4 +20,34 @@ int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 val
 struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data);
 void qcom_smem_state_unregister(struct qcom_smem_state *state);
 
+#else
+
+static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+       const char *con_id, unsigned *bit)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+}
+
+static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+       u32 mask, u32 value)
+{
+       return -EINVAL;
+}
+
+static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+       const struct qcom_smem_state_ops *ops, void *data)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+}
+
+#endif
+
 #endif
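
With stubs in place for !CONFIG_QCOM_SMEM_STATE, a consumer can be written unconditionally: get a handle, update the masked bits, drop the handle. A minimal sketch; the "stop" con_id, the raise-then-clear sequence, and the include path are assumptions:

#include <linux/soc/qcom/smem_state.h>

static int sketch_signal_stop(struct device *dev)
{
        struct qcom_smem_state *state;
        unsigned bit;

        state = qcom_smem_state_get(dev, "stop", &bit);
        if (IS_ERR(state))
                return PTR_ERR(state);

        qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
        qcom_smem_state_update_bits(state, BIT(bit), 0);

        qcom_smem_state_put(state);
        return 0;
}
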
similarity index 81%
rename from arch/arm/mach-exynos/exynos-pmu.h
rename to include/linux/soc/samsung/exynos-pmu.h
index a2ab0d52b2304868636ae1badd30936fab6c5ec4..e2e9de1acc5b789534892853f7f4b63d6d3cc0b6 100644 (file)
@@ -9,8 +9,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef __EXYNOS_PMU_H
-#define __EXYNOS_PMU_H
+#ifndef __LINUX_SOC_EXYNOS_PMU_H
+#define __LINUX_SOC_EXYNOS_PMU_H
 
 enum sys_powerdown {
        SYS_AFTR,
@@ -21,4 +21,4 @@ enum sys_powerdown {
 
 extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
 
-#endif /* __EXYNOS_PMU_H */
+#endif /* __LINUX_SOC_EXYNOS_PMU_H */
similarity index 99%
rename from arch/arm/mach-exynos/regs-pmu.h
rename to include/linux/soc/samsung/exynos-regs-pmu.h
index 5e4f4c23b06a11d19c377d2a50af373d49d3b5bc..d30186e2b60946a41ced4fdaad421f0ad4db90ac 100644 (file)
@@ -9,8 +9,8 @@
  * published by the Free Software Foundation.
 */
 
-#ifndef __ASM_ARCH_REGS_PMU_H
-#define __ASM_ARCH_REGS_PMU_H __FILE__
+#ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H
+#define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__
 
 #define S5P_CENTRAL_SEQ_CONFIGURATION          0x0200
 
                                         | EXYNOS5420_KFC_USE_STANDBY_WFI2  \
                                         | EXYNOS5420_KFC_USE_STANDBY_WFI3)
 
-#endif /* __ASM_ARCH_REGS_PMU_H */
+#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
index e7a018eaf3a255db2198049f543ea3111004852a..017fced60242ecdf5eaf9daeb0b035e18333d6e3 100644 (file)
@@ -1,10 +1,13 @@
 #ifndef __LINUX_SWIOTLB_H
 #define __LINUX_SWIOTLB_H
 
+#include <linux/dma-direction.h>
+#include <linux/init.h>
 #include <linux/types.h>
 
 struct device;
 struct dma_attrs;
+struct page;
 struct scatterlist;
 
 extern int swiotlb_force;
index b386361ba3e87226c329924bc1992252fcf0b9d6..d909feeeaea25f437505734f24129c54d86719de 100644 (file)
@@ -256,6 +256,7 @@ struct tcp_sock {
        u32     prr_delivered;  /* Number of newly delivered packets to
                                 * receiver in Recovery. */
        u32     prr_out;        /* Total number of pkts sent during Recovery. */
+       u32     delivered;      /* Total data packets delivered incl. rexmits */
 
        u32     rcv_wnd;        /* Current receiver window              */
        u32     write_seq;      /* Tail(+1) of data held in tcp send buffer */
index 2fd8708ea88893cf57e03f0ae23688ac42c9d24c..d9fb4b043f56dd45b406d3b68c59da3c860c1f43 100644 (file)
@@ -649,6 +649,7 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
 /* tty_mutex.c */
 /* functions for preparation of BKL removal */
 extern void __lockfunc tty_lock(struct tty_struct *tty);
+extern int  tty_lock_interruptible(struct tty_struct *tty);
 extern void __lockfunc tty_unlock(struct tty_struct *tty);
 extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
 extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
index 0e32bc71245ef46b90aa21112e4d2bef42cc5950..ca73c503b92a758ad5ce9b6d022c53c0758951c5 100644 (file)
@@ -311,6 +311,7 @@ enum {
 
        __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
        __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */
+       __WQ_LEGACY             = 1 << 18, /* internal: create*_workqueue() */
 
        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
@@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
        alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
 #define create_workqueue(name)                                         \
-       alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
+       alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
 #define create_freezable_workqueue(name)                               \
-       alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
-                       1, (name))
+       alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
+                       WQ_MEM_RECLAIM, 1, (name))
 #define create_singlethread_workqueue(name)                            \
-       alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+       alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
index e5321fda548935e1bab99fbce565dc7a018d52e9..b3edc14e763f020cf4500d0965d118cc572b3569 100644 (file)
 #ifdef __KERNEL__
 
 #include <linux/videodev2.h>
-
-/* Tuner PADs */
-/* FIXME: is this the right place for it? */
-enum tuner_pad_index {
-       TUNER_PAD_RF_INPUT,
-       TUNER_PAD_IF_OUTPUT,
-       TUNER_NUM_PADS
-};
+#include <media/v4l2-mc.h>
 
 #define ADDR_UNSET (255)
 
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
new file mode 100644 (file)
index 0000000..df11519
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * v4l2-mc.h - Media Controller V4L2 types and prototypes
+ *
+ * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER
+ *
+ * @TUNER_PAD_RF_INPUT:        Radiofrequency (RF) sink pad, usually linked to an
+ *                     RF connector entity.
+ * @TUNER_PAD_OUTPUT:  Tuner video output source pad. Contains the video
+ *                     chrominance and luminance or the whole bandwidth
+ *                     of the signal converted to an Intermediate Frequency
+ *                     (IF) or to baseband (on zero-IF tuners).
+ * @TUNER_PAD_AUD_OUT: Tuner audio output source pad. Tuners used to decode
+ *                     analog TV signals have an extra pad for audio output.
+ *                     Old tuners use an analog stage with a SAW filter for
+ *                     the audio IF frequency. The output of the pad is, in
+ *                     this case, the audio IF, which should be decoded either
+ *                     by the bridge chipset (that's the case of cx2388x
+ *                     chipsets) or may require an external IF sound
+ *                     processor, like msp34xx. On modern silicon tuners,
+ *                     the audio IF decoder is usually incorporated in the
+ *                     tuner. In that case, the output of this pad is
+ *                     sampled audio data.
+ * @TUNER_NUM_PADS:    Number of pads of the tuner.
+ */
+enum tuner_pad_index {
+       TUNER_PAD_RF_INPUT,
+       TUNER_PAD_OUTPUT,
+       TUNER_PAD_AUD_OUT,
+       TUNER_NUM_PADS
+};
+
+/**
+ * enum if_vid_dec_index - video IF-PLL pad index for
+ *                        MEDIA_ENT_F_IF_VID_DECODER
+ *
+ * @IF_VID_DEC_PAD_IF_INPUT:   video Intermediate Frequency (IF) sink pad
+ * @IF_VID_DEC_PAD_OUT:                IF-PLL video output source pad. Contains the
+ *                             video chrominance and luminance IF signals.
+ * @IF_VID_DEC_PAD_NUM_PADS:   Number of pads of the video IF-PLL.
+ */
+enum if_vid_dec_pad_index {
+       IF_VID_DEC_PAD_IF_INPUT,
+       IF_VID_DEC_PAD_OUT,
+       IF_VID_DEC_PAD_NUM_PADS
+};
+
+/**
+ * enum if_aud_dec_index - audio/sound IF-PLL pad index for
+ *                        MEDIA_ENT_F_IF_AUD_DECODER
+ *
+ * @IF_AUD_DEC_PAD_IF_INPUT:   audio Intermediate Frequency (IF) sink pad
+ * @IF_AUD_DEC_PAD_OUT:                IF-PLL audio output source pad. Contains the
+ *                             audio sampled stream data, usually connected
+ *                             to the bridge bus via an Inter-IC Sound (I2S)
+ *                             bus.
+ * @IF_AUD_DEC_PAD_NUM_PADS:   Number of pads of the audio IF-PLL.
+ */
+enum if_aud_dec_pad_index {
+       IF_AUD_DEC_PAD_IF_INPUT,
+       IF_AUD_DEC_PAD_OUT,
+       IF_AUD_DEC_PAD_NUM_PADS
+};
+
+/**
+ * enum demod_pad_index - analog TV pad index for MEDIA_ENT_F_ATV_DECODER
+ *
+ * @DEMOD_PAD_IF_INPUT:        IF input sink pad.
+ * @DEMOD_PAD_VID_OUT: Video output source pad.
+ * @DEMOD_PAD_VBI_OUT: Vertical Blanking Interval (VBI) output source pad.
+ * @DEMOD_NUM_PADS:    Maximum number of output pads.
+ */
+enum demod_pad_index {
+       DEMOD_PAD_IF_INPUT,
+       DEMOD_PAD_VID_OUT,
+       DEMOD_PAD_VBI_OUT,
+       DEMOD_NUM_PADS
+};
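
These enums only fix the pad numbering; wiring them up is left to each driver, which typically fills a media_pad array indexed by them and registers it on the sub-device entity. A hedged sketch, assuming the media_entity_pads_init() helper from the same media-controller series:

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-mc.h>

static int sketch_tuner_init_pads(struct v4l2_subdev *sd,
                                  struct media_pad *pads)
{
        pads[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
        pads[TUNER_PAD_OUTPUT].flags   = MEDIA_PAD_FL_SOURCE;
        pads[TUNER_PAD_AUD_OUT].flags  = MEDIA_PAD_FL_SOURCE;

        return media_entity_pads_init(&sd->entity, TUNER_NUM_PADS, pads);
}
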
index ef03ae56b1c1cc18d9541697b353a01842b50210..8a0f55b6c2ba80e25c67caf89e2050fa58ae322f 100644 (file)
@@ -533,7 +533,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
                const unsigned int requested_sizes[]);
 int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
-int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking);
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+                  bool nonblocking);
 
 int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
 int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
index 2a91a0561a478393ca9e9d2f1993467cc4c5c9cb..9b4c418bebd84ae0a7debfbcc697ee92b136ec99 100644 (file)
@@ -6,8 +6,8 @@
 #include <linux/mutex.h>
 #include <net/sock.h>
 
-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
index d4f82edb5cffe0528719dc416e8b28b71460e485..dc71473462ac9f60680dad7c7cce8e9878b1a8a5 100644 (file)
@@ -25,6 +25,7 @@
 #ifndef __HCI_CORE_H
 #define __HCI_CORE_H
 
+#include <linux/leds.h>
 #include <net/bluetooth/hci.h>
 #include <net/bluetooth/hci_sock.h>
 
@@ -396,6 +397,8 @@ struct hci_dev {
        struct delayed_work     rpa_expired;
        bdaddr_t                rpa;
 
+       struct led_trigger      *power_led;
+
        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
index 52899291f40144c0dab97e135b0c9762ade5c8d1..5ee3c689c86370b4b325309d337611c9b51af5d1 100644 (file)
@@ -252,6 +252,12 @@ struct l2cap_conn_rsp {
 #define L2CAP_PSM_3DSP         0x0021
 #define L2CAP_PSM_IPSP         0x0023 /* 6LoWPAN */
 
+#define L2CAP_PSM_DYN_START    0x1001
+#define L2CAP_PSM_DYN_END      0xffff
+#define L2CAP_PSM_AUTO_END     0x10ff
+#define L2CAP_PSM_LE_DYN_START  0x0080
+#define L2CAP_PSM_LE_DYN_END   0x00ff
+
 /* channel identifier */
 #define L2CAP_CID_SIGNALING    0x0001
 #define L2CAP_CID_CONN_LESS    0x0002
index f1fbc3b119623de61becd9c45e44c05211b97ec3..f358ad5e421457b0312f7ffff2f766004ebca663 100644 (file)
@@ -306,5 +306,6 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
                         struct slave *slave);
 int bond_3ad_set_carrier(struct bonding *bond);
 void bond_3ad_update_lacp_rate(struct bonding *bond);
+void bond_3ad_update_ad_actor_settings(struct bonding *bond);
 #endif /* _NET_BOND_3AD_H */
 
index 9bcaaf7cd15ab053a5de9cbf92fd02f0fb0ffaf7..9e1b24c29f0c65ce997d71a566e63125f7a6e248 100644 (file)
@@ -712,6 +712,8 @@ struct cfg80211_acl_data {
  * @p2p_opp_ps: P2P opportunistic PS
  * @acl: ACL configuration used by the drivers which has support for
  *     MAC address based access control
+ * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
+ *     networks.
  */
 struct cfg80211_ap_settings {
        struct cfg80211_chan_def chandef;
@@ -730,6 +732,7 @@ struct cfg80211_ap_settings {
        u8 p2p_ctwindow;
        bool p2p_opp_ps;
        const struct cfg80211_acl_data *acl;
+       bool pbss;
 };
 
 /**
@@ -1888,6 +1891,8 @@ struct cfg80211_ibss_params {
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
  * @vht_capa:  VHT Capability overrides
  * @vht_capa_mask: The bits of vht_capa which are to be used.
+ * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
+ *     networks.
  */
 struct cfg80211_connect_params {
        struct ieee80211_channel *channel;
@@ -1910,6 +1915,7 @@ struct cfg80211_connect_params {
        struct ieee80211_ht_cap ht_capa_mask;
        struct ieee80211_vht_cap vht_capa;
        struct ieee80211_vht_cap vht_capa_mask;
+       bool pbss;
 };
 
 /**
@@ -3489,6 +3495,7 @@ struct cfg80211_cached_keys;
  *     registered for unexpected class 3 frames (AP mode)
  * @conn: (private) cfg80211 software SME connection state machine data
  * @connect_keys: (private) keys to set after connection is established
+ * @conn_bss_type: connecting/connected BSS type
  * @ibss_fixed: (private) IBSS is using fixed BSSID
  * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
  * @event_list: (private) list for internal event processing
@@ -3519,6 +3526,7 @@ struct wireless_dev {
        u8 ssid_len, mesh_id_len, mesh_id_up_len;
        struct cfg80211_conn *conn;
        struct cfg80211_cached_keys *connect_keys;
+       enum ieee80211_bss_type conn_bss_type;
 
        struct list_head event_list;
        spinlock_t event_lock;
index 6816f0fa5693dc275e1e4997375f29b5d99f7b64..30a56ab2ccfb05d7bc9a527ebdc09da19b3d5f7b 100644 (file)
@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
        return dst && !(dst->flags & DST_METADATA);
 }
 
+static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
+                                      const struct sk_buff *skb_b)
+{
+       const struct metadata_dst *a, *b;
+
+       if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
+               return 0;
+
+       a = (const struct metadata_dst *) skb_dst(skb_a);
+       b = (const struct metadata_dst *) skb_dst(skb_b);
+
+       if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
+               return 1;
+
+       return memcmp(&a->u.tun_info, &b->u.tun_info,
+                     sizeof(a->u.tun_info) + a->u.tun_info.options_len);
+}
+
 struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
 struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
 
index 877f682989b892fb1d70256bda6b33f03e34a0f5..295d291269e2c88ed4930041597a28f7c9a7a2f7 100644 (file)
@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
 
 void ip6_route_input(struct sk_buff *skb);
 
-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
-                                  struct flowi6 *fl6);
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+                                        struct flowi6 *fl6, int flags);
+
+static inline struct dst_entry *ip6_route_output(struct net *net,
+                                                const struct sock *sk,
+                                                struct flowi6 *fl6)
+{
+       return ip6_route_output_flags(net, sk, fl6, 0);
+}
+
 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags);
 
index 6db96ea0144f0445c4e52c51cddcc3d25d60ccdd..dda9abf6b89c14afddf71562c763bdd6bb7d85c3 100644 (file)
@@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
 int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
                    u8 *protocol, struct flowi4 *fl4);
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
 struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
index 8f81bbbc38fc939070a5761e3af90da62faf8d68..e0f4109e64c6fca9ba87d768c2c7b1220a6557f4 100644 (file)
@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
 /* Send a single event to user space */
 void wireless_send_event(struct net_device *dev, unsigned int cmd,
                         union iwreq_data *wrqu, const char *extra);
+#ifdef CONFIG_WEXT_CORE
+/* flush all previous wext events - if work is done from netdev notifiers */
+void wireless_nlevent_flush(void);
+#else
+static inline void wireless_nlevent_flush(void) {}
+#endif
 
 /* We may need a function to send a stream of events to user space.
  * More on that later... */
index 7c30faff245f262a59831132f84f5cb37c2cc8a5..57147749ae423eee59083c61ea60b159078e2ac5 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -298,6 +298,7 @@ struct ieee80211_vif_chanctx_switch {
  *     note that this is only called when it changes after the channel
  *     context had been assigned.
  * @BSS_CHANGED_OCB: OCB join status changed
+ * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed
  */
 enum ieee80211_bss_change {
        BSS_CHANGED_ASSOC               = 1<<0,
@@ -323,6 +324,7 @@ enum ieee80211_bss_change {
        BSS_CHANGED_BEACON_INFO         = 1<<20,
        BSS_CHANGED_BANDWIDTH           = 1<<21,
        BSS_CHANGED_OCB                 = 1<<22,
+       BSS_CHANGED_MU_GROUPS           = 1<<23,
 
        /* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -435,6 +437,19 @@ struct ieee80211_event {
        } u;
 };
 
+/**
+ * struct ieee80211_mu_group_data - STA's VHT MU-MIMO group data
+ *
+ * This structure describes the group id data of VHT MU-MIMO
+ *
+ * @membership: 64-bit bitmap - a bit is set if the station is a member of the group
+ * @position: 2 bits per group id indicating the position in the group
+ */
+struct ieee80211_mu_group_data {
+       u8 membership[WLAN_MEMBERSHIP_LEN];
+       u8 position[WLAN_USER_POSITION_LEN];
+};
+
 /**
  * struct ieee80211_bss_conf - holds the BSS's changing parameters
  *
@@ -477,6 +492,7 @@ struct ieee80211_event {
  * @enable_beacon: whether beaconing should be enabled or not
  * @chandef: Channel definition for this BSS -- the hardware might be
  *     configured a higher bandwidth than this BSS uses, for example.
+ * @mu_group: VHT MU-MIMO group membership data
  * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
  *     This field is only valid when the channel is a wide HT/VHT channel.
  *     Note that with TDLS this can be the case (channel is HT, protection must
@@ -535,6 +551,7 @@ struct ieee80211_bss_conf {
        s32 cqm_rssi_thold;
        u32 cqm_rssi_hyst;
        struct cfg80211_chan_def chandef;
+       struct ieee80211_mu_group_data mu_group;
        __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
        int arp_addr_cnt;
        bool qos;
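A hedged sketch of how a driver might consume the new field; hw_set_mu_groups() is an assumed hardware helper, while the callback signature, BSS_CHANGED_MU_GROUPS and mu_group come from mac80211:

/* Hypothetical driver callback: program VHT MU-MIMO group membership and
 * user positions into hardware when mac80211 reports a change. */
static void drv_bss_info_changed(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_bss_conf *info, u32 changed)
{
	if (changed & BSS_CHANGED_MU_GROUPS)
		hw_set_mu_groups(hw, info->mu_group.membership,
				 info->mu_group.position); /* assumed helper */
}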
@@ -691,12 +708,14 @@ enum mac80211_tx_info_flags {
  *     protocol frame (e.g. EAP)
  * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
  *     frame (PS-Poll or uAPSD).
+ * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
  *
  * These flags are used in tx_info->control.flags.
  */
 enum mac80211_tx_control_flags {
        IEEE80211_TX_CTRL_PORT_CTRL_PROTO       = BIT(0),
        IEEE80211_TX_CTRL_PS_RESPONSE           = BIT(1),
+       IEEE80211_TX_CTRL_RATE_INJECT           = BIT(2),
 };
 
 /*
@@ -993,6 +1012,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime
  *     field) is valid and contains the time the last symbol of the MPDU
  *     (including FCS) was received.
+ * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
+ *     field) is valid and contains the time the SYNC preamble was received.
  * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
  * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
  * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
@@ -1014,6 +1035,14 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
  *     is stored in the @ampdu_delimiter_crc field)
  * @RX_FLAG_LDPC: LDPC was used
+ * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
+ *     processing it in any regular way.
+ *     This is useful if drivers offload some frames but still want to report
+ *     them for sniffing purposes.
+ * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except
+ *     monitor interfaces.
+ *     This is useful if drivers offload some frames but still want to report
+ *     them for sniffing purposes.
  * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
  * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
  * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
@@ -1033,6 +1062,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
 enum mac80211_rx_flags {
        RX_FLAG_MMIC_ERROR              = BIT(0),
        RX_FLAG_DECRYPTED               = BIT(1),
+       RX_FLAG_MACTIME_PLCP_START      = BIT(2),
        RX_FLAG_MMIC_STRIPPED           = BIT(3),
        RX_FLAG_IV_STRIPPED             = BIT(4),
        RX_FLAG_FAILED_FCS_CRC          = BIT(5),
@@ -1046,7 +1076,7 @@ enum mac80211_rx_flags {
        RX_FLAG_HT_GF                   = BIT(13),
        RX_FLAG_AMPDU_DETAILS           = BIT(14),
        RX_FLAG_PN_VALIDATED            = BIT(15),
-       /* bit 16 free */
+       RX_FLAG_DUP_VALIDATED           = BIT(16),
        RX_FLAG_AMPDU_LAST_KNOWN        = BIT(17),
        RX_FLAG_AMPDU_IS_LAST           = BIT(18),
        RX_FLAG_AMPDU_DELIM_CRC_ERROR   = BIT(19),
@@ -1054,6 +1084,8 @@ enum mac80211_rx_flags {
        RX_FLAG_MACTIME_END             = BIT(21),
        RX_FLAG_VHT                     = BIT(22),
        RX_FLAG_LDPC                    = BIT(23),
+       RX_FLAG_ONLY_MONITOR            = BIT(24),
+       RX_FLAG_SKIP_MONITOR            = BIT(25),
        RX_FLAG_STBC_MASK               = BIT(26) | BIT(27),
        RX_FLAG_10MHZ                   = BIT(28),
        RX_FLAG_5MHZ                    = BIT(29),
@@ -1072,6 +1104,7 @@ enum mac80211_rx_flags {
  * @RX_VHT_FLAG_160MHZ: 160 MHz was used
  * @RX_VHT_FLAG_BF: packet was beamformed
  */
+
 enum mac80211_rx_vht_flags {
        RX_VHT_FLAG_80MHZ               = BIT(0),
        RX_VHT_FLAG_160MHZ              = BIT(1),
@@ -1091,6 +1124,8 @@ enum mac80211_rx_vht_flags {
  *     it but can store it and pass it back to the driver for synchronisation
  * @band: the active band when this frame was received
  * @freq: frequency the radio was tuned to when receiving this frame, in MHz
+ *     This field must be set for management frames, but isn't strictly needed
+ *     for data (other) frames - for those it only affects radiotap reporting.
  * @signal: signal strength when receiving this frame, either in dBm, in dB or
  *     unspecified depending on the hardware capabilities flags
  *     @IEEE80211_HW_SIGNAL_*
@@ -1684,6 +1719,18 @@ struct ieee80211_sta_rates {
  * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
  *     valid if the STA is a TDLS peer in the first place.
  * @mfp: indicates whether the STA uses management frame protection or not.
+ * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
+ *     A-MSDU. Taken from the Extended Capabilities element. 0 means
+ *     unlimited.
+ * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This
+ *     field is always valid for packets with a VHT preamble. For packets
+ *     with a HT preamble, additional limits apply:
+ *             + If the skb is transmitted as part of a BA agreement, the
+ *               A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+ *               + If the skb is not part of a BA agreement, the A-MSDU maximal
+ *               size is min(max_amsdu_len, 7935) bytes.
+ *     Both additional HT limits must be enforced by the low level driver.
+ *     This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2).
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
@@ -1702,6 +1749,8 @@ struct ieee80211_sta {
        bool tdls;
        bool tdls_initiator;
        bool mfp;
+       u8 max_amsdu_subframes;
+       u16 max_amsdu_len;
 
        struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
 
@@ -1910,6 +1959,11 @@ struct ieee80211_txq {
  *     by just its MAC address; this prevents, for example, the same station
  *     from connecting to two virtual AP interfaces at the same time.
  *
+ * @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the
+ *     reordering buffer internally, guaranteeing mac80211 receives frames in
+ *     order and does not need to manage its own reorder buffer or BA session
+ *     timeout.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -1946,6 +2000,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
        IEEE80211_HW_BEACON_TX_STATUS,
        IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
+       IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
 
        /* keep last, obviously */
        NUM_IEEE80211_HW_FLAGS
@@ -2167,7 +2222,7 @@ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev
  * @hw: the &struct ieee80211_hw to set the MAC address for
  * @addr: the address to set
  */
-static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr)
+static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr)
 {
        memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
 }
@@ -2683,6 +2738,33 @@ enum ieee80211_ampdu_mlme_action {
        IEEE80211_AMPDU_TX_OPERATIONAL,
 };
 
+/**
+ * struct ieee80211_ampdu_params - AMPDU action parameters
+ *
+ * @action: the ampdu action, value from %ieee80211_ampdu_mlme_action.
+ * @sta: peer of this AMPDU session
+ * @tid: tid of the BA session
+ * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When
+ *     action is set to %IEEE80211_AMPDU_RX_START the driver passes back the
+ *     actual ssn value used to start the session and writes the value here.
+ * @buf_size: reorder buffer size  (number of subframes). Valid only when the
+ *     action is set to %IEEE80211_AMPDU_RX_START or
+ *     %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU.
+ *     Valid only when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @timeout: BA session timeout. Valid only when the action is set to
+ *     %IEEE80211_AMPDU_RX_START
+ */
+struct ieee80211_ampdu_params {
+       enum ieee80211_ampdu_mlme_action action;
+       struct ieee80211_sta *sta;
+       u16 tid;
+       u16 ssn;
+       u8 buf_size;
+       bool amsdu;
+       u16 timeout;
+};
+
 /**
  * enum ieee80211_frame_release_type - frame release reason
  * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll
@@ -3027,13 +3109,9 @@ enum ieee80211_reconfig_type {
  * @ampdu_action: Perform a certain A-MPDU action
  *     The RA/TID combination determines the destination and TID we want
  *     the ampdu action to be performed for. The action is defined through
- *     ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
- *     is the first frame we expect to perform the action on. Notice
- *     that TX/RX_STOP can pass NULL for this parameter.
- *     The @buf_size parameter is only valid when the action is set to
- *     %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
- *     buffer size (number of subframes) for this session -- the driver
- *     may neither send aggregates containing more subframes than this
+ *     ieee80211_ampdu_mlme_action.
+ *     When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver
+ *     may neither send aggregates containing more subframes than @buf_size
  *     nor send aggregates in a way that lost frames would exceed the
  *     buffer size. If just limiting the aggregate size, this would be
  *     possible with a buf_size of 8:
@@ -3044,9 +3122,6 @@ enum ieee80211_reconfig_type {
  *     buffer size of 8. Correct ways to retransmit #1 would be:
  *      - TX:       1 or 18 or 81
  *     Even "189" would be wrong since 1 could be lost again.
- *     The @amsdu parameter is valid when the action is set to
- *     %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability
- *     to receive A-MSDU within A-MPDU.
  *
  *     Returns a negative error code on failure.
  *     The callback can sleep.
@@ -3388,9 +3463,7 @@ struct ieee80211_ops {
        int (*tx_last_beacon)(struct ieee80211_hw *hw);
        int (*ampdu_action)(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
-                           enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                           u8 buf_size, bool amsdu);
+                           struct ieee80211_ampdu_params *params);
        int (*get_survey)(struct ieee80211_hw *hw, int idx,
                struct survey_info *survey);
        void (*rfkill_poll)(struct ieee80211_hw *hw);
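A sketch of a driver adapting to the consolidated callback above; the hw_* helpers are assumptions, the fields come from struct ieee80211_ampdu_params introduced earlier in this patch:

/* Hypothetical driver implementation of the new-style ampdu_action. */
static int drv_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			    struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		return hw_rx_agg_start(hw, params->sta, params->tid,
				       params->ssn, params->buf_size,
				       params->timeout); /* assumed helper */
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		return hw_tx_agg_oper(hw, params->sta, params->tid,
				      params->buf_size, params->amsdu); /* assumed */
	default:
		return -EOPNOTSUPP;
	}
}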
@@ -5120,6 +5193,24 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw);
 void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
                                  const u8 *addr);
 
+/**
+ * ieee80211_mark_rx_ba_filtered_frames - move RX BA window and mark filtered
+ * @pubsta: station struct
+ * @tid: the session's TID
+ * @ssn: starting sequence number of the bitmap, all frames before this are
+ *     assumed to be out of the window after the call
+ * @filtered: bitmap of filtered frames, BIT(0) is the @ssn entry etc.
+ * @received_mpdus: number of received mpdus in firmware
+ *
+ * This function moves the BA window and releases all frames before @ssn, and
+ * marks frames marked in the bitmap as having been filtered. Afterwards, it
+ * checks if any frames in the window starting from @ssn can now be released
+ * (in case they were only waiting for frames that were filtered.)
+ */
+void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+                                         u16 ssn, u64 filtered,
+                                         u16 received_mpdus);
+
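A hedged sketch of a driver calling the new helper from a firmware notification; the notif structure and its field names are invented for illustration:

/* Hypothetical firmware-notification handler: tell mac80211 which frames
 * inside the BA window were filtered out by the firmware. */
static void drv_handle_ba_filter_notif(struct ieee80211_sta *sta,
				       struct fw_ba_filter_notif *notif)
{
	ieee80211_mark_rx_ba_filtered_frames(sta, notif->tid, notif->ssn,
					     notif->filtered_bitmap,
					     notif->rx_mpdu_cnt);
}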
 /**
  * ieee80211_send_bar - send a BlockAckReq frame
  *
@@ -5523,4 +5614,19 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
  */
 struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
                                     struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_txq_get_depth - get pending frame/byte count of given txq
+ *
+ * The values are not guaranteed to be coherent with regard to each other, i.e.
+ * txq state can change halfway through this function and the caller may end up
+ * with "new" frame_cnt and "old" byte_cnt or vice-versa.
+ *
+ * @txq: pointer obtained from station or virtual interface
+ * @frame_cnt: pointer to store frame count
+ * @byte_cnt: pointer to store byte count
+ */
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+                            unsigned long *frame_cnt,
+                            unsigned long *byte_cnt);
 #endif /* MAC80211_H */
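A small usage sketch (assumed driver code, not part of this patch) for the depth helper declared above:

/* Hypothetical TX scheduler hook: only kick the dequeue work when the
 * txq actually holds pending frames. */
static void drv_maybe_kick_txq(struct ieee80211_hw *hw,
			       struct ieee80211_txq *txq)
{
	unsigned long frames, bytes;

	ieee80211_txq_get_depth(txq, &frames, &bytes);
	if (frames)
		drv_schedule_tx(hw, txq); /* assumed helper */
}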
index 788ef58a66b9b78807d164bce85060cc760ee336..62e17d1319ff7423dbcf176815f04cecfcdf3371 100644 (file)
@@ -79,12 +79,10 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
             const struct nf_conntrack_l3proto *l3proto,
             const struct nf_conntrack_l4proto *proto);
 
-#ifdef CONFIG_LOCKDEP
-# define CONNTRACK_LOCKS 8
-#else
-# define CONNTRACK_LOCKS 1024
-#endif
+#define CONNTRACK_LOCKS 1024
+
 extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+void nf_conntrack_lock(spinlock_t *lock);
 
 extern spinlock_t nf_conntrack_expect_lock;
 
index 2b7907a3556877ec5311b7ee541e25f064d06644..4d6ec3f6fafe0d9b44b0383808d06f25e8e599db 100644 (file)
@@ -98,6 +98,16 @@ struct netns_ipv4 {
        int sysctl_tcp_keepalive_probes;
        int sysctl_tcp_keepalive_intvl;
 
+       int sysctl_tcp_syn_retries;
+       int sysctl_tcp_synack_retries;
+       int sysctl_tcp_syncookies;
+       int sysctl_tcp_reordering;
+       int sysctl_tcp_retries1;
+       int sysctl_tcp_retries2;
+       int sysctl_tcp_orphan_retries;
+       int sysctl_tcp_fin_timeout;
+       unsigned int sysctl_tcp_notsent_lowat;
+
        struct ping_group_range ping_group_range;
 
        atomic_t dev_addr_genid;
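A minimal sketch of the access pattern these fields enable (the tcp.h hunks below convert several call sites the same way):

/* Hypothetical helper: the per-netns value replaces the old global sysctl. */
static int tcp_syn_retries_for(const struct sock *sk)
{
	return sock_net(sk)->ipv4.sysctl_tcp_syn_retries;
}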
index 262532d111f51e3a91a06af785b4721e8fab9d56..59fa93c01d2a16a129298498f1d56556b895acd9 100644 (file)
@@ -21,6 +21,7 @@ struct scm_creds {
 struct scm_fp_list {
        short                   count;
        short                   max;
+       struct user_struct      *user;
        struct file             *fp[SCM_MAX_FD];
 };
 
index 6b1e3838667edc18d4ea08ec9cfdc8b73dc5c924..5a57409da37bba7aaa652ec860044db4061ba4a4 100644 (file)
@@ -756,7 +756,6 @@ struct sctp_transport {
 
        /* Reference counting. */
        atomic_t refcnt;
-       __u32    dead:1,
                /* RTO-Pending : A flag used to track if one of the DATA
                 *              chunks sent to this address is currently being
                 *              used to compute a RTT. If this flag is 0,
@@ -766,7 +765,7 @@ struct sctp_transport {
                 *              calculation completes (i.e. the DATA chunk
                 *              is SACK'd) clear this flag.
                 */
-                rto_pending:1,
+       __u32   rto_pending:1,
 
                /*
                 * hb_sent : a flag that signals that we have a pending
@@ -955,7 +954,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
 void sctp_transport_free(struct sctp_transport *);
 void sctp_transport_reset_timers(struct sctp_transport *);
-void sctp_transport_hold(struct sctp_transport *);
+int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
 void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
index b9e7b3d863a09db498e00004da4da8a781e90b9f..f5ea148853e2f76257aca80a4aef65a30d587538 100644 (file)
@@ -1035,18 +1035,6 @@ struct proto {
        struct list_head        node;
 #ifdef SOCK_REFCNT_DEBUG
        atomic_t                socks;
-#endif
-#ifdef CONFIG_MEMCG_KMEM
-       /*
-        * cgroup specific init/deinit functions. Called once for all
-        * protocols that implement it, from cgroups populate function.
-        * This function has to setup any files the protocol want to
-        * appear in the kmem cgroup filesystem.
-        */
-       int                     (*init_cgroup)(struct mem_cgroup *memcg,
-                                              struct cgroup_subsys *ss);
-       void                    (*destroy_cgroup)(struct mem_cgroup *memcg);
-       struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
        int                     (*diag_destroy)(struct sock *sk, int err);
 };
index 7dda3d7adba8e905b3d14db7f3361d9a96493b46..aecd30308d500b53a1b46902f2b3d254bcbc39b6 100644 (file)
@@ -16,7 +16,7 @@ struct sock_reuseport {
 };
 
 extern int reuseport_alloc(struct sock *sk);
-extern int reuseport_add_sock(struct sock *sk, const struct sock *sk2);
+extern int reuseport_add_sock(struct sock *sk, struct sock *sk2);
 extern void reuseport_detach_sock(struct sock *sk);
 extern struct sock *reuseport_select_sock(struct sock *sk,
                                          u32 hash,
index 2a5b3b8daee8a9e9926d5f01b85421f217ede631..cb8d30c20ef3efa694540d0bf92fd3e04369b34f 100644 (file)
@@ -215,7 +215,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 /* TCP thin-stream limits */
 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 
-/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+/* TCP initial congestion window as per rfc6928 */
 #define TCP_INIT_CWND          10
 
 /* Bit Flags for sysctl_tcp_fastopen */
@@ -238,13 +238,6 @@ extern struct inet_timewait_death_row tcp_death_row;
 extern int sysctl_tcp_timestamps;
 extern int sysctl_tcp_window_scaling;
 extern int sysctl_tcp_sack;
-extern int sysctl_tcp_fin_timeout;
-extern int sysctl_tcp_syn_retries;
-extern int sysctl_tcp_synack_retries;
-extern int sysctl_tcp_retries1;
-extern int sysctl_tcp_retries2;
-extern int sysctl_tcp_orphan_retries;
-extern int sysctl_tcp_syncookies;
 extern int sysctl_tcp_fastopen;
 extern int sysctl_tcp_retrans_collapse;
 extern int sysctl_tcp_stdurg;
@@ -273,7 +266,6 @@ extern int sysctl_tcp_thin_dupack;
 extern int sysctl_tcp_early_retrans;
 extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
-extern unsigned int sysctl_tcp_notsent_lowat;
 extern int sysctl_tcp_min_tso_segs;
 extern int sysctl_tcp_min_rtt_wlen;
 extern int sysctl_tcp_autocorking;
@@ -446,7 +438,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
@@ -567,6 +559,7 @@ void tcp_rearm_rto(struct sock *sk);
 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk);
 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
+void tcp_fin(struct sock *sk);
 
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
@@ -962,9 +955,11 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
  */
 static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
 {
+       struct net *net = sock_net((struct sock *)tp);
+
        tp->do_early_retrans = sysctl_tcp_early_retrans &&
                sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
-               sysctl_tcp_reordering == 3;
+               net->ipv4.sysctl_tcp_reordering == 3;
 }
 
 static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
@@ -1251,7 +1246,7 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
 
 static inline int tcp_fin_time(const struct sock *sk)
 {
-       int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+       int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
        const int rto = inet_csk(sk)->icsk_rto;
 
        if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -1433,6 +1428,7 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
+void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
@@ -1681,7 +1677,8 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
 
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
-       return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
+       struct net *net = sock_net((struct sock *)tp);
+       return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
 }
 
 static inline bool tcp_stream_memory_free(const struct sock *sk)
index 0fb86442544b26627fb5871f26d1ede748f9d6d9..25bd919c9ef0c9fcd91e62884781113d6b7c2d30 100644 (file)
@@ -9,17 +9,71 @@
 #include <linux/udp.h>
 #include <net/dst_metadata.h>
 
+/* VXLAN protocol (RFC 7348) header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R|               Reserved                        |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                VXLAN Network Identifier (VNI) |   Reserved    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * I = VXLAN Network Identifier (VNI) present.
+ */
+struct vxlanhdr {
+       __be32 vx_flags;
+       __be32 vx_vni;
+};
+
+/* VXLAN header flags. */
+#define VXLAN_HF_VNI BIT(27)
+
+#define VXLAN_N_VID     (1u << 24)
+#define VXLAN_VID_MASK  (VXLAN_N_VID - 1)
+#define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
 #define VNI_HASH_BITS  10
 #define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS  8
+#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
+
+/* Remote checksum offload for VXLAN (VXLAN_F_REMCSUM_[RT]X):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R|R|R|C|              Reserved                   |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |           VXLAN Network Identifier (VNI)      |O| Csum start  |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C = Remote checksum offload bit. When set, it indicates that the
+ *     remote checksum offload data is present.
+ *
+ * O = Offset bit. Indicates the checksum offset relative to
+ *     checksum start.
+ *
+ * Csum start = Checksum start divided by two.
+ *
+ * http://tools.ietf.org/html/draft-herbert-vxlan-rco
+ */
+
+/* VXLAN-RCO header flags. */
+#define VXLAN_HF_RCO BIT(21)
+
+/* Remote checksum offload header option */
+#define VXLAN_RCO_MASK  0x7f    /* Last byte of vni field */
+#define VXLAN_RCO_UDP   0x80    /* Indicates UDP RCO (TCP when not set) */
+#define VXLAN_RCO_SHIFT 1       /* Left shift of start */
+#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
+#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
 
 /*
- * VXLAN Group Based Policy Extension:
+ * VXLAN Group Based Policy Extension (VXLAN_F_GBP):
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |1|-|-|-|1|-|-|-|R|D|R|R|A|R|R|R|        Group Policy ID        |
+ * |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R|        Group Policy ID        |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  * |                VXLAN Network Identifier (VNI) |   Reserved    |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
+ * G = Group Policy ID present.
+ *
  * D = Don't Learn bit. When set, this bit indicates that the egress
  *     VTEP MUST NOT learn the source address of the encapsulated frame.
  *
  *
  * A = Indicates that the group policy has already been applied to
  *     this packet. Policies MUST NOT be applied by devices when the
  *
- * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy
+ * https://tools.ietf.org/html/draft-smith-vxlan-group-policy
  */
 struct vxlanhdr_gbp {
-       __u8    vx_flags;
+       u8      vx_flags;
 #ifdef __LITTLE_ENDIAN_BITFIELD
-       __u8    reserved_flags1:3,
+       u8      reserved_flags1:3,
                policy_applied:1,
                reserved_flags2:2,
                dont_learn:1,
                reserved_flags3:1;
 #elif defined(__BIG_ENDIAN_BITFIELD)
-       __u8    reserved_flags1:1,
+       u8      reserved_flags1:1,
                dont_learn:1,
                reserved_flags2:2,
                policy_applied:1,
@@ -50,6 +104,9 @@ struct vxlanhdr_gbp {
        __be32  vx_vni;
 };
 
+/* VXLAN-GBP header flags. */
+#define VXLAN_HF_GBP BIT(31)
+
 #define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF)
 
 /* skb->mark mapping
@@ -62,44 +119,6 @@ struct vxlanhdr_gbp {
 #define VXLAN_GBP_POLICY_APPLIED       (BIT(3) << 16)
 #define VXLAN_GBP_ID_MASK              (0xFFFF)
 
-/* VXLAN protocol header:
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |G|R|R|R|I|R|R|C|               Reserved                        |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |                VXLAN Network Identifier (VNI) |   Reserved    |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * G = 1       Group Policy (VXLAN-GBP)
- * I = 1       VXLAN Network Identifier (VNI) present
- * C = 1       Remote checksum offload (RCO)
- */
-struct vxlanhdr {
-       __be32 vx_flags;
-       __be32 vx_vni;
-};
-
-/* VXLAN header flags. */
-#define VXLAN_HF_RCO BIT(21)
-#define VXLAN_HF_VNI BIT(27)
-#define VXLAN_HF_GBP BIT(31)
-
-/* Remote checksum offload header option */
-#define VXLAN_RCO_MASK  0x7f    /* Last byte of vni field */
-#define VXLAN_RCO_UDP   0x80    /* Indicate UDP RCO (TCP when not set *) */
-#define VXLAN_RCO_SHIFT 1       /* Left shift of start */
-#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
-#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
-
-#define VXLAN_N_VID     (1u << 24)
-#define VXLAN_VID_MASK  (VXLAN_N_VID - 1)
-#define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-#define VNI_HASH_BITS  10
-#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS  8
-#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
-
 struct vxlan_metadata {
        u32             gbp;
 };
@@ -138,10 +157,10 @@ struct vxlan_config {
        int                     remote_ifindex;
        int                     mtu;
        __be16                  dst_port;
-       __u16                   port_min;
-       __u16                   port_max;
-       __u8                    tos;
-       __u8                    ttl;
+       u16                     port_min;
+       u16                     port_max;
+       u8                      tos;
+       u8                      ttl;
        u32                     flags;
        unsigned long           age_interval;
        unsigned int            addrmax;
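A small sketch built on the vxlanhdr layout and VNI mask regrouped above; the accessor name is illustrative, not part of the patch:

/* Hypothetical accessor: the 24-bit VNI occupies the upper bits of vx_vni,
 * so mask and shift after converting from network byte order. */
static inline u32 vxlan_get_vni(const struct vxlanhdr *vxh)
{
	return (ntohl(vxh->vx_vni) & VXLAN_VNI_MASK) >> 8;
}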
index fdabbb4ddba92f801e2952fafaeb997c7fa03a33..f730b91e472f19f97b15ec8982443fe099dd5db4 100644 (file)
@@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
 int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
 int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
                         unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+                             unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
+                              int count);
 
 /* main midi functions */
 
index 98feb1b82896504bfc0a40157c84a22ca137e259..d6dfa05ba322c9465d3027c0c992a97eff6a504e 100644 (file)
@@ -17,7 +17,7 @@ TRACE_EVENT(fence_annotate_wait_on,
 
        TP_STRUCT__entry(
                __string(driver, fence->ops->get_driver_name(fence))
-               __string(timeline, fence->ops->get_driver_name(fence))
+               __string(timeline, fence->ops->get_timeline_name(fence))
                __field(unsigned int, context)
                __field(unsigned int, seqno)
 
index 284244ebfe8d2c3ef7036c3a7e9b55ea15b11078..19e50300ce7d63da516ef9ea365c156a4e77ad3c 100644 (file)
@@ -38,6 +38,28 @@ DEFINE_EVENT(cpu, cpu_idle,
        TP_ARGS(state, cpu_id)
 );
 
+TRACE_EVENT(powernv_throttle,
+
+       TP_PROTO(int chip_id, const char *reason, int pmax),
+
+       TP_ARGS(chip_id, reason, pmax),
+
+       TP_STRUCT__entry(
+               __field(int, chip_id)
+               __string(reason, reason)
+               __field(int, pmax)
+       ),
+
+       TP_fast_assign(
+               __entry->chip_id = chip_id;
+               __assign_str(reason, reason);
+               __entry->pmax = pmax;
+       ),
+
+       TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
+                 __entry->pmax, __get_str(reason))
+);
+
 TRACE_EVENT(pstate_sample,
 
        TP_PROTO(u32 core_busy,
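TRACE_EVENT() generates a trace_powernv_throttle() call for kernel code to fire; the sketch below is an assumed call site with made-up variable names and reason string:

/* Hypothetical caller in platform code: record which chip throttled,
 * why, and the new Pmax. */
static void report_throttle(int chip_id, int throttled_pmax)
{
	trace_powernv_throttle(chip_id, "Over-temperature", throttled_pmax);
}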
diff --git a/include/trace/events/sunvnet.h b/include/trace/events/sunvnet.h
new file mode 100644 (file)
index 0000000..eb080b2
--- /dev/null
@@ -0,0 +1,139 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sunvnet
+
+#if !defined(_TRACE_SUNVNET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SUNVNET_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vnet_rx_one,
+
+       TP_PROTO(int lsid, int rsid, int index, int needs_ack),
+
+       TP_ARGS(lsid, rsid, index, needs_ack),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, index)
+               __field(int, needs_ack)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->index = index;
+               __entry->needs_ack = needs_ack;
+       ),
+
+       TP_printk("(%x:%x) walk_rx_one index %d; needs_ack %d",
+               __entry->lsid, __entry->rsid,
+               __entry->index, __entry->needs_ack)
+);
+
+DECLARE_EVENT_CLASS(vnet_tx_stopped_ack_template,
+
+       TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+
+       TP_ARGS(lsid, rsid, ack_end, npkts),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, ack_end)
+               __field(int, npkts)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->ack_end = ack_end;
+               __entry->npkts = npkts;
+       ),
+
+       TP_printk("(%x:%x) stopped ack for %d; npkts %d",
+               __entry->lsid, __entry->rsid,
+               __entry->ack_end, __entry->npkts)
+);
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_send_stopped_ack,
+            TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+            TP_ARGS(lsid, rsid, ack_end, npkts));
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_defer_stopped_ack,
+            TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+            TP_ARGS(lsid, rsid, ack_end, npkts));
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_pending_stopped_ack,
+            TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+            TP_ARGS(lsid, rsid, ack_end, npkts));
+
+TRACE_EVENT(vnet_rx_stopped_ack,
+
+       TP_PROTO(int lsid, int rsid, int end),
+
+       TP_ARGS(lsid, rsid, end),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, end)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->end = end;
+       ),
+
+       TP_printk("(%x:%x) stopped ack for index %d",
+               __entry->lsid, __entry->rsid, __entry->end)
+);
+
+TRACE_EVENT(vnet_tx_trigger,
+
+       TP_PROTO(int lsid, int rsid, int start, int err),
+
+       TP_ARGS(lsid, rsid, start, err),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, start)
+               __field(int, err)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->start = start;
+               __entry->err = err;
+       ),
+
+       TP_printk("(%x:%x) Tx trigger for %d sent with err %d %s",
+               __entry->lsid, __entry->rsid, __entry->start,
+               __entry->err, __entry->err > 0 ? "(ok)" : " ")
+);
+
+TRACE_EVENT(vnet_skip_tx_trigger,
+
+       TP_PROTO(int lsid, int rsid, int last),
+
+       TP_ARGS(lsid, rsid, last),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, last)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->last = last;
+       ),
+
+       TP_printk("(%x:%x) Skip Tx trigger. Last trigger sent was %d",
+               __entry->lsid, __entry->rsid, __entry->last)
+);
+#endif /* _TRACE_SUNVNET_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 4cc989ad685164689072f90f1ef2c40f633c4088..f95e1c43c3fbc06fc2de769cd5360a5559d549b3 100644 (file)
@@ -48,6 +48,8 @@ struct drm_etnaviv_timespec {
 #define ETNAVIV_PARAM_GPU_FEATURES_2                0x05
 #define ETNAVIV_PARAM_GPU_FEATURES_3                0x06
 #define ETNAVIV_PARAM_GPU_FEATURES_4                0x07
+#define ETNAVIV_PARAM_GPU_FEATURES_5                0x08
+#define ETNAVIV_PARAM_GPU_FEATURES_6                0x09
 
 #define ETNAVIV_PARAM_GPU_STREAM_COUNT              0x10
 #define ETNAVIV_PARAM_GPU_REGISTER_MAX              0x11
@@ -59,6 +61,7 @@ struct drm_etnaviv_timespec {
 #define ETNAVIV_PARAM_GPU_BUFFER_SIZE               0x17
 #define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT         0x18
 #define ETNAVIV_PARAM_GPU_NUM_CONSTANTS             0x19
+#define ETNAVIV_PARAM_GPU_NUM_VARYINGS              0x1a
 
 #define ETNA_MAX_PIPES 4
 
index aa6f8571de136b74fba93996883bd69b3e28d412..2ee0fde1bf9649739e8663dc480742fdf0ede72f 100644 (file)
@@ -81,6 +81,8 @@ enum bpf_map_type {
        BPF_MAP_TYPE_ARRAY,
        BPF_MAP_TYPE_PROG_ARRAY,
        BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       BPF_MAP_TYPE_PERCPU_HASH,
+       BPF_MAP_TYPE_PERCPU_ARRAY,
 };
 
 enum bpf_prog_type {
index dcd75cc261962f65c909a6efa0defe3f1dcdc281..11b3b31faf1483a46122a67e93b823182b768cb9 100644 (file)
@@ -39,6 +39,7 @@
 #define Q_XQUOTARM     XQM_CMD(6)      /* free disk space used by dquots */
 #define Q_XQUOTASYNC   XQM_CMD(7)      /* delalloc flush, updates dquots */
 #define Q_XGETQSTATV   XQM_CMD(8)      /* newer version of get quota */
+#define Q_XGETNEXTQUOTA        XQM_CMD(9)      /* get disk limits and usage >= ID */
 
 /*
  * fs_disk_quota structure:
index 57fa39005e794c65931f8e4cfd425d470693addd..b2e18018162987288a7b9ea3715e6e5f8cd3ce1b 100644 (file)
@@ -1319,11 +1319,45 @@ enum ethtool_sfeatures_retval_bits {
 
 #define SPEED_UNKNOWN          -1
 
+static inline int ethtool_validate_speed(__u32 speed)
+{
+       switch (speed) {
+       case SPEED_10:
+       case SPEED_100:
+       case SPEED_1000:
+       case SPEED_2500:
+       case SPEED_5000:
+       case SPEED_10000:
+       case SPEED_20000:
+       case SPEED_25000:
+       case SPEED_40000:
+       case SPEED_50000:
+       case SPEED_56000:
+       case SPEED_100000:
+       case SPEED_UNKNOWN:
+               return 1;
+       }
+
+       return 0;
+}
+
 /* Duplex, half or full. */
 #define DUPLEX_HALF            0x00
 #define DUPLEX_FULL            0x01
 #define DUPLEX_UNKNOWN         0xff
 
+static inline int ethtool_validate_duplex(__u8 duplex)
+{
+       switch (duplex) {
+       case DUPLEX_HALF:
+       case DUPLEX_FULL:
+       case DUPLEX_UNKNOWN:
+               return 1;
+       }
+
+       return 0;
+}
+
 /* Which connector port. */
 #define PORT_TP                        0x00
 #define PORT_AUI               0x01
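A hedged sketch of a driver using both validation helpers in its ethtool set_settings path; the driver context and the usual kernel-side includes are assumed:

/* Hypothetical set_settings implementation: reject speed/duplex values
 * the helpers above do not recognise. */
static int virt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex))
		return -EINVAL;

	/* ... store speed/duplex for the virtual link ... */
	return 0;
}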
index 41e0433b4a8398b3dc3da6d2d6aacb8e46287150..149bec83a907515dccea4425acf3b71d8d18410f 100644 (file)
@@ -222,7 +222,6 @@ struct fsxattr {
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
-#define BLKDAXSET _IO(0x12,128)
 #define BLKDAXGET _IO(0x12,129)
 
 #define BMAP_IOCTL 1           /* obsolete - kept for compatibility */
index 18db14477bdda952fa946ccfba979950233f3130..ec35472349988fa556f39028d5ea8ecd689b85b0 100644 (file)
@@ -183,6 +183,8 @@ struct br_mdb_entry {
 #define MDB_TEMPORARY 0
 #define MDB_PERMANENT 1
        __u8 state;
+#define MDB_FLAGS_OFFLOAD      (1 << 0)
+       __u8 flags;
        __u16 vid;
        struct {
                union {
index a30b78090594d500df10aa91f9f1b6628ae88593..d452cea5902039e2abd15cbd344a75fd879a6228 100644 (file)
@@ -35,6 +35,8 @@ struct rtnl_link_stats {
        /* for cslip etc */
        __u32   rx_compressed;
        __u32   tx_compressed;
+
+       __u32   rx_nohandler;           /* dropped, no handler found    */
 };
 
 /* The main device statistics structure */
@@ -68,6 +70,8 @@ struct rtnl_link_stats64 {
        /* for cslip etc */
        __u64   rx_compressed;
        __u64   tx_compressed;
+
+       __u64   rx_nohandler;           /* dropped, no handler found    */
 };
 
 /* The struct should be in sync with struct ifmap */
@@ -401,6 +405,14 @@ enum {
 
 #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
 
+enum {
+       IFLA_VRF_PORT_UNSPEC,
+       IFLA_VRF_PORT_TABLE,
+       __IFLA_VRF_PORT_MAX
+};
+
+#define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1)
+
 /* IPVLAN section */
 enum {
        IFLA_IPVLAN_UNSPEC,
index 1e3c8cb43bd7ac61650da4b6e31ab854af4a7b89..c9eb42a6c0210fa1f075cc294c70516d18dc17f8 100644 (file)
@@ -88,6 +88,15 @@ struct media_device_info {
 #define MEDIA_ENT_F_IO_VBI             (MEDIA_ENT_F_BASE + 32)
 #define MEDIA_ENT_F_IO_SWRADIO         (MEDIA_ENT_F_BASE + 33)
 
+/*
+ * Analog TV IF-PLL decoders
+ *
+ * It is the responsibility of the master/bridge drivers to create links
+ * for MEDIA_ENT_F_IF_VID_DECODER and MEDIA_ENT_F_IF_AUD_DECODER.
+ */
+#define MEDIA_ENT_F_IF_VID_DECODER     (MEDIA_ENT_F_BASE + 41)
+#define MEDIA_ENT_F_IF_AUD_DECODER     (MEDIA_ENT_F_BASE + 42)
+
 /*
  * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and
  * MEDIA_ENT_F_OLD_SUBDEV_BASE are kept to keep backward compatibility
@@ -107,8 +116,12 @@ struct media_device_info {
 #define MEDIA_ENT_F_LENS               (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
 #define MEDIA_ENT_F_ATV_DECODER                (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
 /*
- * It is a responsibility of the entity drivers to add connectors and links
- *     for the tuner entities.
+ * It is the responsibility of the master/bridge drivers to add connectors
+ * and links for MEDIA_ENT_F_TUNER. Please note that some old tuners
+ * may require the use of separate I2C chips to decode analog TV signals,
+ * when the master/bridge chipset doesn't have its own TV standard decoder.
+ * In such cases, the IF-PLL stage is mapped via one or two entities:
+ * MEDIA_ENT_F_IF_VID_DECODER and/or MEDIA_ENT_F_IF_AUD_DECODER.
  */
 #define MEDIA_ENT_F_TUNER              (MEDIA_ENT_F_OLD_SUBDEV_BASE + 5)
 
@@ -286,7 +299,7 @@ struct media_links_enum {
  *       later, before the adding this API upstream.
  */
 
-#if 0 /* Let's postpone it to Kernel 4.6 */
+
 struct media_v2_entity {
        __u32 id;
        char name[64];          /* FIXME: move to a property? (RFC says so) */
@@ -351,7 +364,6 @@ static inline void __user *media_get_uptr(__u64 arg)
 {
        return (void __user *)(uintptr_t)arg;
 }
-#endif
 
 /* ioctls */
 
@@ -359,9 +371,6 @@ static inline void __user *media_get_uptr(__u64 arg)
 #define MEDIA_IOC_ENUM_ENTITIES                _IOWR('|', 0x01, struct media_entity_desc)
 #define MEDIA_IOC_ENUM_LINKS           _IOWR('|', 0x02, struct media_links_enum)
 #define MEDIA_IOC_SETUP_LINK           _IOWR('|', 0x03, struct media_link_desc)
-
-#if 0 /* Let's postpone it to Kernel 4.6 */
 #define MEDIA_IOC_G_TOPOLOGY           _IOWR('|', 0x04, struct media_v2_topology)
-#endif
 
 #endif /* __LINUX_MEDIA_H */
index 5b7b5ebe7ca8731868e38bfab6218c166cb0317d..7758969a2a8e83f53e1bfa8f810b3826b9c7d44a 100644 (file)
@@ -1789,6 +1789,10 @@ enum nl80211_commands {
  *     thus it must not specify the number of iterations, only the interval
  *     between scans. The scan plans are executed sequentially.
  *     Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan.
+ * @NL80211_ATTR_PBSS: flag attribute. If set it means operate
+ *     in a PBSS. Specified in %NL80211_CMD_CONNECT to request
+ *     connecting to a PCP, and in %NL80211_CMD_START_AP to start
+ *     a PCP instead of AP. Relevant for DMG networks only.
  *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2164,6 +2168,8 @@ enum nl80211_attrs {
        NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
        NL80211_ATTR_SCHED_SCAN_PLANS,
 
+       NL80211_ATTR_PBSS,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
index 9c95b2c1c88a6ef0a6bb4207cd5122011d3007f9..38baddb807f503f5f526d93377df677ab80c5ad1 100644 (file)
@@ -71,6 +71,7 @@
 #define Q_SETINFO  0x800006    /* set information about quota files */
 #define Q_GETQUOTA 0x800007    /* get user quota structure */
 #define Q_SETQUOTA 0x800008    /* set user quota structure */
+#define Q_GETNEXTQUOTA 0x800009        /* get disk limits and usage >= ID */
 
 /* Quota format type IDs */
 #define        QFMT_VFS_OLD 1
@@ -119,6 +120,19 @@ struct if_dqblk {
        __u32 dqb_valid;
 };
 
+struct if_nextdqblk {
+       __u64 dqb_bhardlimit;
+       __u64 dqb_bsoftlimit;
+       __u64 dqb_curspace;
+       __u64 dqb_ihardlimit;
+       __u64 dqb_isoftlimit;
+       __u64 dqb_curinodes;
+       __u64 dqb_btime;
+       __u64 dqb_itime;
+       __u32 dqb_valid;
+       __u32 dqb_id;
+};
+
 /*
  * Structure used for setting quota information about file via quotactl
  * Following flags are used to specify which fields are valid
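A hedged user-space sketch of the new command, assuming libc headers that already expose Q_GETNEXTQUOTA; error handling is omitted:

#include <sys/types.h>
#include <sys/quota.h>

/* Hypothetical helper: fetch the quota entry with the lowest ID >= id;
 * on success the kernel fills *ndq, including ndq->dqb_id. */
static int next_user_quota(const char *dev, unsigned int id,
			   struct if_nextdqblk *ndq)
{
	return quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), dev, id, (caddr_t)ndq);
}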
index 058757f7a7339b104f9f44ebd77569cf20f35ef6..2e00dcebebd0700ac835d0d98221ef1cb4717974 100644 (file)
@@ -59,6 +59,8 @@ enum rfkill_type {
  * @RFKILL_OP_DEL: a device was removed
  * @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device
  * @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all)
+ *     into a state, also updating the default state used for devices that
+ *     are hot-plugged later.
  */
 enum rfkill_operation {
        RFKILL_OP_ADD = 0,
index 15273987093e41e7823f98a953f088332db066aa..5b3f685a2d50ca2146e579f81580aecb61f19922 100644 (file)
  * Copyright (C) 2012 Nokia Corporation
  * Contact: Sakari Ailus <sakari.ailus@iki.fi>
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
  *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
+ *  Alternatively you can redistribute this file under the terms of the
+ *  BSD license as stated below:
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *  3. The names of its contributors may not be used to endorse or promote
+ *     products derived from this software without specific prior written
+ *     permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ *  TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ *  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  */
 
index b0799bced518691bd847a16973be74184bcdcbaf..bd3bdf2486a7b1aa4744f2bd5045ff21e47812cd 100644 (file)
 #include <linux/filter.h>
 #include <linux/perf_event.h>
 
+static void bpf_array_free_percpu(struct bpf_array *array)
+{
+       int i;
+
+       for (i = 0; i < array->map.max_entries; i++)
+               free_percpu(array->pptrs[i]);
+}
+
+static int bpf_array_alloc_percpu(struct bpf_array *array)
+{
+       void __percpu *ptr;
+       int i;
+
+       for (i = 0; i < array->map.max_entries; i++) {
+               ptr = __alloc_percpu_gfp(array->elem_size, 8,
+                                        GFP_USER | __GFP_NOWARN);
+               if (!ptr) {
+                       bpf_array_free_percpu(array);
+                       return -ENOMEM;
+               }
+               array->pptrs[i] = ptr;
+       }
+
+       return 0;
+}
+
 /* Called from syscall */
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
+       bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        struct bpf_array *array;
-       u32 elem_size, array_size;
+       u64 array_size;
+       u32 elem_size;
 
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -36,12 +64,16 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        elem_size = round_up(attr->value_size, 8);
 
-       /* check round_up into zero and u32 overflow */
-       if (elem_size == 0 ||
-           attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
+       array_size = sizeof(*array);
+       if (percpu)
+               array_size += (u64) attr->max_entries * sizeof(void *);
+       else
+               array_size += (u64) attr->max_entries * elem_size;
+
+       /* make sure there is no u32 overflow later in round_up() */
+       if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);
 
-       array_size = sizeof(*array) + attr->max_entries * elem_size;
 
        /* allocate all map elements and zero-initialize them */
        array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
@@ -52,12 +84,25 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        }
 
        /* copy mandatory map attributes */
+       array->map.map_type = attr->map_type;
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
-       array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
        array->elem_size = elem_size;
 
+       if (!percpu)
+               goto out;
+
+       array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
+
+       if (array_size >= U32_MAX - PAGE_SIZE ||
+           elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+               kvfree(array);
+               return ERR_PTR(-ENOMEM);
+       }
+out:
+       array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
+
        return &array->map;
 }
 
@@ -67,12 +112,50 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
 
-       if (index >= array->map.max_entries)
+       if (unlikely(index >= array->map.max_entries))
                return NULL;
 
        return array->value + array->elem_size * index;
 }
 
+/* Called from eBPF program */
+static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       u32 index = *(u32 *)key;
+
+       if (unlikely(index >= array->map.max_entries))
+               return NULL;
+
+       return this_cpu_ptr(array->pptrs[index]);
+}
+
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       u32 index = *(u32 *)key;
+       void __percpu *pptr;
+       int cpu, off = 0;
+       u32 size;
+
+       if (unlikely(index >= array->map.max_entries))
+               return -ENOENT;
+
+       /* per_cpu areas are zero-filled and bpf programs can only
+        * access 'value_size' of them, so copying rounded areas
+        * will not leak any kernel data
+        */
+       size = round_up(map->value_size, 8);
+       rcu_read_lock();
+       pptr = array->pptrs[index];
+       for_each_possible_cpu(cpu) {
+               bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
+               off += size;
+       }
+       rcu_read_unlock();
+       return 0;
+}
+
 /* Called from syscall */
 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
@@ -99,19 +182,62 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
 
-       if (map_flags > BPF_EXIST)
+       if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;
 
-       if (index >= array->map.max_entries)
+       if (unlikely(index >= array->map.max_entries))
+               /* all elements were pre-allocated, cannot insert a new one */
+               return -E2BIG;
+
+       if (unlikely(map_flags == BPF_NOEXIST))
+               /* all elements already exist */
+               return -EEXIST;
+
+       if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+               memcpy(this_cpu_ptr(array->pptrs[index]),
+                      value, map->value_size);
+       else
+               memcpy(array->value + array->elem_size * index,
+                      value, map->value_size);
+       return 0;
+}
+
+int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+                           u64 map_flags)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       u32 index = *(u32 *)key;
+       void __percpu *pptr;
+       int cpu, off = 0;
+       u32 size;
+
+       if (unlikely(map_flags > BPF_EXIST))
+               /* unknown flags */
+               return -EINVAL;
+
+       if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;
 
-       if (map_flags == BPF_NOEXIST)
+       if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;
 
-       memcpy(array->value + array->elem_size * index, value, map->value_size);
+       /* the user space will provide round_up(value_size, 8) bytes per
+        * possible cpu, which will be copied into the per-cpu area. bpf
+        * programs can only access value_size of it. During lookup the
+        * same extra bytes will be
+        * returned or zeros which were zero-filled by percpu_alloc,
+        * so no kernel data leaks possible
+        */
+       size = round_up(map->value_size, 8);
+       rcu_read_lock();
+       pptr = array->pptrs[index];
+       for_each_possible_cpu(cpu) {
+               bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
+               off += size;
+       }
+       rcu_read_unlock();
        return 0;
 }
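A small sketch of the buffer layout described in the comments above, from the syscall side; the helper is hypothetical:

/* Hypothetical user-space helper: locate CPU @cpu's copy of a per-cpu map
 * value inside a lookup/update buffer laid out as one
 * round_up(value_size, 8) slot per possible CPU. */
static void *percpu_value_for_cpu(void *buf, unsigned int value_size,
				  unsigned int cpu)
{
	size_t stride = ((size_t)value_size + 7) & ~(size_t)7;

	return (char *)buf + (size_t)cpu * stride;
}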
 
@@ -133,6 +259,9 @@ static void array_map_free(struct bpf_map *map)
         */
        synchronize_rcu();
 
+       if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+               bpf_array_free_percpu(array);
+
        kvfree(array);
 }
 
@@ -150,9 +279,24 @@ static struct bpf_map_type_list array_type __read_mostly = {
        .type = BPF_MAP_TYPE_ARRAY,
 };
 
+static const struct bpf_map_ops percpu_array_ops = {
+       .map_alloc = array_map_alloc,
+       .map_free = array_map_free,
+       .map_get_next_key = array_map_get_next_key,
+       .map_lookup_elem = percpu_array_map_lookup_elem,
+       .map_update_elem = array_map_update_elem,
+       .map_delete_elem = array_map_delete_elem,
+};
+
+static struct bpf_map_type_list percpu_array_type __read_mostly = {
+       .ops = &percpu_array_ops,
+       .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+};
+
 static int __init register_array_map(void)
 {
        bpf_register_map_type(&array_type);
+       bpf_register_map_type(&percpu_array_type);
        return 0;
 }
 late_initcall(register_array_map);
@@ -291,10 +435,13 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
 {
        struct perf_event *event;
        const struct perf_event_attr *attr;
+       struct file *file;
 
-       event = perf_event_get(fd);
-       if (IS_ERR(event))
-               return event;
+       file = perf_event_get(fd);
+       if (IS_ERR(file))
+               return file;
+
+       event = file->private_data;
 
        attr = perf_event_attrs(event);
        if (IS_ERR(attr))
@@ -304,24 +451,22 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
                goto err;
 
        if (attr->type == PERF_TYPE_RAW)
-               return event;
+               return file;
 
        if (attr->type == PERF_TYPE_HARDWARE)
-               return event;
+               return file;
 
        if (attr->type == PERF_TYPE_SOFTWARE &&
            attr->config == PERF_COUNT_SW_BPF_OUTPUT)
-               return event;
+               return file;
 err:
-       perf_event_release_kernel(event);
+       fput(file);
        return ERR_PTR(-EINVAL);
 }
 
 static void perf_event_fd_array_put_ptr(void *ptr)
 {
-       struct perf_event *event = ptr;
-
-       perf_event_release_kernel(event);
+       fput((struct file *)ptr);
 }
 
 static const struct bpf_map_ops perf_event_array_ops = {
index c5b30fd8a3151f99b8cf175ad2a7ce563a0e13c8..fd5db8fe9360db2134339d3e3f4ec47032d42772 100644 (file)
@@ -31,21 +31,27 @@ struct bpf_htab {
 struct htab_elem {
        struct hlist_node hash_node;
        struct rcu_head rcu;
-       u32 hash;
+       union {
+               u32 hash;
+               u32 key_size;
+       };
        char key[0] __aligned(8);
 };
 
 /* Called from syscall */
 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 {
+       bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
        struct bpf_htab *htab;
        int err, i;
+       u64 cost;
 
        htab = kzalloc(sizeof(*htab), GFP_USER);
        if (!htab)
                return ERR_PTR(-ENOMEM);
 
        /* mandatory map attributes */
+       htab->map.map_type = attr->map_type;
        htab->map.key_size = attr->key_size;
        htab->map.value_size = attr->value_size;
        htab->map.max_entries = attr->max_entries;
@@ -77,24 +83,34 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 */
                goto free_htab;
 
+       if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
+               /* make sure the size for pcpu_alloc() is reasonable */
+               goto free_htab;
+
        htab->elem_size = sizeof(struct htab_elem) +
-                         round_up(htab->map.key_size, 8) +
-                         htab->map.value_size;
+                         round_up(htab->map.key_size, 8);
+       if (percpu)
+               htab->elem_size += sizeof(void *);
+       else
+               htab->elem_size += htab->map.value_size;
 
        /* prevent zero size kmalloc and check for u32 overflow */
        if (htab->n_buckets == 0 ||
            htab->n_buckets > U32_MAX / sizeof(struct bucket))
                goto free_htab;
 
-       if ((u64) htab->n_buckets * sizeof(struct bucket) +
-           (u64) htab->elem_size * htab->map.max_entries >=
-           U32_MAX - PAGE_SIZE)
+       cost = (u64) htab->n_buckets * sizeof(struct bucket) +
+              (u64) htab->elem_size * htab->map.max_entries;
+
+       if (percpu)
+               cost += (u64) round_up(htab->map.value_size, 8) *
+                       num_possible_cpus() * htab->map.max_entries;
+
+       if (cost >= U32_MAX - PAGE_SIZE)
                /* make sure page count doesn't overflow */
                goto free_htab;
 
-       htab->map.pages = round_up(htab->n_buckets * sizeof(struct bucket) +
-                                  htab->elem_size * htab->map.max_entries,
-                                  PAGE_SIZE) >> PAGE_SHIFT;
+       htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
        err = -ENOMEM;
        htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
@@ -148,7 +164,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
 }
 
 /* Called from syscall or from eBPF program */
-static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
+static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_head *head;
@@ -166,6 +182,13 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 
        l = lookup_elem_raw(head, hash, key, key_size);
 
+       return l;
+}
+
+static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
        if (l)
                return l->key + round_up(map->key_size, 8);
 
@@ -230,65 +253,149 @@ find_first_elem:
        return -ENOENT;
 }
 
+
+static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
+                                    void __percpu *pptr)
+{
+       *(void __percpu **)(l->key + key_size) = pptr;
+}
+
+static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
+{
+       return *(void __percpu **)(l->key + key_size);
+}
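
To visualize what these accessors index into: a per-cpu hash element stores a pointer to a separate per-cpu allocation where a regular element stores the value inline, directly after the 8-byte-aligned key (mirroring the elem_size computation in htab_map_alloc() above). Illustrative layout only:

        regular element:  [ hlist_node | rcu | hash ][ key, padded to 8 ][ value_size bytes ]
        per-cpu element:  [ hlist_node | rcu | hash ][ key, padded to 8 ][ void __percpu *  ]
                                                                           '--> round_up(value_size, 8)
                                                                                bytes on each possible CPU
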
+
+static void htab_percpu_elem_free(struct htab_elem *l)
+{
+       free_percpu(htab_elem_get_ptr(l, l->key_size));
+       kfree(l);
+}
+
+static void htab_percpu_elem_free_rcu(struct rcu_head *head)
+{
+       struct htab_elem *l = container_of(head, struct htab_elem, rcu);
+
+       htab_percpu_elem_free(l);
+}
+
+static void free_htab_elem(struct htab_elem *l, bool percpu, u32 key_size)
+{
+       if (percpu) {
+               l->key_size = key_size;
+               call_rcu(&l->rcu, htab_percpu_elem_free_rcu);
+       } else {
+               kfree_rcu(l, rcu);
+       }
+}
+
+static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+                                        void *value, u32 key_size, u32 hash,
+                                        bool percpu, bool onallcpus)
+{
+       u32 size = htab->map.value_size;
+       struct htab_elem *l_new;
+       void __percpu *pptr;
+
+       l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+       if (!l_new)
+               return NULL;
+
+       memcpy(l_new->key, key, key_size);
+       if (percpu) {
+               /* round up value_size to 8 bytes */
+               size = round_up(size, 8);
+
+               /* alloc_percpu zero-fills */
+               pptr = __alloc_percpu_gfp(size, 8, GFP_ATOMIC | __GFP_NOWARN);
+               if (!pptr) {
+                       kfree(l_new);
+                       return NULL;
+               }
+
+               if (!onallcpus) {
+                       /* copy true value_size bytes */
+                       memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
+               } else {
+                       int off = 0, cpu;
+
+                       for_each_possible_cpu(cpu) {
+                               bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
+                                               value + off, size);
+                               off += size;
+                       }
+               }
+               htab_elem_set_ptr(l_new, key_size, pptr);
+       } else {
+               memcpy(l_new->key + round_up(key_size, 8), value, size);
+       }
+
+       l_new->hash = hash;
+       return l_new;
+}
+
+static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
+                      u64 map_flags)
+{
+       if (!l_old && unlikely(atomic_read(&htab->count) >= htab->map.max_entries))
+               /* if elem with this 'key' doesn't exist and we've reached
+                * max_entries limit, fail insertion of new elem
+                */
+               return -E2BIG;
+
+       if (l_old && map_flags == BPF_NOEXIST)
+               /* elem already exists */
+               return -EEXIST;
+
+       if (!l_old && map_flags == BPF_EXIST)
+               /* elem doesn't exist, cannot update it */
+               return -ENOENT;
+
+       return 0;
+}
+
 /* Called from syscall or from eBPF program */
 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                                u64 map_flags)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct htab_elem *l_new, *l_old;
+       struct htab_elem *l_new = NULL, *l_old;
        struct hlist_head *head;
-       struct bucket *b;
        unsigned long flags;
-       u32 key_size;
+       struct bucket *b;
+       u32 key_size, hash;
        int ret;
 
-       if (map_flags > BPF_EXIST)
+       if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
-       /* allocate new element outside of lock */
-       l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
-       if (!l_new)
-               return -ENOMEM;
-
        key_size = map->key_size;
 
-       memcpy(l_new->key, key, key_size);
-       memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);
+       hash = htab_map_hash(key, key_size);
+
+       /* allocate new element outside of the lock, since
+        * we're most likely going to insert it
+        */
+       l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false);
+       if (!l_new)
+               return -ENOMEM;
 
-       l_new->hash = htab_map_hash(l_new->key, key_size);
-       b = __select_bucket(htab, l_new->hash);
+       b = __select_bucket(htab, hash);
        head = &b->head;
 
        /* bpf_map_update_elem() can be called in_irq() */
        raw_spin_lock_irqsave(&b->lock, flags);
 
-       l_old = lookup_elem_raw(head, l_new->hash, key, key_size);
+       l_old = lookup_elem_raw(head, hash, key, key_size);
 
-       if (!l_old && unlikely(atomic_read(&htab->count) >= map->max_entries)) {
-               /* if elem with this 'key' doesn't exist and we've reached
-                * max_entries limit, fail insertion of new elem
-                */
-               ret = -E2BIG;
+       ret = check_flags(htab, l_old, map_flags);
+       if (ret)
                goto err;
-       }
 
-       if (l_old && map_flags == BPF_NOEXIST) {
-               /* elem already exists */
-               ret = -EEXIST;
-               goto err;
-       }
-
-       if (!l_old && map_flags == BPF_EXIST) {
-               /* elem doesn't exist, cannot update it */
-               ret = -ENOENT;
-               goto err;
-       }
-
-       /* add new element to the head of the list, so that concurrent
-        * search will find it before old elem
+       /* add new element to the head of the list, so that
+        * concurrent search will find it before old elem
         */
        hlist_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
@@ -298,7 +405,6 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                atomic_inc(&htab->count);
        }
        raw_spin_unlock_irqrestore(&b->lock, flags);
-
        return 0;
 err:
        raw_spin_unlock_irqrestore(&b->lock, flags);
@@ -306,10 +412,84 @@ err:
        return ret;
 }
 
+static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
+                                        void *value, u64 map_flags,
+                                        bool onallcpus)
+{
+       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       struct htab_elem *l_new = NULL, *l_old;
+       struct hlist_head *head;
+       unsigned long flags;
+       struct bucket *b;
+       u32 key_size, hash;
+       int ret;
+
+       if (unlikely(map_flags > BPF_EXIST))
+               /* unknown flags */
+               return -EINVAL;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
+       key_size = map->key_size;
+
+       hash = htab_map_hash(key, key_size);
+
+       b = __select_bucket(htab, hash);
+       head = &b->head;
+
+       /* bpf_map_update_elem() can be called in_irq() */
+       raw_spin_lock_irqsave(&b->lock, flags);
+
+       l_old = lookup_elem_raw(head, hash, key, key_size);
+
+       ret = check_flags(htab, l_old, map_flags);
+       if (ret)
+               goto err;
+
+       if (l_old) {
+               void __percpu *pptr = htab_elem_get_ptr(l_old, key_size);
+               u32 size = htab->map.value_size;
+
+               /* per-cpu hash map can update value in-place */
+               if (!onallcpus) {
+                       memcpy(this_cpu_ptr(pptr), value, size);
+               } else {
+                       int off = 0, cpu;
+
+                       size = round_up(size, 8);
+                       for_each_possible_cpu(cpu) {
+                               bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
+                                               value + off, size);
+                               off += size;
+                       }
+               }
+       } else {
+               l_new = alloc_htab_elem(htab, key, value, key_size,
+                                       hash, true, onallcpus);
+               if (!l_new) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               hlist_add_head_rcu(&l_new->hash_node, head);
+               atomic_inc(&htab->count);
+       }
+       ret = 0;
+err:
+       raw_spin_unlock_irqrestore(&b->lock, flags);
+       return ret;
+}
+
+static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
+                                      void *value, u64 map_flags)
+{
+       return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
+}
+
 /* Called from syscall or from eBPF program */
 static int htab_map_delete_elem(struct bpf_map *map, void *key)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_HASH;
        struct hlist_head *head;
        struct bucket *b;
        struct htab_elem *l;
@@ -332,7 +512,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
        if (l) {
                hlist_del_rcu(&l->hash_node);
                atomic_dec(&htab->count);
-               kfree_rcu(l, rcu);
+               free_htab_elem(l, percpu, key_size);
                ret = 0;
        }
 
@@ -352,7 +532,12 @@ static void delete_all_elements(struct bpf_htab *htab)
                hlist_for_each_entry_safe(l, n, head, hash_node) {
                        hlist_del_rcu(&l->hash_node);
                        atomic_dec(&htab->count);
-                       kfree(l);
+                       if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+                               l->key_size = htab->map.key_size;
+                               htab_percpu_elem_free(l);
+                       } else {
+                               kfree(l);
+                       }
                }
        }
 }
@@ -391,9 +576,70 @@ static struct bpf_map_type_list htab_type __read_mostly = {
        .type = BPF_MAP_TYPE_HASH,
 };
 
+/* Called from eBPF program */
+static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
+       if (l)
+               return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
+       else
+               return NULL;
+}
+
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
+{
+       struct htab_elem *l;
+       void __percpu *pptr;
+       int ret = -ENOENT;
+       int cpu, off = 0;
+       u32 size;
+
+       /* per_cpu areas are zero-filled and bpf programs can only
+        * access 'value_size' of them, so copying rounded areas
+        * will not leak any kernel data
+        */
+       size = round_up(map->value_size, 8);
+       rcu_read_lock();
+       l = __htab_map_lookup_elem(map, key);
+       if (!l)
+               goto out;
+       pptr = htab_elem_get_ptr(l, map->key_size);
+       for_each_possible_cpu(cpu) {
+               bpf_long_memcpy(value + off,
+                               per_cpu_ptr(pptr, cpu), size);
+               off += size;
+       }
+       ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
+}
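
The matching user-space read is a single BPF_MAP_LOOKUP_ELEM that fills one 8-byte slot per possible CPU; a typical consumer then sums the slots. A hedged sketch assuming a u64 counter value, a raw bpf(2) syscall and a caller-supplied possible-CPU count (percpu_counter_sum() is illustrative, not an existing helper):

#include <linux/bpf.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int percpu_counter_sum(int map_fd, const void *key,
                              int n_possible_cpus, uint64_t *sum)
{
        uint64_t *vals = calloc(n_possible_cpus, sizeof(*vals));
        union bpf_attr attr;
        int cpu, err;

        if (!vals)
                return -1;
        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;
        attr.key    = (uint64_t)(unsigned long)key;
        attr.value  = (uint64_t)(unsigned long)vals;
        err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
        if (!err) {
                *sum = 0;
                for (cpu = 0; cpu < n_possible_cpus; cpu++)
                        *sum += vals[cpu];      /* one slot per possible CPU */
        }
        free(vals);
        return err;
}
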
+
+int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
+                          u64 map_flags)
+{
+       return __htab_percpu_map_update_elem(map, key, value, map_flags, true);
+}
+
+static const struct bpf_map_ops htab_percpu_ops = {
+       .map_alloc = htab_map_alloc,
+       .map_free = htab_map_free,
+       .map_get_next_key = htab_map_get_next_key,
+       .map_lookup_elem = htab_percpu_map_lookup_elem,
+       .map_update_elem = htab_percpu_map_update_elem,
+       .map_delete_elem = htab_map_delete_elem,
+};
+
+static struct bpf_map_type_list htab_percpu_type __read_mostly = {
+       .ops = &htab_percpu_ops,
+       .type = BPF_MAP_TYPE_PERCPU_HASH,
+};
+
 static int __init register_htab_map(void)
 {
        bpf_register_map_type(&htab_type);
+       bpf_register_map_type(&htab_percpu_type);
        return 0;
 }
 late_initcall(register_htab_map);
index 637397059f763564b535cfda2e4eaa9bf1d34fad..c95a753c2007966a752c2c2a5eda00e6a0a39072 100644 (file)
@@ -239,6 +239,7 @@ static int map_lookup_elem(union bpf_attr *attr)
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value, *ptr;
+       u32 value_size;
        struct fd f;
        int err;
 
@@ -259,23 +260,35 @@ static int map_lookup_elem(union bpf_attr *attr)
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
 
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+               value_size = round_up(map->value_size, 8) * num_possible_cpus();
+       else
+               value_size = map->value_size;
+
        err = -ENOMEM;
-       value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
+       value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;
 
-       rcu_read_lock();
-       ptr = map->ops->map_lookup_elem(map, key);
-       if (ptr)
-               memcpy(value, ptr, map->value_size);
-       rcu_read_unlock();
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+               err = bpf_percpu_hash_copy(map, key, value);
+       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
+               err = bpf_percpu_array_copy(map, key, value);
+       } else {
+               rcu_read_lock();
+               ptr = map->ops->map_lookup_elem(map, key);
+               if (ptr)
+                       memcpy(value, ptr, value_size);
+               rcu_read_unlock();
+               err = ptr ? 0 : -ENOENT;
+       }
 
-       err = -ENOENT;
-       if (!ptr)
+       if (err)
                goto free_value;
 
        err = -EFAULT;
-       if (copy_to_user(uvalue, value, map->value_size) != 0)
+       if (copy_to_user(uvalue, value, value_size) != 0)
                goto free_value;
 
        err = 0;
@@ -298,6 +311,7 @@ static int map_update_elem(union bpf_attr *attr)
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
+       u32 value_size;
        struct fd f;
        int err;
 
@@ -318,21 +332,30 @@ static int map_update_elem(union bpf_attr *attr)
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
 
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+               value_size = round_up(map->value_size, 8) * num_possible_cpus();
+       else
+               value_size = map->value_size;
+
        err = -ENOMEM;
-       value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
+       value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;
 
        err = -EFAULT;
-       if (copy_from_user(value, uvalue, map->value_size) != 0)
+       if (copy_from_user(value, uvalue, value_size) != 0)
                goto free_value;
 
-       /* eBPF program that use maps are running under rcu_read_lock(),
-        * therefore all map accessors rely on this fact, so do the same here
-        */
-       rcu_read_lock();
-       err = map->ops->map_update_elem(map, key, value, attr->flags);
-       rcu_read_unlock();
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+               err = bpf_percpu_hash_update(map, key, value, attr->flags);
+       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
+               err = bpf_percpu_array_update(map, key, value, attr->flags);
+       } else {
+               rcu_read_lock();
+               err = map->ops->map_update_elem(map, key, value, attr->flags);
+               rcu_read_unlock();
+       }
 
 free_value:
        kfree(value);
index d1d3e8f57de907764fe3080632062485f5639443..2e7f7ab739e41c46072a69e787bf4e44f560ae55 100644 (file)
@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
                /* adjust offset of jmps if necessary */
                if (i < pos && i + insn->off + 1 > pos)
                        insn->off += delta;
-               else if (i > pos && i + insn->off + 1 < pos)
+               else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
                        insn->off -= delta;
        }
 }
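
To make the corrected test concrete, with made-up numbers: suppose the verifier patches the instruction at pos = 3 into three instructions, so delta = 2 and every instruction after index 3 shifts forward by two. A backward jump that originally went from index 7 to index 2 had off = 2 - 7 - 1 = -6; after patching, its source sits at index 9 while its target is still index 2, so it needs off = 2 - 9 - 1 = -8. With the new condition, i = 9 > pos + delta = 5 and i + off + 1 = 4 <= pos + delta = 5, so off is reduced by delta as required. The old condition compared 4 < pos = 3, which is false, and would have left such back-jumps landing two instructions past their intended target.
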
index c03a640ef6da265db01b93c2970ab6b2da7abd67..d27904c193daa1d8a8680522093254cfde376177 100644 (file)
@@ -58,6 +58,7 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/cpuset.h>
 #include <net/sock.h>
 
 /*
@@ -2739,6 +2740,7 @@ out_unlock_rcu:
 out_unlock_threadgroup:
        percpu_up_write(&cgroup_threadgroup_rwsem);
        cgroup_kn_unlock(of->kn);
+       cpuset_post_attach_flush();
        return ret ?: nbytes;
 }
 
@@ -4655,14 +4657,15 @@ static void css_free_work_fn(struct work_struct *work)
 
        if (ss) {
                /* css free path */
+               struct cgroup_subsys_state *parent = css->parent;
                int id = css->id;
 
-               if (css->parent)
-                       css_put(css->parent);
-
                ss->css_free(css);
                cgroup_idr_remove(&ss->css_idr, id);
                cgroup_put(cgrp);
+
+               if (parent)
+                       css_put(parent);
        } else {
                /* cgroup free path */
                atomic_dec(&cgrp->root->nr_cgrps);
@@ -4758,6 +4761,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
        INIT_LIST_HEAD(&css->sibling);
        INIT_LIST_HEAD(&css->children);
        css->serial_nr = css_serial_nr_next++;
+       atomic_set(&css->online_cnt, 0);
 
        if (cgroup_parent(cgrp)) {
                css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4780,6 +4784,10 @@ static int online_css(struct cgroup_subsys_state *css)
        if (!ret) {
                css->flags |= CSS_ONLINE;
                rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
+
+               atomic_inc(&css->online_cnt);
+               if (css->parent)
+                       atomic_inc(&css->parent->online_cnt);
        }
        return ret;
 }
@@ -5017,10 +5025,15 @@ static void css_killed_work_fn(struct work_struct *work)
                container_of(work, struct cgroup_subsys_state, destroy_work);
 
        mutex_lock(&cgroup_mutex);
-       offline_css(css);
-       mutex_unlock(&cgroup_mutex);
 
-       css_put(css);
+       do {
+               offline_css(css);
+               css_put(css);
+               /* @css can't go away while we're holding cgroup_mutex */
+               css = css->parent;
+       } while (css && atomic_dec_and_test(&css->online_cnt));
+
+       mutex_unlock(&cgroup_mutex);
 }
 
 /* css kill confirmation processing requires process context, bounce */
@@ -5029,8 +5042,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
        struct cgroup_subsys_state *css =
                container_of(ref, struct cgroup_subsys_state, refcnt);
 
-       INIT_WORK(&css->destroy_work, css_killed_work_fn);
-       queue_work(cgroup_destroy_wq, &css->destroy_work);
+       if (atomic_dec_and_test(&css->online_cnt)) {
+               INIT_WORK(&css->destroy_work, css_killed_work_fn);
+               queue_work(cgroup_destroy_wq, &css->destroy_work);
+       }
 }
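
The counting scheme introduced here (each css counts itself plus its online children, and killing only cascades to the parent once the last online descendant is gone) can be seen in isolation in the toy user-space model below. It is a sketch only, with made-up names and C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

struct node {
        const char *name;
        struct node *parent;
        atomic_int online_cnt;          /* self + online children, like css->online_cnt */
};

static void node_online(struct node *n)
{
        atomic_fetch_add(&n->online_cnt, 1);
        if (n->parent)
                atomic_fetch_add(&n->parent->online_cnt, 1);
}

static void node_kill(struct node *n)
{
        /* css_killed_ref_fn(): defer teardown while children are still online */
        if (atomic_fetch_sub(&n->online_cnt, 1) != 1)
                return;
        /* css_killed_work_fn(): offline, then walk up through ancestors that
         * were only kept online by this subtree
         */
        do {
                printf("offline %s\n", n->name);
                n = n->parent;
        } while (n && atomic_fetch_sub(&n->online_cnt, 1) == 1);
}

int main(void)
{
        struct node root  = { .name = "root" };
        struct node child = { .name = "child", .parent = &root };

        node_online(&root);
        node_online(&child);

        node_kill(&root);       /* deferred: child is still online */
        node_kill(&child);      /* prints "offline child", then "offline root" */
        return 0;
}
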
 
 /**
index 3e945fcd81796f954a7e1c81ae95e78bd1a91dba..41989ab4db571cbf93d1a12738bc9afc3411e019 100644 (file)
@@ -287,6 +287,8 @@ static struct cpuset top_cpuset = {
 static DEFINE_MUTEX(cpuset_mutex);
 static DEFINE_SPINLOCK(callback_lock);
 
+static struct workqueue_struct *cpuset_migrate_mm_wq;
+
 /*
  * CPU / memory hotplug is handled asynchronously.
  */
@@ -972,31 +974,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 }
 
 /*
- * cpuset_migrate_mm
- *
- *    Migrate memory region from one set of nodes to another.
- *
- *    Temporarilly set tasks mems_allowed to target nodes of migration,
- *    so that the migration code can allocate pages on these nodes.
- *
- *    While the mm_struct we are migrating is typically from some
- *    other task, the task_struct mems_allowed that we are hacking
- *    is for our current task, which must allocate new pages for that
- *    migrating memory region.
+ * Migrate memory region from one set of nodes to another.  This is
+ * performed asynchronously as it can be called from process migration path
+ * holding locks involved in process management.  All mm migrations are
+ * performed in the queued order and can be waited for by flushing
+ * cpuset_migrate_mm_wq.
  */
 
+struct cpuset_migrate_mm_work {
+       struct work_struct      work;
+       struct mm_struct        *mm;
+       nodemask_t              from;
+       nodemask_t              to;
+};
+
+static void cpuset_migrate_mm_workfn(struct work_struct *work)
+{
+       struct cpuset_migrate_mm_work *mwork =
+               container_of(work, struct cpuset_migrate_mm_work, work);
+
+       /* on a wq worker, no need to worry about %current's mems_allowed */
+       do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
+       mmput(mwork->mm);
+       kfree(mwork);
+}
+
 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
                                                        const nodemask_t *to)
 {
-       struct task_struct *tsk = current;
-
-       tsk->mems_allowed = *to;
+       struct cpuset_migrate_mm_work *mwork;
 
-       do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+       mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
+       if (mwork) {
+               mwork->mm = mm;
+               mwork->from = *from;
+               mwork->to = *to;
+               INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
+               queue_work(cpuset_migrate_mm_wq, &mwork->work);
+       } else {
+               mmput(mm);
+       }
+}
 
-       rcu_read_lock();
-       guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
-       rcu_read_unlock();
+void cpuset_post_attach_flush(void)
+{
+       flush_workqueue(cpuset_migrate_mm_wq);
 }
 
 /*
@@ -1097,7 +1119,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
                mpol_rebind_mm(mm, &cs->mems_allowed);
                if (migrate)
                        cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
-               mmput(mm);
+               else
+                       mmput(mm);
        }
        css_task_iter_end(&it);
 
@@ -1545,11 +1568,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
                         * @old_mems_allowed is the right nodesets that we
                         * migrate mm from.
                         */
-                       if (is_memory_migrate(cs)) {
+                       if (is_memory_migrate(cs))
                                cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
                                                  &cpuset_attach_nodemask_to);
-                       }
-                       mmput(mm);
+                       else
+                               mmput(mm);
                }
        }
 
@@ -1714,6 +1737,7 @@ out_unlock:
        mutex_unlock(&cpuset_mutex);
        kernfs_unbreak_active_protection(of->kn);
        css_put(&cs->css);
+       flush_workqueue(cpuset_migrate_mm_wq);
        return retval ?: nbytes;
 }
 
@@ -2359,6 +2383,9 @@ void __init cpuset_init_smp(void)
        top_cpuset.effective_mems = node_states[N_MEMORY];
 
        register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+
+       cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
+       BUG_ON(!cpuset_migrate_mm_wq);
 }
 
 /**
index 06ae52e99ac255fcf4ec7925205742b85426e487..5946460b24250a264f75c442099bb52ca06b0269 100644 (file)
@@ -49,8 +49,6 @@
 
 #include <asm/irq_regs.h>
 
-static struct workqueue_struct *perf_wq;
-
 typedef int (*remote_function_f)(void *);
 
 struct remote_function_call {
@@ -126,44 +124,181 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
        return data.ret;
 }
 
-static void event_function_call(struct perf_event *event,
-                               int (*active)(void *),
-                               void (*inactive)(void *),
-                               void *data)
+static inline struct perf_cpu_context *
+__get_cpu_context(struct perf_event_context *ctx)
+{
+       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+}
+
+static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
+                         struct perf_event_context *ctx)
 {
+       raw_spin_lock(&cpuctx->ctx.lock);
+       if (ctx)
+               raw_spin_lock(&ctx->lock);
+}
+
+static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
+                           struct perf_event_context *ctx)
+{
+       if (ctx)
+               raw_spin_unlock(&ctx->lock);
+       raw_spin_unlock(&cpuctx->ctx.lock);
+}
+
+#define TASK_TOMBSTONE ((void *)-1L)
+
+static bool is_kernel_event(struct perf_event *event)
+{
+       return READ_ONCE(event->owner) == TASK_TOMBSTONE;
+}
+
+/*
+ * On task ctx scheduling...
+ *
+ * When !ctx->nr_events a task context will not be scheduled. This means
+ * we can disable the scheduler hooks (for performance) without leaving
+ * pending task ctx state.
+ *
+ * This however results in two special cases:
+ *
+ *  - removing the last event from a task ctx; this is relatively
+ *    straightforward and is done in __perf_remove_from_context.
+ *
+ *  - adding the first event to a task ctx; this is tricky because we cannot
+ *    rely on ctx->is_active and therefore cannot use event_function_call().
+ *    See perf_install_in_context().
+ *
+ * This is because we need a ctx->lock serialized variable (ctx->is_active)
+ * to reliably determine if a particular task/context is scheduled in. The
+ * task_curr() use in task_function_call() is racy in that a remote context
+ * switch is not a single atomic operation.
+ *
+ * As is, the situation is 'safe' because we set rq->curr before we do the
+ * actual context switch. This means that task_curr() will fail early, but
+ * we'll continue spinning on ctx->is_active until we've passed
+ * perf_event_task_sched_out().
+ *
+ * Without this ctx->lock serialized variable we could have a race where we
+ * find the task (and hence the context) inactive while in fact they are active.
+ *
+ * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
+ */
+
+typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
+                       struct perf_event_context *, void *);
+
+struct event_function_struct {
+       struct perf_event *event;
+       event_f func;
+       void *data;
+};
+
+static int event_function(void *info)
+{
+       struct event_function_struct *efs = info;
+       struct perf_event *event = efs->event;
        struct perf_event_context *ctx = event->ctx;
-       struct task_struct *task = ctx->task;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       struct perf_event_context *task_ctx = cpuctx->task_ctx;
+       int ret = 0;
+
+       WARN_ON_ONCE(!irqs_disabled());
+
+       perf_ctx_lock(cpuctx, task_ctx);
+       /*
+        * Since we do the IPI call without holding ctx->lock things can have
+        * changed, double check we hit the task we set out to hit.
+        */
+       if (ctx->task) {
+               if (ctx->task != current) {
+                       ret = -EAGAIN;
+                       goto unlock;
+               }
+
+               /*
+                * We only use event_function_call() on established contexts,
+                * and event_function() is only ever called when active (or
+                * rather, we'll have bailed in task_function_call() or the
+                * above ctx->task != current test), therefore we must have
+                * ctx->is_active here.
+                */
+               WARN_ON_ONCE(!ctx->is_active);
+               /*
+                * And since we have ctx->is_active, cpuctx->task_ctx must
+                * match.
+                */
+               WARN_ON_ONCE(task_ctx != ctx);
+       } else {
+               WARN_ON_ONCE(&cpuctx->ctx != ctx);
+       }
+
+       efs->func(event, cpuctx, ctx, efs->data);
+unlock:
+       perf_ctx_unlock(cpuctx, task_ctx);
+
+       return ret;
+}
+
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+       struct event_function_struct efs = {
+               .event = event,
+               .func = func,
+               .data = data,
+       };
+
+       int ret = event_function(&efs);
+       WARN_ON_ONCE(ret);
+}
+
+static void event_function_call(struct perf_event *event, event_f func, void *data)
+{
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
+       struct event_function_struct efs = {
+               .event = event,
+               .func = func,
+               .data = data,
+       };
+
+       if (!event->parent) {
+               /*
+                * If this is a !child event, we must hold ctx::mutex to
+                * stabilize the event->ctx relation. See
+                * perf_event_ctx_lock().
+                */
+               lockdep_assert_held(&ctx->mutex);
+       }
 
        if (!task) {
-               cpu_function_call(event->cpu, active, data);
+               cpu_function_call(event->cpu, event_function, &efs);
                return;
        }
 
 again:
-       if (!task_function_call(task, active, data))
+       if (task == TASK_TOMBSTONE)
+               return;
+
+       if (!task_function_call(task, event_function, &efs))
                return;
 
        raw_spin_lock_irq(&ctx->lock);
-       if (ctx->is_active) {
-               /*
-                * Reload the task pointer, it might have been changed by
-                * a concurrent perf_event_context_sched_out().
-                */
-               task = ctx->task;
-               raw_spin_unlock_irq(&ctx->lock);
-               goto again;
+       /*
+        * Reload the task pointer, it might have been changed by
+        * a concurrent perf_event_context_sched_out().
+        */
+       task = ctx->task;
+       if (task != TASK_TOMBSTONE) {
+               if (ctx->is_active) {
+                       raw_spin_unlock_irq(&ctx->lock);
+                       goto again;
+               }
+               func(event, NULL, ctx, data);
        }
-       inactive(data);
        raw_spin_unlock_irq(&ctx->lock);
 }
 
-#define EVENT_OWNER_KERNEL ((void *) -1)
-
-static bool is_kernel_event(struct perf_event *event)
-{
-       return event->owner == EVENT_OWNER_KERNEL;
-}
-
 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
                       PERF_FLAG_FD_OUTPUT  |\
                       PERF_FLAG_PID_CGROUP |\
@@ -368,28 +503,6 @@ static inline u64 perf_event_clock(struct perf_event *event)
        return event->clock();
 }
 
-static inline struct perf_cpu_context *
-__get_cpu_context(struct perf_event_context *ctx)
-{
-       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
-}
-
-static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
-                         struct perf_event_context *ctx)
-{
-       raw_spin_lock(&cpuctx->ctx.lock);
-       if (ctx)
-               raw_spin_lock(&ctx->lock);
-}
-
-static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
-                           struct perf_event_context *ctx)
-{
-       if (ctx)
-               raw_spin_unlock(&ctx->lock);
-       raw_spin_unlock(&cpuctx->ctx.lock);
-}
-
 #ifdef CONFIG_CGROUP_PERF
 
 static inline bool
@@ -579,13 +692,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
         * we are holding the rcu lock
         */
        cgrp1 = perf_cgroup_from_task(task, NULL);
-
-       /*
-        * next is NULL when called from perf_event_enable_on_exec()
-        * that will systematically cause a cgroup_switch()
-        */
-       if (next)
-               cgrp2 = perf_cgroup_from_task(next, NULL);
+       cgrp2 = perf_cgroup_from_task(next, NULL);
 
        /*
         * only schedule out current cgroup events if we know
@@ -611,8 +718,6 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
         * we are holding the rcu lock
         */
        cgrp1 = perf_cgroup_from_task(task, NULL);
-
-       /* prev can never be NULL */
        cgrp2 = perf_cgroup_from_task(prev, NULL);
 
        /*
@@ -917,7 +1022,7 @@ static void put_ctx(struct perf_event_context *ctx)
        if (atomic_dec_and_test(&ctx->refcount)) {
                if (ctx->parent_ctx)
                        put_ctx(ctx->parent_ctx);
-               if (ctx->task)
+               if (ctx->task && ctx->task != TASK_TOMBSTONE)
                        put_task_struct(ctx->task);
                call_rcu(&ctx->rcu_head, free_ctx);
        }
@@ -934,9 +1039,8 @@ static void put_ctx(struct perf_event_context *ctx)
  * perf_event_context::mutex nests and those are:
  *
  *  - perf_event_exit_task_context()   [ child , 0 ]
- *      __perf_event_exit_task()
- *        sync_child_event()
- *          put_event()                        [ parent, 1 ]
+ *      perf_event_exit_event()
+ *        put_event()                  [ parent, 1 ]
  *
  *  - perf_event_init_context()                [ parent, 0 ]
  *      inherit_task_group()
@@ -979,8 +1083,8 @@ static void put_ctx(struct perf_event_context *ctx)
  * Lock order:
  *     task_struct::perf_event_mutex
  *       perf_event_context::mutex
- *         perf_event_context::lock
  *         perf_event::child_mutex;
+ *           perf_event_context::lock
  *         perf_event::mmap_mutex
  *         mmap_sem
  */
@@ -1078,6 +1182,7 @@ static u64 primary_event_id(struct perf_event *event)
 
 /*
  * Get the perf_event_context for a task and lock it.
+ *
  * This has to cope with the fact that until it is locked,
  * the context could get moved to another task.
  */
@@ -1118,9 +1223,12 @@ retry:
                        goto retry;
                }
 
-               if (!atomic_inc_not_zero(&ctx->refcount)) {
+               if (ctx->task == TASK_TOMBSTONE ||
+                   !atomic_inc_not_zero(&ctx->refcount)) {
                        raw_spin_unlock(&ctx->lock);
                        ctx = NULL;
+               } else {
+                       WARN_ON_ONCE(ctx->task != task);
                }
        }
        rcu_read_unlock();
@@ -1246,6 +1354,8 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+       lockdep_assert_held(&ctx->lock);
+
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
        event->attach_state |= PERF_ATTACH_CONTEXT;
 
@@ -1448,11 +1558,14 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
        if (is_cgroup_event(event)) {
                ctx->nr_cgroups--;
+               /*
+                * Because cgroup events are always per-cpu events, this will
+                * always be called from the right CPU.
+                */
                cpuctx = __get_cpu_context(ctx);
                /*
-                * if there are no more cgroup events
-                * then cler cgrp to avoid stale pointer
-                * in update_cgrp_time_from_cpuctx()
+                * If there are no more cgroup events then clear cgrp to avoid
+                * stale pointer in update_cgrp_time_from_cpuctx().
                 */
                if (!ctx->nr_cgroups)
                        cpuctx->cgrp = NULL;
@@ -1530,45 +1643,11 @@ out:
                perf_event__header_size(tmp);
 }
 
-/*
- * User event without the task.
- */
 static bool is_orphaned_event(struct perf_event *event)
 {
-       return event && !is_kernel_event(event) && !event->owner;
+       return event->state == PERF_EVENT_STATE_EXIT;
 }
 
-/*
- * Event has a parent but parent's task finished and it's
- * alive only because of children holding refference.
- */
-static bool is_orphaned_child(struct perf_event *event)
-{
-       return is_orphaned_event(event->parent);
-}
-
-static void orphans_remove_work(struct work_struct *work);
-
-static void schedule_orphans_remove(struct perf_event_context *ctx)
-{
-       if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
-               return;
-
-       if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
-               get_ctx(ctx);
-               ctx->orphans_remove_sched = true;
-       }
-}
-
-static int __init perf_workqueue_init(void)
-{
-       perf_wq = create_singlethread_workqueue("perf");
-       WARN(!perf_wq, "failed to create perf workqueue\n");
-       return perf_wq ? 0 : -1;
-}
-
-core_initcall(perf_workqueue_init);
-
 static inline int pmu_filter_match(struct perf_event *event)
 {
        struct pmu *pmu = event->pmu;
@@ -1629,9 +1708,6 @@ event_sched_out(struct perf_event *event,
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
 
-       if (is_orphaned_child(event))
-               schedule_orphans_remove(ctx);
-
        perf_pmu_enable(event->pmu);
 }
 
@@ -1655,21 +1731,8 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
-struct remove_event {
-       struct perf_event *event;
-       bool detach_group;
-};
-
-static void ___perf_remove_from_context(void *info)
-{
-       struct remove_event *re = info;
-       struct perf_event *event = re->event;
-       struct perf_event_context *ctx = event->ctx;
-
-       if (re->detach_group)
-               perf_group_detach(event);
-       list_del_event(event, ctx);
-}
+#define DETACH_GROUP   0x01UL
+#define DETACH_STATE   0x02UL
 
 /*
  * Cross CPU call to remove a performance event
@@ -1677,33 +1740,33 @@ static void ___perf_remove_from_context(void *info)
  * We disable the event on the hardware level first. After that we
  * remove it from the context list.
  */
-static int __perf_remove_from_context(void *info)
+static void
+__perf_remove_from_context(struct perf_event *event,
+                          struct perf_cpu_context *cpuctx,
+                          struct perf_event_context *ctx,
+                          void *info)
 {
-       struct remove_event *re = info;
-       struct perf_event *event = re->event;
-       struct perf_event_context *ctx = event->ctx;
-       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       unsigned long flags = (unsigned long)info;
 
-       raw_spin_lock(&ctx->lock);
        event_sched_out(event, cpuctx, ctx);
-       if (re->detach_group)
+       if (flags & DETACH_GROUP)
                perf_group_detach(event);
        list_del_event(event, ctx);
-       if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
+       if (flags & DETACH_STATE)
+               event->state = PERF_EVENT_STATE_EXIT;
+
+       if (!ctx->nr_events && ctx->is_active) {
                ctx->is_active = 0;
-               cpuctx->task_ctx = NULL;
+               if (ctx->task) {
+                       WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+                       cpuctx->task_ctx = NULL;
+               }
        }
-       raw_spin_unlock(&ctx->lock);
-
-       return 0;
 }
 
 /*
  * Remove the event from a task's (or a CPU's) list of events.
  *
- * CPU events are removed with a smp call. For task events we only
- * call when the task is on a CPU.
- *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
  * remains valid.  This is OK when called from perf_release since
@@ -1711,73 +1774,32 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event, bool detach_group)
+static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-       struct perf_event_context *ctx = event->ctx;
-       struct remove_event re = {
-               .event = event,
-               .detach_group = detach_group,
-       };
+       lockdep_assert_held(&event->ctx->mutex);
 
-       lockdep_assert_held(&ctx->mutex);
-
-       event_function_call(event, __perf_remove_from_context,
-                           ___perf_remove_from_context, &re);
+       event_function_call(event, __perf_remove_from_context, (void *)flags);
 }
 
 /*
  * Cross CPU call to disable a performance event
  */
-int __perf_event_disable(void *info)
-{
-       struct perf_event *event = info;
-       struct perf_event_context *ctx = event->ctx;
-       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-
-       /*
-        * If this is a per-task event, need to check whether this
-        * event's task is the current task on this cpu.
-        *
-        * Can trigger due to concurrent perf_event_context_sched_out()
-        * flipping contexts around.
-        */
-       if (ctx->task && cpuctx->task_ctx != ctx)
-               return -EINVAL;
-
-       raw_spin_lock(&ctx->lock);
-
-       /*
-        * If the event is on, turn it off.
-        * If it is in error state, leave it in error state.
-        */
-       if (event->state >= PERF_EVENT_STATE_INACTIVE) {
-               update_context_time(ctx);
-               update_cgrp_time_from_event(event);
-               update_group_times(event);
-               if (event == event->group_leader)
-                       group_sched_out(event, cpuctx, ctx);
-               else
-                       event_sched_out(event, cpuctx, ctx);
-               event->state = PERF_EVENT_STATE_OFF;
-       }
-
-       raw_spin_unlock(&ctx->lock);
-
-       return 0;
-}
-
-void ___perf_event_disable(void *info)
+static void __perf_event_disable(struct perf_event *event,
+                                struct perf_cpu_context *cpuctx,
+                                struct perf_event_context *ctx,
+                                void *info)
 {
-       struct perf_event *event = info;
+       if (event->state < PERF_EVENT_STATE_INACTIVE)
+               return;
 
-       /*
-        * Since we have the lock this context can't be scheduled
-        * in, so we can change the state safely.
-        */
-       if (event->state == PERF_EVENT_STATE_INACTIVE) {
-               update_group_times(event);
-               event->state = PERF_EVENT_STATE_OFF;
-       }
+       update_context_time(ctx);
+       update_cgrp_time_from_event(event);
+       update_group_times(event);
+       if (event == event->group_leader)
+               group_sched_out(event, cpuctx, ctx);
+       else
+               event_sched_out(event, cpuctx, ctx);
+       event->state = PERF_EVENT_STATE_OFF;
 }
 
 /*
@@ -1788,7 +1810,8 @@ void ___perf_event_disable(void *info)
  * remains valid.  This condition is satisfied when called through
  * perf_event_for_each_child or perf_event_for_each because they
  * hold the top-level event's child_mutex, so any descendant that
- * goes to exit will block in sync_child_event.
+ * goes to exit will block in perf_event_exit_event().
+ *
  * When called from perf_pending_event it's OK because event->ctx
  * is the current context on this CPU and preemption is disabled,
  * hence we can't get into perf_event_task_sched_out for this context.
@@ -1804,8 +1827,12 @@ static void _perf_event_disable(struct perf_event *event)
        }
        raw_spin_unlock_irq(&ctx->lock);
 
-       event_function_call(event, __perf_event_disable,
-                           ___perf_event_disable, event);
+       event_function_call(event, __perf_event_disable, NULL);
+}
+
+void perf_event_disable_local(struct perf_event *event)
+{
+       event_function_local(event, __perf_event_disable, NULL);
 }
 
 /*
@@ -1918,9 +1945,6 @@ event_sched_in(struct perf_event *event,
        if (event->attr.exclusive)
                cpuctx->exclusive = 1;
 
-       if (is_orphaned_child(event))
-               schedule_orphans_remove(ctx);
-
 out:
        perf_pmu_enable(event->pmu);
 
@@ -2039,7 +2063,8 @@ static void add_event_to_ctx(struct perf_event *event,
        event->tstamp_stopped = tstamp;
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx);
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                              struct perf_event_context *ctx);
 static void
 ctx_sched_in(struct perf_event_context *ctx,
             struct perf_cpu_context *cpuctx,
@@ -2058,16 +2083,15 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
 }
 
-static void ___perf_install_in_context(void *info)
+static void ctx_resched(struct perf_cpu_context *cpuctx,
+                       struct perf_event_context *task_ctx)
 {
-       struct perf_event *event = info;
-       struct perf_event_context *ctx = event->ctx;
-
-       /*
-        * Since the task isn't running, its safe to add the event, us holding
-        * the ctx->lock ensures the task won't get scheduled in.
-        */
-       add_event_to_ctx(event, ctx);
+       perf_pmu_disable(cpuctx->ctx.pmu);
+       if (task_ctx)
+               task_ctx_sched_out(cpuctx, task_ctx);
+       cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+       perf_event_sched_in(cpuctx, task_ctx, current);
+       perf_pmu_enable(cpuctx->ctx.pmu);
 }
 
 /*
@@ -2077,55 +2101,31 @@ static void ___perf_install_in_context(void *info)
  */
 static int  __perf_install_in_context(void *info)
 {
-       struct perf_event *event = info;
-       struct perf_event_context *ctx = event->ctx;
+       struct perf_event_context *ctx = info;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        struct perf_event_context *task_ctx = cpuctx->task_ctx;
-       struct task_struct *task = current;
-
-       perf_ctx_lock(cpuctx, task_ctx);
-       perf_pmu_disable(cpuctx->ctx.pmu);
 
-       /*
-        * If there was an active task_ctx schedule it out.
-        */
-       if (task_ctx)
-               task_ctx_sched_out(task_ctx);
-
-       /*
-        * If the context we're installing events in is not the
-        * active task_ctx, flip them.
-        */
-       if (ctx->task && task_ctx != ctx) {
-               if (task_ctx)
-                       raw_spin_unlock(&task_ctx->lock);
+       raw_spin_lock(&cpuctx->ctx.lock);
+       if (ctx->task) {
                raw_spin_lock(&ctx->lock);
+               /*
+                * If we hit the 'wrong' task, we've since scheduled and
+                * everything should be sorted, nothing to do!
+                */
                task_ctx = ctx;
-       }
+               if (ctx->task != current)
+                       goto unlock;
 
-       if (task_ctx) {
-               cpuctx->task_ctx = task_ctx;
-               task = task_ctx->task;
+               /*
+                * If task_ctx is set, it had better be to us.
+                */
+               WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx);
+       } else if (task_ctx) {
+               raw_spin_lock(&task_ctx->lock);
        }
 
-       cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-
-       update_context_time(ctx);
-       /*
-        * update cgrp time only if current cgrp
-        * matches event->cgrp. Must be done before
-        * calling add_event_to_ctx()
-        */
-       update_cgrp_time_from_event(event);
-
-       add_event_to_ctx(event, ctx);
-
-       /*
-        * Schedule everything back in
-        */
-       perf_event_sched_in(cpuctx, task_ctx, task);
-
-       perf_pmu_enable(cpuctx->ctx.pmu);
+       ctx_resched(cpuctx, task_ctx);
+unlock:
        perf_ctx_unlock(cpuctx, task_ctx);
 
        return 0;
@@ -2133,27 +2133,54 @@ static int  __perf_install_in_context(void *info)
 
 /*
  * Attach a performance event to a context
- *
- * First we add the event to the list with the hardware enable bit
- * in event->hw_config cleared.
- *
- * If the event is attached to a task which is on a CPU we use a smp
- * call to enable it in the task context. The task might have been
- * scheduled away, but we check this in the smp call again.
  */
 static void
 perf_install_in_context(struct perf_event_context *ctx,
                        struct perf_event *event,
                        int cpu)
 {
+       struct task_struct *task = NULL;
+
        lockdep_assert_held(&ctx->mutex);
 
        event->ctx = ctx;
        if (event->cpu != -1)
                event->cpu = cpu;
 
-       event_function_call(event, __perf_install_in_context,
-                           ___perf_install_in_context, event);
+       /*
+        * Installing events is tricky because we cannot rely on ctx->is_active
+        * to be set in case this is the nr_events 0 -> 1 transition.
+        *
+        * So what we do is we add the event to the list here, which will allow
+        * a future context switch to DTRT and then send a racy IPI. If the IPI
+        * fails to hit the right task, this means a context switch must have
+        * happened and that will have taken care of business.
+        */
+       raw_spin_lock_irq(&ctx->lock);
+       task = ctx->task;
+       /*
+        * Worse, we cannot even rely on the ctx actually existing anymore. If
+        * between find_get_context() and perf_install_in_context() the task
+        * went through perf_event_exit_task(), it's dead and we should not be
+        * adding new events.
+        */
+       if (task == TASK_TOMBSTONE) {
+               raw_spin_unlock_irq(&ctx->lock);
+               return;
+       }
+       update_context_time(ctx);
+       /*
+        * Update cgrp time only if current cgrp matches event->cgrp.
+        * Must be done before calling add_event_to_ctx().
+        */
+       update_cgrp_time_from_event(event);
+       add_event_to_ctx(event, ctx);
+       raw_spin_unlock_irq(&ctx->lock);
+
+       if (task)
+               task_function_call(task, __perf_install_in_context, ctx);
+       else
+               cpu_function_call(cpu, __perf_install_in_context, ctx);
 }
 
 /*
@@ -2180,43 +2207,30 @@ static void __perf_event_mark_enabled(struct perf_event *event)
 /*
  * Cross CPU call to enable a performance event
  */
-static int __perf_event_enable(void *info)
+static void __perf_event_enable(struct perf_event *event,
+                               struct perf_cpu_context *cpuctx,
+                               struct perf_event_context *ctx,
+                               void *info)
 {
-       struct perf_event *event = info;
-       struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
-       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-       int err;
+       struct perf_event_context *task_ctx;
 
-       /*
-        * There's a time window between 'ctx->is_active' check
-        * in perf_event_enable function and this place having:
-        *   - IRQs on
-        *   - ctx->lock unlocked
-        *
-        * where the task could be killed and 'ctx' deactivated
-        * by perf_event_exit_task.
-        */
-       if (!ctx->is_active)
-               return -EINVAL;
+       if (event->state >= PERF_EVENT_STATE_INACTIVE ||
+           event->state <= PERF_EVENT_STATE_ERROR)
+               return;
 
-       raw_spin_lock(&ctx->lock);
        update_context_time(ctx);
-
-       if (event->state >= PERF_EVENT_STATE_INACTIVE)
-               goto unlock;
-
-       /*
-        * set current task's cgroup time reference point
-        */
-       perf_cgroup_set_timestamp(current, ctx);
-
        __perf_event_mark_enabled(event);
 
+       if (!ctx->is_active)
+               return;
+
        if (!event_filter_match(event)) {
-               if (is_cgroup_event(event))
+               if (is_cgroup_event(event)) {
+                       perf_cgroup_set_timestamp(current, ctx); // XXX ?
                        perf_cgroup_defer_enabled(event);
-               goto unlock;
+               }
+               return;
        }
 
        /*
@@ -2224,41 +2238,13 @@ static int __perf_event_enable(void *info)
         * then don't put it on unless the group is on.
         */
        if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
-               goto unlock;
-
-       if (!group_can_go_on(event, cpuctx, 1)) {
-               err = -EEXIST;
-       } else {
-               if (event == leader)
-                       err = group_sched_in(event, cpuctx, ctx);
-               else
-                       err = event_sched_in(event, cpuctx, ctx);
-       }
-
-       if (err) {
-               /*
-                * If this event can't go on and it's part of a
-                * group, then the whole group has to come off.
-                */
-               if (leader != event) {
-                       group_sched_out(leader, cpuctx, ctx);
-                       perf_mux_hrtimer_restart(cpuctx);
-               }
-               if (leader->attr.pinned) {
-                       update_group_times(leader);
-                       leader->state = PERF_EVENT_STATE_ERROR;
-               }
-       }
-
-unlock:
-       raw_spin_unlock(&ctx->lock);
+               return;
 
-       return 0;
-}
+       task_ctx = cpuctx->task_ctx;
+       if (ctx->task)
+               WARN_ON_ONCE(task_ctx != ctx);
 
-void ___perf_event_enable(void *info)
-{
-       __perf_event_mark_enabled((struct perf_event *)info);
+       ctx_resched(cpuctx, task_ctx);
 }
 
 /*
@@ -2275,7 +2261,8 @@ static void _perf_event_enable(struct perf_event *event)
        struct perf_event_context *ctx = event->ctx;
 
        raw_spin_lock_irq(&ctx->lock);
-       if (event->state >= PERF_EVENT_STATE_INACTIVE) {
+       if (event->state >= PERF_EVENT_STATE_INACTIVE ||
+           event->state <  PERF_EVENT_STATE_ERROR) {
                raw_spin_unlock_irq(&ctx->lock);
                return;
        }
@@ -2291,8 +2278,7 @@ static void _perf_event_enable(struct perf_event *event)
                event->state = PERF_EVENT_STATE_OFF;
        raw_spin_unlock_irq(&ctx->lock);
 
-       event_function_call(event, __perf_event_enable,
-                           ___perf_event_enable, event);
+       event_function_call(event, __perf_event_enable, NULL);
 }
 
 /*
@@ -2342,12 +2328,27 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)
 {
-       struct perf_event *event;
        int is_active = ctx->is_active;
+       struct perf_event *event;
 
-       ctx->is_active &= ~event_type;
-       if (likely(!ctx->nr_events))
+       lockdep_assert_held(&ctx->lock);
+
+       if (likely(!ctx->nr_events)) {
+               /*
+                * See __perf_remove_from_context().
+                */
+               WARN_ON_ONCE(ctx->is_active);
+               if (ctx->task)
+                       WARN_ON_ONCE(cpuctx->task_ctx);
                return;
+       }
+
+       ctx->is_active &= ~event_type;
+       if (ctx->task) {
+               WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+               if (!ctx->is_active)
+                       cpuctx->task_ctx = NULL;
+       }
 
        update_context_time(ctx);
        update_cgrp_time_from_cpuctx(cpuctx);
@@ -2518,17 +2519,21 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
                raw_spin_lock(&ctx->lock);
                raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
                if (context_equiv(ctx, next_ctx)) {
-                       /*
-                        * XXX do we need a memory barrier of sorts
-                        * wrt to rcu_dereference() of perf_event_ctxp
-                        */
-                       task->perf_event_ctxp[ctxn] = next_ctx;
-                       next->perf_event_ctxp[ctxn] = ctx;
-                       ctx->task = next;
-                       next_ctx->task = task;
+                       WRITE_ONCE(ctx->task, next);
+                       WRITE_ONCE(next_ctx->task, task);
 
                        swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
 
+                       /*
+                        * RCU_INIT_POINTER here is safe because we've not
+                        * modified the ctx and the above modifications of
+                        * ctx->task and ctx->task_ctx_data are immaterial
+                        * since those values are always verified under
+                        * ctx->lock which we're now holding.
+                        */
+                       RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
+                       RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
+
                        do_switch = 0;
 
                        perf_event_sync_stat(ctx, next_ctx);
@@ -2541,8 +2546,7 @@ unlock:
 
        if (do_switch) {
                raw_spin_lock(&ctx->lock);
-               ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-               cpuctx->task_ctx = NULL;
+               task_ctx_sched_out(cpuctx, ctx);
                raw_spin_unlock(&ctx->lock);
        }
 }
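
The RCU_INIT_POINTER() comment above is a memory-ordering argument: the publication needs no release barrier because readers never trust the pointed-to values without taking ctx->lock. Below is a loose userspace analogue with C11 atomics, under the assumption that the contexts themselves are never freed; all names are made up and this is not how RCU itself is implemented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct ctx {
        pthread_mutex_t lock;
        int task_id;                    /* only read/written under lock */
};

static struct ctx a = { PTHREAD_MUTEX_INITIALIZER, 1 };
static struct ctx b = { PTHREAD_MUTEX_INITIALIZER, 2 };
static _Atomic(struct ctx *) current_ctx = &a;

static void publish(struct ctx *c)
{
        /* relaxed store, in the spirit of RCU_INIT_POINTER(): no release
         * barrier, because readers re-validate under the lock anyway */
        atomic_store_explicit(&current_ctx, c, memory_order_relaxed);
}

static int read_task_id(void)
{
        struct ctx *c = atomic_load_explicit(&current_ctx, memory_order_relaxed);
        int id;

        pthread_mutex_lock(&c->lock);   /* values are verified under the lock */
        id = c->task_id;
        pthread_mutex_unlock(&c->lock);
        return id;
}

int main(void)
{
        printf("%d\n", read_task_id()); /* 1 */
        publish(&b);
        printf("%d\n", read_task_id()); /* 2 */
        return 0;
}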
@@ -2637,10 +2641,9 @@ void __perf_event_task_sched_out(struct task_struct *task,
                perf_cgroup_sched_out(task, next);
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx)
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                              struct perf_event_context *ctx)
 {
-       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-
        if (!cpuctx->task_ctx)
                return;
 
@@ -2648,7 +2651,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx)
                return;
 
        ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-       cpuctx->task_ctx = NULL;
 }
 
 /*
@@ -2725,13 +2727,22 @@ ctx_sched_in(struct perf_event_context *ctx,
             enum event_type_t event_type,
             struct task_struct *task)
 {
-       u64 now;
        int is_active = ctx->is_active;
+       u64 now;
+
+       lockdep_assert_held(&ctx->lock);
 
-       ctx->is_active |= event_type;
        if (likely(!ctx->nr_events))
                return;
 
+       ctx->is_active |= event_type;
+       if (ctx->task) {
+               if (!is_active)
+                       cpuctx->task_ctx = ctx;
+               else
+                       WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+       }
+
        now = perf_clock();
        ctx->timestamp = now;
        perf_cgroup_set_timestamp(task, ctx);
@@ -2773,12 +2784,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         * cpu flexible, task flexible.
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-
-       if (ctx->nr_events)
-               cpuctx->task_ctx = ctx;
-
-       perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
-
+       perf_event_sched_in(cpuctx, ctx, task);
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
 }
@@ -2800,6 +2806,16 @@ void __perf_event_task_sched_in(struct task_struct *prev,
        struct perf_event_context *ctx;
        int ctxn;
 
+       /*
+        * If cgroup events exist on this CPU, then we need to check if we have
+        * to switch in PMU state; cgroup events are system-wide mode only.
+        *
+        * Since cgroup events are CPU events, we must schedule these in before
+        * we schedule in the task events.
+        */
+       if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
+               perf_cgroup_sched_in(prev, task);
+
        for_each_task_context_nr(ctxn) {
                ctx = task->perf_event_ctxp[ctxn];
                if (likely(!ctx))
@@ -2807,13 +2823,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 
                perf_event_context_sched_in(ctx, task);
        }
-       /*
-        * if cgroup events exist on this CPU, then we need
-        * to check if we have to switch in PMU state.
-        * cgroup event are system-wide mode only
-        */
-       if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-               perf_cgroup_sched_in(prev, task);
 
        if (atomic_read(&nr_switch_events))
                perf_event_switch(task, prev, true);
@@ -3099,46 +3108,30 @@ static int event_enable_on_exec(struct perf_event *event,
 static void perf_event_enable_on_exec(int ctxn)
 {
        struct perf_event_context *ctx, *clone_ctx = NULL;
+       struct perf_cpu_context *cpuctx;
        struct perf_event *event;
        unsigned long flags;
        int enabled = 0;
-       int ret;
 
        local_irq_save(flags);
        ctx = current->perf_event_ctxp[ctxn];
        if (!ctx || !ctx->nr_events)
                goto out;
 
-       /*
-        * We must ctxsw out cgroup events to avoid conflict
-        * when invoking perf_task_event_sched_in() later on
-        * in this function. Otherwise we end up trying to
-        * ctxswin cgroup events which are already scheduled
-        * in.
-        */
-       perf_cgroup_sched_out(current, NULL);
-
-       raw_spin_lock(&ctx->lock);
-       task_ctx_sched_out(ctx);
-
-       list_for_each_entry(event, &ctx->event_list, event_entry) {
-               ret = event_enable_on_exec(event, ctx);
-               if (ret)
-                       enabled = 1;
-       }
+       cpuctx = __get_cpu_context(ctx);
+       perf_ctx_lock(cpuctx, ctx);
+       list_for_each_entry(event, &ctx->event_list, event_entry)
+               enabled |= event_enable_on_exec(event, ctx);
 
        /*
-        * Unclone this context if we enabled any event.
+        * Unclone and reschedule this context if we enabled any event.
         */
-       if (enabled)
+       if (enabled) {
                clone_ctx = unclone_ctx(ctx);
+               ctx_resched(cpuctx, ctx);
+       }
+       perf_ctx_unlock(cpuctx, ctx);
 
-       raw_spin_unlock(&ctx->lock);
-
-       /*
-        * Also calls ctxswin for cgroup events, if any:
-        */
-       perf_event_context_sched_in(ctx, ctx->task);
 out:
        local_irq_restore(flags);
 
@@ -3334,7 +3327,6 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
        INIT_LIST_HEAD(&ctx->flexible_groups);
        INIT_LIST_HEAD(&ctx->event_list);
        atomic_set(&ctx->refcount, 1);
-       INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
 }
 
 static struct perf_event_context *
@@ -3521,11 +3513,13 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 
 static void unaccount_event(struct perf_event *event)
 {
+       bool dec = false;
+
        if (event->parent)
                return;
 
        if (event->attach_state & PERF_ATTACH_TASK)
-               static_key_slow_dec_deferred(&perf_sched_events);
+               dec = true;
        if (event->attr.mmap || event->attr.mmap_data)
                atomic_dec(&nr_mmap_events);
        if (event->attr.comm)
@@ -3535,12 +3529,15 @@ static void unaccount_event(struct perf_event *event)
        if (event->attr.freq)
                atomic_dec(&nr_freq_events);
        if (event->attr.context_switch) {
-               static_key_slow_dec_deferred(&perf_sched_events);
+               dec = true;
                atomic_dec(&nr_switch_events);
        }
        if (is_cgroup_event(event))
-               static_key_slow_dec_deferred(&perf_sched_events);
+               dec = true;
        if (has_branch_stack(event))
+               dec = true;
+
+       if (dec)
                static_key_slow_dec_deferred(&perf_sched_events);
 
        unaccount_event_cpu(event, event->cpu);
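
The bool-collection refactor above (and its mirror in account_event() further down) is easy to see in isolation: gather every reason for touching the shared key into one flag, then touch the key at most once. A small, self-contained sketch of that shape, with invented names:

#include <stdbool.h>
#include <stdio.h>

static int sched_events_key;            /* stand-in for the shared static key */

struct fake_event {
        bool attach_task;
        bool context_switch;
        bool cgroup;
        bool branch_stack;
};

static void account(const struct fake_event *e)
{
        bool inc = false;

        if (e->attach_task)
                inc = true;
        if (e->context_switch)
                inc = true;
        if (e->cgroup)
                inc = true;
        if (e->branch_stack)
                inc = true;

        if (inc)
                sched_events_key++;     /* once, however many reasons applied */
}

int main(void)
{
        struct fake_event e = { .attach_task = true, .cgroup = true };

        account(&e);
        printf("key = %d\n", sched_events_key);         /* 1, not 2 */
        return 0;
}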
@@ -3556,7 +3553,7 @@ static void unaccount_event(struct perf_event *event)
  *  3) two matching events on the same context.
  *
  * The former two cases are handled in the allocation path (perf_event_alloc(),
- * __free_event()), the latter -- before the first perf_install_in_context().
+ * _free_event()), the latter -- before the first perf_install_in_context().
  */
 static int exclusive_event_init(struct perf_event *event)
 {
@@ -3631,29 +3628,6 @@ static bool exclusive_event_installable(struct perf_event *event,
        return true;
 }
 
-static void __free_event(struct perf_event *event)
-{
-       if (!event->parent) {
-               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                       put_callchain_buffers();
-       }
-
-       perf_event_free_bpf_prog(event);
-
-       if (event->destroy)
-               event->destroy(event);
-
-       if (event->ctx)
-               put_ctx(event->ctx);
-
-       if (event->pmu) {
-               exclusive_event_destroy(event);
-               module_put(event->pmu->module);
-       }
-
-       call_rcu(&event->rcu_head, free_event_rcu);
-}
-
 static void _free_event(struct perf_event *event)
 {
        irq_work_sync(&event->pending);
@@ -3675,7 +3649,25 @@ static void _free_event(struct perf_event *event)
        if (is_cgroup_event(event))
                perf_detach_cgroup(event);
 
-       __free_event(event);
+       if (!event->parent) {
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
+       }
+
+       perf_event_free_bpf_prog(event);
+
+       if (event->destroy)
+               event->destroy(event);
+
+       if (event->ctx)
+               put_ctx(event->ctx);
+
+       if (event->pmu) {
+               exclusive_event_destroy(event);
+               module_put(event->pmu->module);
+       }
+
+       call_rcu(&event->rcu_head, free_event_rcu);
 }
 
 /*
@@ -3702,14 +3694,13 @@ static void perf_remove_from_owner(struct perf_event *event)
        struct task_struct *owner;
 
        rcu_read_lock();
-       owner = ACCESS_ONCE(event->owner);
        /*
-        * Matches the smp_wmb() in perf_event_exit_task(). If we observe
-        * !owner it means the list deletion is complete and we can indeed
-        * free this event, otherwise we need to serialize on
+        * Matches the smp_store_release() in perf_event_exit_task(). If we
+        * observe !owner it means the list deletion is complete and we can
+        * indeed free this event, otherwise we need to serialize on
         * owner->perf_event_mutex.
         */
-       smp_read_barrier_depends();
+       owner = lockless_dereference(event->owner);
        if (owner) {
                /*
                 * Since delayed_put_task_struct() also drops the last
@@ -3737,8 +3728,10 @@ static void perf_remove_from_owner(struct perf_event *event)
                 * ensured they're done, and we can proceed with freeing the
                 * event.
                 */
-               if (event->owner)
+               if (event->owner) {
                        list_del_init(&event->owner_entry);
+                       smp_store_release(&event->owner, NULL);
+               }
                mutex_unlock(&owner->perf_event_mutex);
                put_task_struct(owner);
        }
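
The smp_store_release()/lockless_dereference() pairing above can be mimicked in userspace with C11 acquire/release atomics. A hedged sketch of the idea follows; the names are invented, and the real code pairs with the smp_store_release() in perf_event_exit_task(), which this does not reproduce.

#include <stdatomic.h>
#include <stdio.h>

struct task { const char *name; };

static struct task me = { "owner" };
static _Atomic(struct task *) owner = &me;
static int unlinked;

static void exit_side(void)
{
        unlinked = 1;                                   /* list deletion...   */
        atomic_store_explicit(&owner, NULL,
                              memory_order_release);    /* ...then publish    */
}

static void free_side(void)
{
        struct task *t = atomic_load_explicit(&owner, memory_order_acquire);

        if (!t) {
                /* release/acquire guarantees the unlink is visible here */
                printf("unlinked=%d, safe to free\n", unlinked);
                return;
        }
        printf("still owned by %s, must serialize on its mutex\n", t->name);
}

int main(void)
{
        free_side();            /* still owned */
        exit_side();
        free_side();            /* safe to free */
        return 0;
}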
@@ -3746,36 +3739,98 @@ static void perf_remove_from_owner(struct perf_event *event)
 
 static void put_event(struct perf_event *event)
 {
-       struct perf_event_context *ctx;
-
        if (!atomic_long_dec_and_test(&event->refcount))
                return;
 
+       _free_event(event);
+}
+
+/*
+ * Kill an event dead; while event:refcount will preserve the event
+ * object, it will not preserve its functionality. Once the last 'user'
+ * gives up the object, we'll destroy the thing.
+ */
+int perf_event_release_kernel(struct perf_event *event)
+{
+       struct perf_event_context *ctx;
+       struct perf_event *child, *tmp;
+
        if (!is_kernel_event(event))
                perf_remove_from_owner(event);
 
+       ctx = perf_event_ctx_lock(event);
+       WARN_ON_ONCE(ctx->parent_ctx);
+       perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE);
+       perf_event_ctx_unlock(event, ctx);
+
        /*
-        * There are two ways this annotation is useful:
+        * At this point we must have event->state == PERF_EVENT_STATE_EXIT,
+        * either from the above perf_remove_from_context() or through
+        * perf_event_exit_event().
         *
-        *  1) there is a lock recursion from perf_event_exit_task
-        *     see the comment there.
+        * Therefore, anybody acquiring event->child_mutex after the below
+        * loop _must_ also see this, most importantly inherit_event() which
+        * will avoid placing more children on the list.
         *
-        *  2) there is a lock-inversion with mmap_sem through
-        *     perf_read_group(), which takes faults while
-        *     holding ctx->mutex, however this is called after
-        *     the last filedesc died, so there is no possibility
-        *     to trigger the AB-BA case.
+        * Thus this guarantees that we will in fact observe and kill _ALL_
+        * child events.
         */
-       ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
-       WARN_ON_ONCE(ctx->parent_ctx);
-       perf_remove_from_context(event, true);
-       perf_event_ctx_unlock(event, ctx);
+       WARN_ON_ONCE(event->state != PERF_EVENT_STATE_EXIT);
 
-       _free_event(event);
-}
+again:
+       mutex_lock(&event->child_mutex);
+       list_for_each_entry(child, &event->child_list, child_list) {
 
-int perf_event_release_kernel(struct perf_event *event)
-{
+               /*
+                * Cannot change, child events are not migrated, see the
+                * comment with perf_event_ctx_lock_nested().
+                */
+               ctx = lockless_dereference(child->ctx);
+               /*
+                * Since child_mutex nests inside ctx::mutex, we must jump
+                * through hoops. We start by grabbing a reference on the ctx.
+                *
+                * Since the event cannot get freed while we hold the
+                * child_mutex, the context must also exist and have a !0
+                * reference count.
+                */
+               get_ctx(ctx);
+
+               /*
+                * Now that we have a ctx ref, we can drop child_mutex, and
+                * acquire ctx::mutex without fear of it going away. Then we
+                * can re-acquire child_mutex.
+                */
+               mutex_unlock(&event->child_mutex);
+               mutex_lock(&ctx->mutex);
+               mutex_lock(&event->child_mutex);
+
+               /*
+                * Now that we hold ctx::mutex and child_mutex, revalidate our
+                * state; if child is still the first entry, it didn't get freed
+                * and we can continue doing so.
+                */
+               tmp = list_first_entry_or_null(&event->child_list,
+                                              struct perf_event, child_list);
+               if (tmp == child) {
+                       perf_remove_from_context(child, DETACH_GROUP);
+                       list_del(&child->child_list);
+                       free_event(child);
+                       /*
+                        * This matches the refcount bump in inherit_event();
+                        * this can't be the last reference.
+                        */
+                       put_event(event);
+               }
+
+               mutex_unlock(&event->child_mutex);
+               mutex_unlock(&ctx->mutex);
+               put_ctx(ctx);
+               goto again;
+       }
+       mutex_unlock(&event->child_mutex);
+
+       /* Must be the last reference */
        put_event(event);
        return 0;
 }
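
The loop above is a classic lock-ordering dance: the lock we need (ctx::mutex) must be taken before the lock we hold (child_mutex), so the code pins the object, drops the inner lock, takes both in the right order, and re-validates. A compact pthread sketch of just that dance, with invented names and GCC atomic builtins for the refcount:

#include <pthread.h>
#include <stdbool.h>

struct obj {
        pthread_mutex_t outer;          /* think ctx::mutex  */
        pthread_mutex_t inner;          /* think child_mutex */
        int refcount;
        bool still_first;               /* state we must re-check */
};

static void get_obj(struct obj *o) { __atomic_add_fetch(&o->refcount, 1, __ATOMIC_RELAXED); }
static void put_obj(struct obj *o) { __atomic_sub_fetch(&o->refcount, 1, __ATOMIC_RELAXED); }

static void destroy_child(struct obj *o)
{
        pthread_mutex_lock(&o->inner);
        get_obj(o);                      /* keep o alive across the unlock      */
        pthread_mutex_unlock(&o->inner); /* cannot take outer while inner held  */

        pthread_mutex_lock(&o->outer);
        pthread_mutex_lock(&o->inner);   /* correct order: outer, then inner    */

        if (o->still_first) {            /* re-validate; it may have changed    */
                /* ... actually tear the child down here ... */
                o->still_first = false;
        }

        pthread_mutex_unlock(&o->inner);
        pthread_mutex_unlock(&o->outer);
        put_obj(o);
}

int main(void)
{
        struct obj o = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 1, true
        };

        destroy_child(&o);
        return 0;
}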
@@ -3786,46 +3841,10 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
  */
 static int perf_release(struct inode *inode, struct file *file)
 {
-       put_event(file->private_data);
+       perf_event_release_kernel(file->private_data);
        return 0;
 }
 
-/*
- * Remove all orphanes events from the context.
- */
-static void orphans_remove_work(struct work_struct *work)
-{
-       struct perf_event_context *ctx;
-       struct perf_event *event, *tmp;
-
-       ctx = container_of(work, struct perf_event_context,
-                          orphans_remove.work);
-
-       mutex_lock(&ctx->mutex);
-       list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
-               struct perf_event *parent_event = event->parent;
-
-               if (!is_orphaned_child(event))
-                       continue;
-
-               perf_remove_from_context(event, true);
-
-               mutex_lock(&parent_event->child_mutex);
-               list_del_init(&event->child_list);
-               mutex_unlock(&parent_event->child_mutex);
-
-               free_event(event);
-               put_event(parent_event);
-       }
-
-       raw_spin_lock_irq(&ctx->lock);
-       ctx->orphans_remove_sched = false;
-       raw_spin_unlock_irq(&ctx->lock);
-       mutex_unlock(&ctx->mutex);
-
-       put_ctx(ctx);
-}
-
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 {
        struct perf_event *child;
@@ -4054,7 +4073,7 @@ static void _perf_event_reset(struct perf_event *event)
 /*
  * Holding the top-level event's child_mutex means that any
  * descendant process that has inherited this event will block
- * in sync_child_event if it goes to exit, thus satisfying the
+ * in perf_event_exit_event() if it goes to exit, thus satisfying the
  * task existence requirements of perf_event_enable/disable.
  */
 static void perf_event_for_each_child(struct perf_event *event,
@@ -4086,36 +4105,14 @@ static void perf_event_for_each(struct perf_event *event,
                perf_event_for_each_child(sibling, func);
 }
 
-struct period_event {
-       struct perf_event *event;
-       u64 value;
-};
-
-static void ___perf_event_period(void *info)
-{
-       struct period_event *pe = info;
-       struct perf_event *event = pe->event;
-       u64 value = pe->value;
-
-       if (event->attr.freq) {
-               event->attr.sample_freq = value;
-       } else {
-               event->attr.sample_period = value;
-               event->hw.sample_period = value;
-       }
-
-       local64_set(&event->hw.period_left, 0);
-}
-
-static int __perf_event_period(void *info)
+static void __perf_event_period(struct perf_event *event,
+                               struct perf_cpu_context *cpuctx,
+                               struct perf_event_context *ctx,
+                               void *info)
 {
-       struct period_event *pe = info;
-       struct perf_event *event = pe->event;
-       struct perf_event_context *ctx = event->ctx;
-       u64 value = pe->value;
+       u64 value = *((u64 *)info);
        bool active;
 
-       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
                event->attr.sample_freq = value;
        } else {
@@ -4135,14 +4132,10 @@ static int __perf_event_period(void *info)
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
-       raw_spin_unlock(&ctx->lock);
-
-       return 0;
 }
 
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
-       struct period_event pe = { .event = event, };
        u64 value;
 
        if (!is_sampling_event(event))
@@ -4157,10 +4150,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
        if (event->attr.freq && value > sysctl_perf_event_sample_rate)
                return -EINVAL;
 
-       pe.value = value;
-
-       event_function_call(event, __perf_event_period,
-                           ___perf_event_period, &pe);
+       event_function_call(event, __perf_event_period, &value);
 
        return 0;
 }
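
The period rework above drops the wrapper struct and simply passes a pointer to the value through the opaque info argument. A tiny sketch of that calling convention; call_on_event() and the struct are stand-ins, not the real event_function_call() machinery.

#include <stdint.h>
#include <stdio.h>

struct fake_event { uint64_t sample_period; };

static void set_period(struct fake_event *e, void *info)
{
        uint64_t value = *(uint64_t *)info;     /* unpack the opaque argument */

        e->sample_period = value;
}

static void call_on_event(struct fake_event *e,
                          void (*fn)(struct fake_event *, void *), void *info)
{
        fn(e, info);                            /* stand-in for the cross-call */
}

int main(void)
{
        struct fake_event ev = { 0 };
        uint64_t period = 4096;

        call_on_event(&ev, set_period, &period);
        printf("period = %llu\n", (unsigned long long)ev.sample_period);
        return 0;
}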
@@ -4932,7 +4922,7 @@ static void perf_pending_event(struct irq_work *entry)
 
        if (event->pending_disable) {
                event->pending_disable = 0;
-               __perf_event_disable(event);
+               perf_event_disable_local(event);
        }
 
        if (event->pending_wakeup) {
@@ -7753,11 +7743,13 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 
 static void account_event(struct perf_event *event)
 {
+       bool inc = false;
+
        if (event->parent)
                return;
 
        if (event->attach_state & PERF_ATTACH_TASK)
-               static_key_slow_inc(&perf_sched_events.key);
+               inc = true;
        if (event->attr.mmap || event->attr.mmap_data)
                atomic_inc(&nr_mmap_events);
        if (event->attr.comm)
@@ -7770,11 +7762,14 @@ static void account_event(struct perf_event *event)
        }
        if (event->attr.context_switch) {
                atomic_inc(&nr_switch_events);
-               static_key_slow_inc(&perf_sched_events.key);
+               inc = true;
        }
        if (has_branch_stack(event))
-               static_key_slow_inc(&perf_sched_events.key);
+               inc = true;
        if (is_cgroup_event(event))
+               inc = true;
+
+       if (inc)
                static_key_slow_inc(&perf_sched_events.key);
 
        account_event_cpu(event, event->cpu);
@@ -8422,11 +8417,11 @@ SYSCALL_DEFINE5(perf_event_open,
                 * See perf_event_ctx_lock() for comments on the details
                 * of swizzling perf_event::ctx.
                 */
-               perf_remove_from_context(group_leader, false);
+               perf_remove_from_context(group_leader, 0);
 
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_remove_from_context(sibling, false);
+                       perf_remove_from_context(sibling, 0);
                        put_ctx(gctx);
                }
 
@@ -8479,6 +8474,8 @@ SYSCALL_DEFINE5(perf_event_open,
        perf_event__header_size(event);
        perf_event__id_header_size(event);
 
+       event->owner = current;
+
        perf_install_in_context(ctx, event, event->cpu);
        perf_unpin_context(ctx);
 
@@ -8488,8 +8485,6 @@ SYSCALL_DEFINE5(perf_event_open,
 
        put_online_cpus();
 
-       event->owner = current;
-
        mutex_lock(&current->perf_event_mutex);
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
@@ -8556,7 +8551,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        }
 
        /* Mark owner so we could distinguish it from user events. */
-       event->owner = EVENT_OWNER_KERNEL;
+       event->owner = TASK_TOMBSTONE;
 
        account_event(event);
 
@@ -8606,7 +8601,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
                                 event_entry) {
-               perf_remove_from_context(event, false);
+               perf_remove_from_context(event, 0);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
                list_add(&event->migrate_entry, &events);
@@ -8673,33 +8668,15 @@ static void sync_child_event(struct perf_event *child_event,
                     &parent_event->child_total_time_enabled);
        atomic64_add(child_event->total_time_running,
                     &parent_event->child_total_time_running);
-
-       /*
-        * Remove this event from the parent's list
-        */
-       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-       mutex_lock(&parent_event->child_mutex);
-       list_del_init(&child_event->child_list);
-       mutex_unlock(&parent_event->child_mutex);
-
-       /*
-        * Make sure user/parent get notified, that we just
-        * lost one event.
-        */
-       perf_event_wakeup(parent_event);
-
-       /*
-        * Release the parent event, if this was the last
-        * reference to it.
-        */
-       put_event(parent_event);
 }
 
 static void
-__perf_event_exit_task(struct perf_event *child_event,
-                        struct perf_event_context *child_ctx,
-                        struct task_struct *child)
+perf_event_exit_event(struct perf_event *child_event,
+                     struct perf_event_context *child_ctx,
+                     struct task_struct *child)
 {
+       struct perf_event *parent_event = child_event->parent;
+
        /*
         * Do not destroy the 'original' grouping; because of the context
         * switch optimization the original events could've ended up in a
@@ -8712,57 +8689,86 @@ __perf_event_exit_task(struct perf_event *child_event,
         * Do destroy all inherited groups, we don't care about those
         * and being thorough is better.
         */
-       perf_remove_from_context(child_event, !!child_event->parent);
+       raw_spin_lock_irq(&child_ctx->lock);
+       WARN_ON_ONCE(child_ctx->is_active);
+
+       if (parent_event)
+               perf_group_detach(child_event);
+       list_del_event(child_event, child_ctx);
+       child_event->state = PERF_EVENT_STATE_EXIT; /* see perf_event_release_kernel() */
+       raw_spin_unlock_irq(&child_ctx->lock);
 
        /*
-        * It can happen that the parent exits first, and has events
-        * that are still around due to the child reference. These
-        * events need to be zapped.
+        * Parent events are governed by their filedesc, retain them.
         */
-       if (child_event->parent) {
-               sync_child_event(child_event, child);
-               free_event(child_event);
-       } else {
-               child_event->state = PERF_EVENT_STATE_EXIT;
+       if (!parent_event) {
                perf_event_wakeup(child_event);
+               return;
        }
+       /*
+        * Child events can be cleaned up.
+        */
+
+       sync_child_event(child_event, child);
+
+       /*
+        * Remove this event from the parent's list
+        */
+       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+       mutex_lock(&parent_event->child_mutex);
+       list_del_init(&child_event->child_list);
+       mutex_unlock(&parent_event->child_mutex);
+
+       /*
+        * Kick perf_poll() for is_event_hup().
+        */
+       perf_event_wakeup(parent_event);
+       free_event(child_event);
+       put_event(parent_event);
 }
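
Condensed to its shape, the function above marks the event as exited under the context lock and then branches: events with no parent are retained (their file descriptor still owns them), while inherited children are freed. A minimal userspace sketch of that shape, with invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum state { STATE_ACTIVE, STATE_EXIT };

struct event {
        pthread_mutex_t lock;
        enum state state;
        struct event *parent;           /* NULL for a top-level event */
};

static void exit_event(struct event *e)
{
        pthread_mutex_lock(&e->lock);
        e->state = STATE_EXIT;          /* visible to anyone locking later */
        pthread_mutex_unlock(&e->lock);

        if (!e->parent) {
                printf("kept, state=EXIT\n");   /* its fd still owns it  */
                return;
        }
        printf("child freed\n");                /* inherited child: gone */
        free(e);
}

int main(void)
{
        struct event top = { PTHREAD_MUTEX_INITIALIZER, STATE_ACTIVE, NULL };
        struct event *child = calloc(1, sizeof(*child));

        if (!child)
                return 1;
        pthread_mutex_init(&child->lock, NULL);
        child->parent = &top;

        exit_event(child);      /* "child freed"      */
        exit_event(&top);       /* "kept, state=EXIT" */
        return 0;
}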
 
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
-       struct perf_event *child_event, *next;
        struct perf_event_context *child_ctx, *clone_ctx = NULL;
-       unsigned long flags;
+       struct perf_event *child_event, *next;
+
+       WARN_ON_ONCE(child != current);
 
-       if (likely(!child->perf_event_ctxp[ctxn]))
+       child_ctx = perf_pin_task_context(child, ctxn);
+       if (!child_ctx)
                return;
 
-       local_irq_save(flags);
        /*
-        * We can't reschedule here because interrupts are disabled,
-        * and either child is current or it is a task that can't be
-        * scheduled, so we are now safe from rescheduling changing
-        * our context.
+        * In order to reduce the amount of trickery in ctx tear-down, we hold
+        * ctx::mutex over the entire thing. This serializes against almost
+        * everything that wants to access the ctx.
+        *
+        * The exception is sys_perf_event_open() /
+        * perf_event_create_kernel_counter() which does find_get_context()
+        * without ctx::mutex (it cannot because of the move_group double mutex
+        * lock thing). See the comments in perf_install_in_context().
         */
-       child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
+       mutex_lock(&child_ctx->mutex);
 
        /*
-        * Take the context lock here so that if find_get_context is
-        * reading child->perf_event_ctxp, we wait until it has
-        * incremented the context's refcount before we do put_ctx below.
+        * In a single ctx::lock section, de-schedule the events and detach the
+        * context from the task such that we cannot ever get it scheduled back
+        * in.
         */
-       raw_spin_lock(&child_ctx->lock);
-       task_ctx_sched_out(child_ctx);
-       child->perf_event_ctxp[ctxn] = NULL;
+       raw_spin_lock_irq(&child_ctx->lock);
+       task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
 
        /*
-        * If this context is a clone; unclone it so it can't get
-        * swapped to another process while we're removing all
-        * the events from it.
+        * Now that the context is inactive, destroy the task <-> ctx relation
+        * and mark the context dead.
         */
+       RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
+       put_ctx(child_ctx); /* cannot be last */
+       WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
+       put_task_struct(current); /* cannot be last */
+
        clone_ctx = unclone_ctx(child_ctx);
-       update_context_time(child_ctx);
-       raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+       raw_spin_unlock_irq(&child_ctx->lock);
 
        if (clone_ctx)
                put_ctx(clone_ctx);
@@ -8774,20 +8780,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         */
        perf_event_task(child, child_ctx, 0);
 
-       /*
-        * We can recurse on the same lock type through:
-        *
-        *   __perf_event_exit_task()
-        *     sync_child_event()
-        *       put_event()
-        *         mutex_lock(&ctx->mutex)
-        *
-        * But since its the parent context it won't be the same instance.
-        */
-       mutex_lock(&child_ctx->mutex);
-
        list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
-               __perf_event_exit_task(child_event, child_ctx, child);
+               perf_event_exit_event(child_event, child_ctx, child);
 
        mutex_unlock(&child_ctx->mutex);
 
@@ -8812,8 +8806,7 @@ void perf_event_exit_task(struct task_struct *child)
                 * the owner, closes a race against perf_release() where
                 * we need to serialize on the owner->perf_event_mutex.
                 */
-               smp_wmb();
-               event->owner = NULL;
+               smp_store_release(&event->owner, NULL);
        }
        mutex_unlock(&child->perf_event_mutex);
 
@@ -8896,21 +8889,20 @@ void perf_event_delayed_put(struct task_struct *task)
                WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
 }
 
-struct perf_event *perf_event_get(unsigned int fd)
+struct file *perf_event_get(unsigned int fd)
 {
-       int err;
-       struct fd f;
-       struct perf_event *event;
+       struct file *file;
 
-       err = perf_fget_light(fd, &f);
-       if (err)
-               return ERR_PTR(err);
+       file = fget_raw(fd);
+       if (!file)
+               return ERR_PTR(-EBADF);
 
-       event = f.file->private_data;
-       atomic_long_inc(&event->refcount);
-       fdput(f);
+       if (file->f_op != &perf_fops) {
+               fput(file);
+               return ERR_PTR(-EBADF);
+       }
 
-       return event;
+       return file;
 }
 
 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
@@ -8953,8 +8945,16 @@ inherit_event(struct perf_event *parent_event,
        if (IS_ERR(child_event))
                return child_event;
 
+       /*
+        * is_orphaned_event() and list_add_tail(&parent_event->child_list)
+        * must be under the same lock in order to serialize against
+        * perf_event_release_kernel(), such that either we must observe
+        * is_orphaned_event() or they will observe us on the child_list.
+        */
+       mutex_lock(&parent_event->child_mutex);
        if (is_orphaned_event(parent_event) ||
            !atomic_long_inc_not_zero(&parent_event->refcount)) {
+               mutex_unlock(&parent_event->child_mutex);
                free_event(child_event);
                return NULL;
        }
@@ -9002,8 +9002,6 @@ inherit_event(struct perf_event *parent_event,
        /*
         * Link this into the parent event's child list
         */
-       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-       mutex_lock(&parent_event->child_mutex);
        list_add_tail(&child_event->child_list, &parent_event->child_list);
        mutex_unlock(&parent_event->child_mutex);
 
@@ -9221,13 +9219,14 @@ static void perf_event_init_cpu(int cpu)
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
 static void __perf_event_exit_context(void *__info)
 {
-       struct remove_event re = { .detach_group = true };
        struct perf_event_context *ctx = __info;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       struct perf_event *event;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
-               __perf_remove_from_context(&re);
-       rcu_read_unlock();
+       raw_spin_lock(&ctx->lock);
+       list_for_each_entry(event, &ctx->event_list, event_entry)
+               __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
+       raw_spin_unlock(&ctx->lock);
 }
 
 static void perf_event_exit_cpu_context(int cpu)
index 92ce5f4ccc264e01a5551965d173e39e41392143..3f8cb1e14588fe3258a1c3aa49874e008f5a266a 100644 (file)
@@ -444,7 +444,7 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
         * current task.
         */
        if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
-               __perf_event_disable(bp);
+               perf_event_disable_local(bp);
        else
                perf_event_disable(bp);
 
index adfdc0536117c1f10bf8f0dd8014798011e1f855..1faad2cfdb9e4572c91431e4b11941266e23a231 100644 (file)
@@ -459,6 +459,25 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
        __free_page(page);
 }
 
+static void __rb_free_aux(struct ring_buffer *rb)
+{
+       int pg;
+
+       if (rb->aux_priv) {
+               rb->free_aux(rb->aux_priv);
+               rb->free_aux = NULL;
+               rb->aux_priv = NULL;
+       }
+
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
+
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
+}
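
The move of __rb_free_aux() above goes together with the error-path change further down: on allocation failure the buffer is torn down directly rather than through the refcount-based rb_free_aux(). As a rough illustration only (the names and the exact rationale here are assumptions, not taken from the patch description), the general shape looks like this:

#include <stdlib.h>

struct buf {
        int refcount;
        int nr_pages;
        void **pages;
};

static void free_pages_direct(struct buf *b)
{
        for (int i = 0; i < b->nr_pages; i++)
                free(b->pages[i]);
        free(b->pages);
        b->pages = NULL;
        b->nr_pages = 0;
}

static int alloc_pages_for(struct buf *b, int want)
{
        b->pages = calloc(want, sizeof(*b->pages));
        if (!b->pages)
                return -1;

        for (int i = 0; i < want; i++) {
                b->pages[i] = malloc(4096);
                if (!b->pages[i]) {
                        free_pages_direct(b);   /* direct, not refcounted */
                        return -1;
                }
                b->nr_pages++;
        }
        b->refcount = 1;        /* only a fully built buffer gets a reference */
        return 0;
}

int main(void)
{
        struct buf b = { 0, 0, NULL };

        if (!alloc_pages_for(&b, 4))
                free_pages_direct(&b);
        return 0;
}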
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
 {
@@ -547,30 +566,11 @@ out:
        if (!ret)
                rb->aux_pgoff = pgoff;
        else
-               rb_free_aux(rb);
+               __rb_free_aux(rb);
 
        return ret;
 }
 
-static void __rb_free_aux(struct ring_buffer *rb)
-{
-       int pg;
-
-       if (rb->aux_priv) {
-               rb->free_aux(rb->aux_priv);
-               rb->free_aux = NULL;
-               rb->aux_priv = NULL;
-       }
-
-       if (rb->aux_nr_pages) {
-               for (pg = 0; pg < rb->aux_nr_pages; pg++)
-                       rb_free_aux_page(rb, pg);
-
-               kfree(rb->aux_pages);
-               rb->aux_nr_pages = 0;
-       }
-}
-
 void rb_free_aux(struct ring_buffer *rb)
 {
        if (atomic_dec_and_test(&rb->aux_refcount))
index 0773f2b23b107dcc3be9e5caabdbaa0a25de01d0..5d6ce6413ef1d227b32c99a4330bee1289f9f571 100644 (file)
@@ -1191,7 +1191,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
        if (pi_state->owner != current)
                return -EINVAL;
 
-       raw_spin_lock(&pi_state->pi_mutex.wait_lock);
+       raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
        /*
@@ -1217,22 +1217,22 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
        else if (curval != uval)
                ret = -EINVAL;
        if (ret) {
-               raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+               raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
                return ret;
        }
 
-       raw_spin_lock_irq(&pi_state->owner->pi_lock);
+       raw_spin_lock(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
-       raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+       raw_spin_unlock(&pi_state->owner->pi_lock);
 
-       raw_spin_lock_irq(&new_owner->pi_lock);
+       raw_spin_lock(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
-       raw_spin_unlock_irq(&new_owner->pi_lock);
+       raw_spin_unlock(&new_owner->pi_lock);
 
-       raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+       raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
        deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
 
@@ -2127,11 +2127,11 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                 * we returned due to timeout or signal without taking the
                 * rt_mutex. Too late.
                 */
-               raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
+               raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
                owner = rt_mutex_owner(&q->pi_state->pi_mutex);
                if (!owner)
                        owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
-               raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
+               raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
                ret = fixup_pi_state_owner(uaddr, q, owner);
                goto out;
        }
index a302cf9a2126c8a911ff4da1a944c88c6d3b3f8a..57bff7857e879bf2301a92723ade0a968870e637 100644 (file)
@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
        unsigned int flags = 0, irq = desc->irq_data.irq;
        struct irqaction *action = desc->action;
 
-       do {
+       /* action might have become NULL since we dropped the lock */
+       while (action) {
                irqreturn_t res;
 
                trace_irq_handler_entry(irq, action);
@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
 
                retval |= res;
                action = action->next;
-       } while (action);
+       }
 
        add_interrupt_randomness(irq, flags);
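
The do/while-to-while change above matters only for one case: an action list that became empty. A trivial standalone C example of why the loop form matters when the head pointer may be NULL:

#include <stdio.h>
#include <stddef.h>

struct action { int id; struct action *next; };

static void run_all(struct action *action)
{
        /* action may be NULL if every handler was removed before we got here */
        while (action) {
                printf("handling %d\n", action->id);
                action = action->next;
        }
}

int main(void)
{
        struct action b = { 2, NULL }, a = { 1, &b };

        run_all(&a);    /* handles 1 and 2 */
        run_all(NULL);  /* safe no-op; a do/while body would dereference NULL */
        return 0;
}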
 
index 6e655f7acd3bf79b68f62fc4fbdf6806cfd11884..3e56d2f03e24e6ee147e837727e6aac7a479f9aa 100644 (file)
@@ -575,10 +575,15 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
        unsigned int type = IRQ_TYPE_NONE;
        int virq;
 
-       if (fwspec->fwnode)
-               domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY);
-       else
+       if (fwspec->fwnode) {
+               domain = irq_find_matching_fwnode(fwspec->fwnode,
+                                                 DOMAIN_BUS_WIRED);
+               if (!domain)
+                       domain = irq_find_matching_fwnode(fwspec->fwnode,
+                                                         DOMAIN_BUS_ANY);
+       } else {
                domain = irq_default_domain;
+       }
 
        if (!domain) {
                pr_warn("no irq domain found for %s !\n",
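
The lookup change above is a "prefer the specific match, fall back to the wildcard" pattern. A small self-contained sketch of that ordering (the table and names are invented; this is not the irqdomain API):

#include <stdio.h>
#include <string.h>

struct domain { const char *name; const char *bus; };

static struct domain domains[] = {
        { "gpio-domain", "wired" },
        { "msi-domain",  "any"   },
};

static struct domain *find_domain(const char *bus)
{
        for (size_t i = 0; i < sizeof(domains) / sizeof(domains[0]); i++) {
                if (!strcmp(bus, "any") || !strcmp(domains[i].bus, bus))
                        return &domains[i];
        }
        return NULL;
}

int main(void)
{
        struct domain *d = find_domain("wired");        /* most specific first */

        if (!d)
                d = find_domain("any");                 /* then the wildcard   */
        printf("picked %s\n", d ? d->name : "none");
        return 0;
}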
index 8251e75dd9c0bd67337754baf6f03fb2cf1f956f..3e746607abe5230bb9c650957582669c085d9a90 100644 (file)
@@ -99,13 +99,14 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  * 2) Drop lock->wait_lock
  * 3) Try to unlock the lock with cmpxchg
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+                                       unsigned long flags)
        __releases(lock->wait_lock)
 {
        struct task_struct *owner = rt_mutex_owner(lock);
 
        clear_rt_mutex_waiters(lock);
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
@@ -147,11 +148,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 /*
  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+                                       unsigned long flags)
        __releases(lock->wait_lock)
 {
        lock->owner = NULL;
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        return true;
 }
 #endif
@@ -433,7 +435,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        int ret = 0, depth = 0;
        struct rt_mutex *lock;
        bool detect_deadlock;
-       unsigned long flags;
        bool requeue = true;
 
        detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
@@ -476,7 +477,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        /*
         * [1] Task cannot go away as we did a get_task() before !
         */
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock_irq(&task->pi_lock);
 
        /*
         * [2] Get the waiter on which @task is blocked on.
@@ -560,7 +561,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         * operations.
         */
        if (!raw_spin_trylock(&lock->wait_lock)) {
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock_irq(&task->pi_lock);
                cpu_relax();
                goto retry;
        }
@@ -591,7 +592,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                /*
                 * No requeue[7] here. Just release @task [8]
                 */
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock(&task->pi_lock);
                put_task_struct(task);
 
                /*
@@ -599,14 +600,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 * If there is no owner of the lock, end of chain.
                 */
                if (!rt_mutex_owner(lock)) {
-                       raw_spin_unlock(&lock->wait_lock);
+                       raw_spin_unlock_irq(&lock->wait_lock);
                        return 0;
                }
 
                /* [10] Grab the next task, i.e. owner of @lock */
                task = rt_mutex_owner(lock);
                get_task_struct(task);
-               raw_spin_lock_irqsave(&task->pi_lock, flags);
+               raw_spin_lock(&task->pi_lock);
 
                /*
                 * No requeue [11] here. We just do deadlock detection.
@@ -621,8 +622,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                top_waiter = rt_mutex_top_waiter(lock);
 
                /* [13] Drop locks */
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock(&task->pi_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
 
                /* If owner is not blocked, end of chain. */
                if (!next_lock)
@@ -643,7 +644,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        rt_mutex_enqueue(lock, waiter);
 
        /* [8] Release the task */
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
        put_task_struct(task);
 
        /*
@@ -661,14 +662,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 */
                if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }
 
        /* [10] Grab the next task, i.e. the owner of @lock */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
 
        /* [11] requeue the pi waiters if necessary */
        if (waiter == rt_mutex_top_waiter(lock)) {
@@ -722,8 +723,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        top_waiter = rt_mutex_top_waiter(lock);
 
        /* [13] Drop the locks */
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&task->pi_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        /*
         * Make the actual exit decisions [12], based on the stored
@@ -746,7 +747,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        goto again;
 
  out_unlock_pi:
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock_irq(&task->pi_lock);
  out_put_task:
        put_task_struct(task);
 
@@ -756,7 +757,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 /*
  * Try to take an rt-mutex
  *
- * Must be called with lock->wait_lock held.
+ * Must be called with lock->wait_lock held and interrupts disabled.
  *
  * @lock:   The lock to be acquired.
  * @task:   The task which wants to acquire the lock
@@ -766,8 +767,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                                struct rt_mutex_waiter *waiter)
 {
-       unsigned long flags;
-
        /*
         * Before testing whether we can acquire @lock, we set the
         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
@@ -852,7 +851,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
         * case, but conditionals are more expensive than a redundant
         * store.
         */
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
        task->pi_blocked_on = NULL;
        /*
         * Finish the lock acquisition. @task is the new owner. If
@@ -861,7 +860,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
         */
        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
 
 takeit:
        /* We got the lock. */
@@ -883,7 +882,7 @@ takeit:
  *
  * Prepare waiter and propagate pi chain
  *
- * This must be called with lock->wait_lock held.
+ * This must be called with lock->wait_lock held and interrupts disabled.
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
@@ -894,7 +893,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;
-       unsigned long flags;
 
        /*
         * Early deadlock detection. We really don't want the task to
@@ -908,7 +906,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        if (owner == task)
                return -EDEADLK;
 
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
@@ -921,12 +919,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        task->pi_blocked_on = waiter;
 
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
 
        if (!owner)
                return 0;
 
-       raw_spin_lock_irqsave(&owner->pi_lock, flags);
+       raw_spin_lock(&owner->pi_lock);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);
@@ -941,7 +939,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);
 
-       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       raw_spin_unlock(&owner->pi_lock);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
@@ -957,12 +955,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         */
        get_task_struct(owner);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                         next_lock, waiter, task);
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        return res;
 }
@@ -971,15 +969,14 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  * Remove the top waiter from the current tasks pi waiter tree and
  * queue it up.
  *
- * Called with lock->wait_lock held.
+ * Called with lock->wait_lock held and interrupts disabled.
  */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
                                    struct rt_mutex *lock)
 {
        struct rt_mutex_waiter *waiter;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&current->pi_lock, flags);
+       raw_spin_lock(&current->pi_lock);
 
        waiter = rt_mutex_top_waiter(lock);
 
@@ -1001,7 +998,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
-       raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+       raw_spin_unlock(&current->pi_lock);
 
        wake_q_add(wake_q, waiter->task);
 }
@@ -1009,7 +1006,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 /*
  * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held and
+ * Must be called with lock->wait_lock held and interrupts disabled. The
  * caller must have just failed to try_to_take_rt_mutex().
  */
 static void remove_waiter(struct rt_mutex *lock,
@@ -1018,12 +1015,11 @@ static void remove_waiter(struct rt_mutex *lock,
        bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex *next_lock;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&current->pi_lock, flags);
+       raw_spin_lock(&current->pi_lock);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
-       raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+       raw_spin_unlock(&current->pi_lock);
 
        /*
         * Only update priority if the waiter was the highest priority
@@ -1032,7 +1028,7 @@ static void remove_waiter(struct rt_mutex *lock,
        if (!owner || !is_top_waiter)
                return;
 
-       raw_spin_lock_irqsave(&owner->pi_lock, flags);
+       raw_spin_lock(&owner->pi_lock);
 
        rt_mutex_dequeue_pi(owner, waiter);
 
@@ -1044,7 +1040,7 @@ static void remove_waiter(struct rt_mutex *lock,
        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);
 
-       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       raw_spin_unlock(&owner->pi_lock);
 
        /*
         * Don't walk the chain, if the owner task is not blocked
@@ -1056,12 +1052,12 @@ static void remove_waiter(struct rt_mutex *lock,
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
                                   next_lock, NULL, current);
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 }
 
 /*
@@ -1097,11 +1093,11 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
  * @lock:               the rt_mutex to take
  * @state:              the state the task should block in (TASK_INTERRUPTIBLE
- *                      or TASK_UNINTERRUPTIBLE)
+ *                      or TASK_UNINTERRUPTIBLE)
  * @timeout:            the pre-initialized and started timer, or NULL for none
  * @waiter:             the pre-initialized rt_mutex_waiter
  *
- * lock->wait_lock must be held by the caller.
+ * Must be called with lock->wait_lock held and interrupts disabled.
  */
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
@@ -1129,13 +1125,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                                break;
                }
 
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
 
                debug_rt_mutex_print_deadlock(waiter);
 
                schedule();
 
-               raw_spin_lock(&lock->wait_lock);
+               raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
        }
 
@@ -1172,17 +1168,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  enum rtmutex_chainwalk chwalk)
 {
        struct rt_mutex_waiter waiter;
+       unsigned long flags;
        int ret = 0;
 
        debug_rt_mutex_init_waiter(&waiter);
        RB_CLEAR_NODE(&waiter.pi_tree_entry);
        RB_CLEAR_NODE(&waiter.tree_entry);
 
-       raw_spin_lock(&lock->wait_lock);
+       /*
+        * Technically we could use raw_spin_[un]lock_irq() here, but this can
+        * be called in early boot if the cmpxchg() fast path is disabled
+        * (debug, no architecture support). In this case we will acquire the
+        * rtmutex with lock->wait_lock held. But we cannot unconditionally
+        * enable interrupts in that early boot case. So we need to use the
+        * irqsave/restore variants.
+        */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
                return 0;
        }
 
@@ -1211,7 +1216,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        /* Remove pending timer: */
        if (unlikely(timeout))
@@ -1227,6 +1232,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
  */
 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
+       unsigned long flags;
        int ret;
 
        /*
@@ -1238,10 +1244,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
                return 0;
 
        /*
-        * The mutex has currently no owner. Lock the wait lock and
-        * try to acquire the lock.
+        * The mutex has currently no owner. Lock the wait lock and try to
+        * acquire the lock. We use irqsave here to support early boot calls.
         */
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        ret = try_to_take_rt_mutex(lock, current, NULL);
 
@@ -1251,7 +1257,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        return ret;
 }
@@ -1263,7 +1269,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
                                        struct wake_q_head *wake_q)
 {
-       raw_spin_lock(&lock->wait_lock);
+       unsigned long flags;
+
+       /* irqsave required to support early boot calls */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        debug_rt_mutex_unlock(lock);
 
@@ -1302,10 +1311,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
-               if (unlock_rt_mutex_safe(lock) == true)
+               if (unlock_rt_mutex_safe(lock, flags) == true)
                        return false;
                /* Relock the rtmutex and try again */
-               raw_spin_lock(&lock->wait_lock);
+               raw_spin_lock_irqsave(&lock->wait_lock, flags);
        }
 
        /*
@@ -1316,7 +1325,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
         */
        mark_wakeup_next_waiter(wake_q, lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        /* check PI boosting */
        return true;
@@ -1596,10 +1605,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 {
        int ret;
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        if (try_to_take_rt_mutex(lock, task, NULL)) {
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
                return 1;
        }
 
@@ -1620,7 +1629,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
        if (unlikely(ret))
                remove_waiter(lock, waiter);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        debug_rt_mutex_print_deadlock(waiter);
 
@@ -1668,7 +1677,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 {
        int ret;
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        set_current_state(TASK_INTERRUPTIBLE);
 
@@ -1684,7 +1693,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        return ret;
 }
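
The rt_mutex slow-path hunks above (rt_mutex_slowlock(), rt_mutex_slowtrylock(), rt_mutex_slowunlock()) switch lock->wait_lock to the irqsave/irqrestore spinlock variants because, as the added comment notes, these paths can run in early boot before interrupts are enabled, so the lock must not unconditionally re-enable interrupts on release; the inner pi_lock sections can then drop to plain raw_spin_lock()/unlock() since interrupts are already off. A loose userspace sketch of why saving and restoring the caller's state matters; the thread-local flag and the *_sim helpers below are invented for the illustration and stand in for the real CPU interrupt state:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the CPU's interrupt-enable flag. */
static _Thread_local bool irqs_enabled = true;

static unsigned long local_irq_save_sim(void)
{
        unsigned long flags = irqs_enabled;     /* remember the caller's state */

        irqs_enabled = false;                   /* "disable interrupts" */
        return flags;
}

static void local_irq_restore_sim(unsigned long flags)
{
        irqs_enabled = flags;                   /* restore exactly what the caller had */
}

static void slowpath(void)
{
        unsigned long flags = local_irq_save_sim();

        /* critical section: safe whether the caller had irqs on or off */
        local_irq_restore_sim(flags);
}

int main(void)
{
        slowpath();                             /* called with irqs "on": left on */
        irqs_enabled = false;                   /* e.g. the early-boot case */
        slowpath();                             /* called with irqs "off": left off */
        printf("irqs_enabled after early-boot call: %d\n", irqs_enabled);
        return 0;
}

A plain _irq unlock would instead force interrupts back on in the second call, which is exactly what the comment in the patch rules out.
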
index e517a16cb426cc30b0251a43d22c5d2cb01bead9..70ee3775de24ebcf2e80ab2fdad21eaad24dd2fe 100644 (file)
@@ -150,7 +150,7 @@ void devm_memunmap(struct device *dev, void *addr)
 }
 EXPORT_SYMBOL(devm_memunmap);
 
-pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
+pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 {
        return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
 }
@@ -183,7 +183,11 @@ EXPORT_SYMBOL(put_zone_device_page);
 
 static void pgmap_radix_release(struct resource *res)
 {
-       resource_size_t key;
+       resource_size_t key, align_start, align_size, align_end;
+
+       align_start = res->start & ~(SECTION_SIZE - 1);
+       align_size = ALIGN(resource_size(res), SECTION_SIZE);
+       align_end = align_start + align_size - 1;
 
        mutex_lock(&pgmap_lock);
        for (key = res->start; key <= res->end; key += SECTION_SIZE)
@@ -226,12 +230,11 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
                percpu_ref_put(pgmap->ref);
        }
 
-       pgmap_radix_release(res);
-
        /* pages are dead and unused, undo the arch mapping */
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
        arch_remove_memory(align_start, align_size);
+       pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                        "%s: failed to free all reserved pages\n", __func__);
 }
@@ -267,7 +270,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 {
        int is_ram = region_intersects(res->start, resource_size(res),
                        "System RAM");
-       resource_size_t key, align_start, align_size;
+       resource_size_t key, align_start, align_size, align_end;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
        unsigned long pfn;
@@ -309,7 +312,10 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
        mutex_lock(&pgmap_lock);
        error = 0;
-       for (key = res->start; key <= res->end; key += SECTION_SIZE) {
+       align_start = res->start & ~(SECTION_SIZE - 1);
+       align_size = ALIGN(resource_size(res), SECTION_SIZE);
+       align_end = align_start + align_size - 1;
+       for (key = align_start; key <= align_end; key += SECTION_SIZE) {
                struct dev_pagemap *dup;
 
                rcu_read_lock();
@@ -336,8 +342,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (nid < 0)
                nid = numa_mem_id();
 
-       align_start = res->start & ~(SECTION_SIZE - 1);
-       align_size = ALIGN(resource_size(res), SECTION_SIZE);
        error = arch_add_memory(nid, align_start, align_size, true);
        if (error)
                goto err_add_memory;
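
The devm_memremap_pages() hunks above compute the radix-tree key range from a SECTION_SIZE-aligned window around the resource rather than from the raw res->start/res->end. A small standalone sketch of that alignment arithmetic; SECTION_SIZE and the sample addresses below are illustrative values, not taken from the patch:

#include <stdio.h>

#define SECTION_SIZE    (1UL << 27)     /* 128 MiB, an assumed example value */
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long start = 0x105000000UL;    /* sample start, not section-aligned */
        unsigned long size  = 0x03c000000UL;    /* sample resource size */

        unsigned long align_start = start & ~(SECTION_SIZE - 1);
        unsigned long align_size  = ALIGN(size, SECTION_SIZE);
        unsigned long align_end   = align_start + align_size - 1;

        /* Walk the window at section granularity, as the radix keys do. */
        for (unsigned long key = align_start; key <= align_end; key += SECTION_SIZE)
                printf("section key: %#lx\n", key);
        return 0;
}
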
index 8358f4697c0c3aea77aba802833660d4082b7c13..9537da37ce87fe28cf11ee97d78bfa17ddc6beb3 100644 (file)
@@ -303,6 +303,9 @@ struct load_info {
        struct _ddebug *debug;
        unsigned int num_debug;
        bool sig_ok;
+#ifdef CONFIG_KALLSYMS
+       unsigned long mod_kallsyms_init_off;
+#endif
        struct {
                unsigned int sym, str, mod, vers, info, pcpu;
        } index;
@@ -2480,10 +2483,21 @@ static void layout_symtab(struct module *mod, struct load_info *info)
        strsect->sh_flags |= SHF_ALLOC;
        strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
                                         info->index.str) | INIT_OFFSET_MASK;
-       mod->init_layout.size = debug_align(mod->init_layout.size);
        pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+
+       /* We'll tack temporary mod_kallsyms on the end. */
+       mod->init_layout.size = ALIGN(mod->init_layout.size,
+                                     __alignof__(struct mod_kallsyms));
+       info->mod_kallsyms_init_off = mod->init_layout.size;
+       mod->init_layout.size += sizeof(struct mod_kallsyms);
+       mod->init_layout.size = debug_align(mod->init_layout.size);
 }
 
+/*
+ * We use the full symtab and strtab which layout_symtab arranged to
+ * be appended to the init section.  Later we switch to the cut-down
+ * core-only ones.
+ */
 static void add_kallsyms(struct module *mod, const struct load_info *info)
 {
        unsigned int i, ndst;
@@ -2492,29 +2506,34 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
        char *s;
        Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
 
-       mod->symtab = (void *)symsec->sh_addr;
-       mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+       /* Set up to point into init section. */
+       mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
+
+       mod->kallsyms->symtab = (void *)symsec->sh_addr;
+       mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
        /* Make sure we get permanent strtab: don't use info->strtab. */
-       mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+       mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
 
        /* Set types up while we still have access to sections. */
-       for (i = 0; i < mod->num_symtab; i++)
-               mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
-
-       mod->core_symtab = dst = mod->core_layout.base + info->symoffs;
-       mod->core_strtab = s = mod->core_layout.base + info->stroffs;
-       src = mod->symtab;
-       for (ndst = i = 0; i < mod->num_symtab; i++) {
+       for (i = 0; i < mod->kallsyms->num_symtab; i++)
+               mod->kallsyms->symtab[i].st_info
+                       = elf_type(&mod->kallsyms->symtab[i], info);
+
+       /* Now populate the cut down core kallsyms for after init. */
+       mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
+       mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
+       src = mod->kallsyms->symtab;
+       for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
                if (i == 0 ||
                    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
                                   info->index.pcpu)) {
                        dst[ndst] = src[i];
-                       dst[ndst++].st_name = s - mod->core_strtab;
-                       s += strlcpy(s, &mod->strtab[src[i].st_name],
+                       dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
+                       s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
                                     KSYM_NAME_LEN) + 1;
                }
        }
-       mod->core_num_syms = ndst;
+       mod->core_kallsyms.num_symtab = ndst;
 }
 #else
 static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -3263,9 +3282,8 @@ static noinline int do_init_module(struct module *mod)
        module_put(mod);
        trim_init_extable(mod);
 #ifdef CONFIG_KALLSYMS
-       mod->num_symtab = mod->core_num_syms;
-       mod->symtab = mod->core_symtab;
-       mod->strtab = mod->core_strtab;
+       /* Switch to core kallsyms now init is done: kallsyms may be walking! */
+       rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
 #endif
        mod_tree_remove_init(mod);
        disable_ro_nx(&mod->init_layout);
@@ -3496,7 +3514,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
        /* Module is ready to execute: parsing args may do that. */
        after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
-                                 -32768, 32767, NULL,
+                                 -32768, 32767, mod,
                                  unknown_module_param_cb);
        if (IS_ERR(after_dashes)) {
                err = PTR_ERR(after_dashes);
@@ -3627,6 +3645,11 @@ static inline int is_arm_mapping_symbol(const char *str)
               && (str[2] == '\0' || str[2] == '.');
 }
 
+static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
+{
+       return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
+}
+
 static const char *get_ksymbol(struct module *mod,
                               unsigned long addr,
                               unsigned long *size,
@@ -3634,6 +3657,7 @@ static const char *get_ksymbol(struct module *mod,
 {
        unsigned int i, best = 0;
        unsigned long nextval;
+       struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
        /* At worse, next value is at end of module */
        if (within_module_init(addr, mod))
@@ -3643,32 +3667,32 @@ static const char *get_ksymbol(struct module *mod,
 
        /* Scan for closest preceding symbol, and next symbol. (ELF
           starts real symbols at 1). */
-       for (i = 1; i < mod->num_symtab; i++) {
-               if (mod->symtab[i].st_shndx == SHN_UNDEF)
+       for (i = 1; i < kallsyms->num_symtab; i++) {
+               if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
                        continue;
 
                /* We ignore unnamed symbols: they're uninformative
                 * and inserted at a whim. */
-               if (mod->symtab[i].st_value <= addr
-                   && mod->symtab[i].st_value > mod->symtab[best].st_value
-                   && *(mod->strtab + mod->symtab[i].st_name) != '\0'
-                   && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+               if (*symname(kallsyms, i) == '\0'
+                   || is_arm_mapping_symbol(symname(kallsyms, i)))
+                       continue;
+
+               if (kallsyms->symtab[i].st_value <= addr
+                   && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
                        best = i;
-               if (mod->symtab[i].st_value > addr
-                   && mod->symtab[i].st_value < nextval
-                   && *(mod->strtab + mod->symtab[i].st_name) != '\0'
-                   && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
-                       nextval = mod->symtab[i].st_value;
+               if (kallsyms->symtab[i].st_value > addr
+                   && kallsyms->symtab[i].st_value < nextval)
+                       nextval = kallsyms->symtab[i].st_value;
        }
 
        if (!best)
                return NULL;
 
        if (size)
-               *size = nextval - mod->symtab[best].st_value;
+               *size = nextval - kallsyms->symtab[best].st_value;
        if (offset)
-               *offset = addr - mod->symtab[best].st_value;
-       return mod->strtab + mod->symtab[best].st_name;
+               *offset = addr - kallsyms->symtab[best].st_value;
+       return symname(kallsyms, best);
 }
 
 /* For kallsyms to ask for address resolution.  NULL means not found.  Careful
@@ -3758,19 +3782,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               struct mod_kallsyms *kallsyms;
+
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               if (symnum < mod->num_symtab) {
-                       *value = mod->symtab[symnum].st_value;
-                       *type = mod->symtab[symnum].st_info;
-                       strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
-                               KSYM_NAME_LEN);
+               kallsyms = rcu_dereference_sched(mod->kallsyms);
+               if (symnum < kallsyms->num_symtab) {
+                       *value = kallsyms->symtab[symnum].st_value;
+                       *type = kallsyms->symtab[symnum].st_info;
+                       strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
                        strlcpy(module_name, mod->name, MODULE_NAME_LEN);
                        *exported = is_exported(name, *value, mod);
                        preempt_enable();
                        return 0;
                }
-               symnum -= mod->num_symtab;
+               symnum -= kallsyms->num_symtab;
        }
        preempt_enable();
        return -ERANGE;
@@ -3779,11 +3805,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 static unsigned long mod_find_symname(struct module *mod, const char *name)
 {
        unsigned int i;
+       struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
-       for (i = 0; i < mod->num_symtab; i++)
-               if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
-                   mod->symtab[i].st_info != 'U')
-                       return mod->symtab[i].st_value;
+       for (i = 0; i < kallsyms->num_symtab; i++)
+               if (strcmp(name, symname(kallsyms, i)) == 0 &&
+                   kallsyms->symtab[i].st_info != 'U')
+                       return kallsyms->symtab[i].st_value;
        return 0;
 }
 
@@ -3822,11 +3849,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
        module_assert_mutex();
 
        list_for_each_entry(mod, &modules, list) {
+               /* We hold module_mutex: no need for rcu_dereference_sched */
+               struct mod_kallsyms *kallsyms = mod->kallsyms;
+
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               for (i = 0; i < mod->num_symtab; i++) {
-                       ret = fn(data, mod->strtab + mod->symtab[i].st_name,
-                                mod, mod->symtab[i].st_value);
+               for (i = 0; i < kallsyms->num_symtab; i++) {
+                       ret = fn(data, symname(kallsyms, i),
+                                mod, kallsyms->symtab[i].st_value);
                        if (ret != 0)
                                return ret;
                }
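
The module loader hunks above gather the full symtab/strtab behind a single mod->kallsyms pointer that lives in the init region, and once init is done the trimmed core copy is published with rcu_assign_pointer(), so a concurrent kallsyms walker always sees one consistent table. A loose userspace analogue of that publish-and-switch pattern using C11 atomics instead of RCU; the structures, arrays and names below are invented for the sketch:

#include <stdatomic.h>
#include <stdio.h>

struct symtab {
        const char *const *names;
        unsigned int num;
};

static const char *const full_names[] = { "init_fn", "core_fn_a", "core_fn_b" };
static const char *const core_names[] = { "core_fn_a", "core_fn_b" };

static struct symtab full_tab = { full_names, 3 };
static struct symtab core_tab = { core_names, 2 };

/* Readers only ever dereference this pointer; writers swap it whole. */
static _Atomic(struct symtab *) cur_tab = &full_tab;

static void dump_symbols(void)
{
        struct symtab *t = atomic_load_explicit(&cur_tab, memory_order_acquire);

        for (unsigned int i = 0; i < t->num; i++)
                printf("%s\n", t->names[i]);
}

int main(void)
{
        dump_symbols();                         /* sees the full init-time table */
        /* "Init is done": publish the trimmed table in one atomic store. */
        atomic_store_explicit(&cur_tab, &core_tab, memory_order_release);
        dump_symbols();                         /* sees only the core symbols */
        return 0;
}
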
index f4ad91b746f1e4afd96b3615a4bad844a0875528..4d73a834c7e6590e29eb0d1a71645caf6c274016 100644 (file)
@@ -588,7 +588,7 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
-       /* Veryify no one has done anything silly */
+       /* Verify no one has done anything silly: */
        BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
 
        /* bump default and minimum pid_max based on number of cpus */
index 02e8dfaa1ce2d2085bd85e165ad6f25b9697a11a..68d3ebc12601b6bdea4f9d2c6cfca997a0b6a0dc 100644 (file)
@@ -235,7 +235,7 @@ config PM_TRACE_RTC
 
 config APM_EMULATION
        tristate "Advanced Power Management Emulation"
-       depends on PM && SYS_SUPPORTS_APM_EMULATION
+       depends on SYS_SUPPORTS_APM_EMULATION
        help
          APM is a BIOS specification for saving power using several different
          techniques. This is mostly useful for battery powered laptops with
index 63d3a24e081ad311b59152d884209c7d1aa81c70..9503d590e5ef5b81537947b9925ecfe689f72a91 100644 (file)
@@ -6840,7 +6840,7 @@ static void sched_init_numa(void)
 
                        sched_domains_numa_masks[i][j] = mask;
 
-                       for (k = 0; k < nr_node_ids; k++) {
+                       for_each_node(k) {
                                if (node_distance(j, k) > sched_domains_numa_distance[i])
                                        continue;
 
index 1926606ece807361ab02ef51f4864543640e8d8d..56b7d4b839476b6ed1692e786abe9ed6cda64a5f 100644 (file)
@@ -1220,8 +1220,6 @@ static void task_numa_assign(struct task_numa_env *env,
 {
        if (env->best_task)
                put_task_struct(env->best_task);
-       if (p)
-               get_task_struct(p);
 
        env->best_task = p;
        env->best_imp = imp;
@@ -1289,20 +1287,30 @@ static void task_numa_compare(struct task_numa_env *env,
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
        int dist = env->dist;
+       bool assigned = false;
 
        rcu_read_lock();
 
        raw_spin_lock_irq(&dst_rq->lock);
        cur = dst_rq->curr;
        /*
-        * No need to move the exiting task, and this ensures that ->curr
-        * wasn't reaped and thus get_task_struct() in task_numa_assign()
-        * is safe under RCU read lock.
-        * Note that rcu_read_lock() itself can't protect from the final
-        * put_task_struct() after the last schedule().
+        * No need to move the exiting task or idle task.
         */
        if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                cur = NULL;
+       else {
+               /*
+                * Pin the task_struct here to protect the p->numa_faults
+                * access in task_weight(), since numa_faults could already
+                * have been freed in the following path:
+                * finish_task_switch()
+                *     --> put_task_struct()
+                *         --> __put_task_struct()
+                *             --> task_numa_free()
+                */
+               get_task_struct(cur);
+       }
+
        raw_spin_unlock_irq(&dst_rq->lock);
 
        /*
@@ -1386,6 +1394,7 @@ balance:
                 */
                if (!load_too_imbalanced(src_load, dst_load, env)) {
                        imp = moveimp - 1;
+                       put_task_struct(cur);
                        cur = NULL;
                        goto assign;
                }
@@ -1411,9 +1420,16 @@ balance:
                env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
 
 assign:
+       assigned = true;
        task_numa_assign(env, cur, imp);
 unlock:
        rcu_read_unlock();
+       /*
+        * If cur was not assigned as the best task, drop the extra
+        * reference we took on it above.
+        */
+       if (cur && !assigned)
+               put_task_struct(cur);
 }
 
 static void task_numa_find_cpu(struct task_numa_env *env,
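
The task_numa_compare() change above pins dst_rq->curr with get_task_struct() before the runqueue lock is dropped and releases that reference again unless the task was handed to task_numa_assign(). A simplified userspace sketch of the get/put-unless-assigned pattern with a plain C11 refcount; the struct, helpers and ids are invented and this is not the kernel's task_struct lifetime machinery:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct task {
        atomic_int refcount;
        int id;
};

static void get_task(struct task *t)
{
        atomic_fetch_add(&t->refcount, 1);
}

static void put_task(struct task *t)
{
        if (atomic_fetch_sub(&t->refcount, 1) == 1) {
                printf("freeing task %d\n", t->id);
                free(t);
        }
}

/* Pin 'cur' while we may still dereference it; keep the reference only
 * if it is handed off as the assigned (best) task. */
static void compare_and_maybe_assign(struct task *cur, int make_best)
{
        int assigned = 0;

        get_task(cur);                  /* prevents use-after-free while unlocked */
        /* ... inspect cur's statistics here ... */
        if (make_best)
                assigned = 1;           /* ownership passes to the best-task slot */
        if (!assigned)
                put_task(cur);          /* drop the pin taken above */
}

int main(void)
{
        struct task *t = malloc(sizeof(*t));

        if (!t)
                return 1;
        atomic_init(&t->refcount, 1);   /* the caller's own reference */
        t->id = 42;
        compare_and_maybe_assign(t, 0); /* not assigned: pin dropped again */
        put_task(t);                    /* last reference: task is freed */
        return 0;
}
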
index de0e786b26671b633ccaa56d9ebba6ce7b9d77cf..544a7133cbd1df2a6ee4369231148fc0309eae11 100644 (file)
@@ -162,7 +162,7 @@ static void cpuidle_idle_call(void)
         */
        if (idle_should_freeze()) {
                entered_state = cpuidle_enter_freeze(drv, dev);
-               if (entered_state >= 0) {
+               if (entered_state > 0) {
                        local_irq_enable();
                        goto exit_idle;
                }
index 580ac2d4024ffbdb29960ccdd86424fa730b1e78..15a1795bbba17d1140e3220c41d48dd26ac43c95 100644 (file)
@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);
+
+               /*
+                * Don't let an unprivileged task work around
+                * the no_new_privs restriction by creating
+                * a thread that sets it up, enters seccomp,
+                * then dies.
+                */
+               if (task_no_new_privs(caller))
+                       task_set_no_new_privs(thread);
+
                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
-               if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
-                       /*
-                        * Don't let an unprivileged task work around
-                        * the no_new_privs restriction by creating
-                        * a thread that sets it up, enters seccomp,
-                        * then dies.
-                        */
-                       if (task_no_new_privs(caller))
-                               task_set_no_new_privs(thread);
-
+               if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
-               }
        }
 }
 
index f3f1f7a972fd40f3d437d6bf3b5db2cacaffe6ca..0508544c8ced0d96913905dc53af68f38b6ee618 100644 (file)
@@ -3508,8 +3508,10 @@ static int sigsuspend(sigset_t *set)
        current->saved_sigmask = current->blocked;
        set_current_blocked(set);
 
-       __set_current_state(TASK_INTERRUPTIBLE);
-       schedule();
+       while (!signal_pending(current)) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+       }
        set_restore_sigmask();
        return -ERESTARTNOHAND;
 }
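
The sigsuspend() fix above wraps schedule() in a while (!signal_pending(current)) loop so a wakeup without a pending signal simply goes back to sleep. The same recheck-the-predicate discipline applies to any blocking wait; a runnable userspace analogue with a condition variable (all names below are invented for the illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool event_pending;

static void *waiter(void *arg)
{
        pthread_mutex_lock(&lock);
        /* Re-test the predicate after every wakeup: pthread_cond_wait() may
         * return spuriously, just as schedule() may return without a signal. */
        while (!event_pending)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        printf("event observed\n");
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, waiter, NULL);
        sleep(1);
        pthread_mutex_lock(&lock);
        event_pending = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
}
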
index 435b8850dd80a300c11ad128bc0cf51e3c23e15a..fa909f9fd5591801968523032e00f8fe816fb23f 100644 (file)
@@ -897,10 +897,10 @@ static int enqueue_hrtimer(struct hrtimer *timer,
  */
 static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
-                            unsigned long newstate, int reprogram)
+                            u8 newstate, int reprogram)
 {
        struct hrtimer_cpu_base *cpu_base = base->cpu_base;
-       unsigned int state = timer->state;
+       u8 state = timer->state;
 
        timer->state = newstate;
        if (!(state & HRTIMER_STATE_ENQUEUED))
@@ -930,7 +930,7 @@ static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
 {
        if (hrtimer_is_queued(timer)) {
-               unsigned long state = timer->state;
+               u8 state = timer->state;
                int reprogram;
 
                /*
@@ -954,6 +954,22 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
        return 0;
 }
 
+static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
+                                           const enum hrtimer_mode mode)
+{
+#ifdef CONFIG_TIME_LOW_RES
+       /*
+        * CONFIG_TIME_LOW_RES indicates that the system has no way to return
+        * granular time values. For relative timers we add hrtimer_resolution
+        * (i.e. one jiffie) to prevent short timeouts.
+        */
+       timer->is_rel = mode & HRTIMER_MODE_REL;
+       if (timer->is_rel)
+               tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
+#endif
+       return tim;
+}
+
 /**
  * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
  * @timer:     the timer to be added
@@ -974,19 +990,10 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        /* Remove an active timer from the queue: */
        remove_hrtimer(timer, base, true);
 
-       if (mode & HRTIMER_MODE_REL) {
+       if (mode & HRTIMER_MODE_REL)
                tim = ktime_add_safe(tim, base->get_time());
-               /*
-                * CONFIG_TIME_LOW_RES is a temporary way for architectures
-                * to signal that they simply return xtime in
-                * do_gettimeoffset(). In this case we want to round up by
-                * resolution when starting a relative timer, to avoid short
-                * timeouts. This will go away with the GTOD framework.
-                */
-#ifdef CONFIG_TIME_LOW_RES
-               tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
-#endif
-       }
+
+       tim = hrtimer_update_lowres(timer, tim, mode);
 
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
@@ -1074,19 +1081,23 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
 /**
  * hrtimer_get_remaining - get remaining time for the timer
  * @timer:     the timer to read
+ * @adjust:    adjust relative timers when CONFIG_TIME_LOW_RES=y
  */
-ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
 {
        unsigned long flags;
        ktime_t rem;
 
        lock_hrtimer_base(timer, &flags);
-       rem = hrtimer_expires_remaining(timer);
+       if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
+               rem = hrtimer_expires_remaining_adjusted(timer);
+       else
+               rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
        return rem;
 }
-EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
+EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
 
 #ifdef CONFIG_NO_HZ_COMMON
 /**
@@ -1219,6 +1230,14 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
        timer_stats_account_hrtimer(timer);
        fn = timer->function;
 
+       /*
+        * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
+        * timer is restarted with a period then it becomes an absolute
+        * timer. If it's not restarted it does not matter.
+        */
+       if (IS_ENABLED(CONFIG_TIME_LOW_RES))
+               timer->is_rel = false;
+
        /*
         * Because we run timers from hardirq context, there is no chance
         * they get migrated to another cpu, therefore its safe to unlock
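
The hrtimer changes above factor the CONFIG_TIME_LOW_RES handling into hrtimer_update_lowres(), which pads relative timers by one resolution unit so a coarse clock cannot make them expire short, and clears is_rel before the handler runs. A tiny standalone sketch of the padding idea; the 10 ms resolution and the helper name below are assumed example values, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC   1000000LL

/* Assumed low-res tick length for the sketch: one 10 ms jiffy. */
static const int64_t resolution_ns = 10 * NSEC_PER_MSEC;

/* Turn a relative delay into an absolute expiry, padded by one tick so a
 * coarse clock can never make the timer fire short. */
static int64_t lowres_expiry_ns(int64_t now_ns, int64_t delta_ns)
{
        return now_ns + delta_ns + resolution_ns;
}

int main(void)
{
        int64_t now = 0;
        int64_t delta = 5 * NSEC_PER_MSEC;      /* request: at least 5 ms */

        /* Even if 'now' is already up to one jiffy stale, the timer still
         * waits the full 5 ms before it may fire. */
        printf("expires at %lld ns\n", (long long)lowres_expiry_ns(now, delta));
        return 0;
}
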
index 8d262b4675738d21bea752d65a47117c3d8ae514..1d5c7204ddc97bce881431b399e58741961c6391 100644 (file)
@@ -26,7 +26,7 @@
  */
 static struct timeval itimer_get_remtime(struct hrtimer *timer)
 {
-       ktime_t rem = hrtimer_get_remaining(timer);
+       ktime_t rem = __hrtimer_get_remaining(timer, true);
 
        /*
         * Racy but safe: if the itimer expires after the above
index 36f2ca09aa5e458bf1f31e3ec018e180dcc5b345..6df8927c58a5bbedcab9efceac7c21985bec0bf0 100644 (file)
@@ -685,8 +685,18 @@ int ntp_validate_timex(struct timex *txc)
                if (!capable(CAP_SYS_TIME))
                        return -EPERM;
 
-               if (!timeval_inject_offset_valid(&txc->time))
-                       return -EINVAL;
+               if (txc->modes & ADJ_NANO) {
+                       struct timespec ts;
+
+                       ts.tv_sec = txc->time.tv_sec;
+                       ts.tv_nsec = txc->time.tv_usec;
+                       if (!timespec_inject_offset_valid(&ts))
+                               return -EINVAL;
+
+               } else {
+                       if (!timeval_inject_offset_valid(&txc->time))
+                               return -EINVAL;
+               }
        }
 
        /*
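
The ntp_validate_timex() hunk above validates the injected offset as nanoseconds when ADJ_NANO is set and as microseconds otherwise. A minimal sketch of that dual-range check; the struct and helper below are simplified stand-ins, not the kernel's timespec/timeval validators:

#include <stdbool.h>
#include <stdio.h>

#define NSEC_PER_SEC    1000000000L
#define USEC_PER_SEC    1000000L

struct inject_time {            /* simplified stand-in for timex's time field */
        long tv_sec;
        long tv_usec;           /* microseconds, or nanoseconds under ADJ_NANO */
};

static bool inject_offset_valid(const struct inject_time *t, bool adj_nano)
{
        long limit = adj_nano ? NSEC_PER_SEC : USEC_PER_SEC;

        /* The sub-second part must stay below one second in its own unit. */
        return t->tv_usec >= 0 && t->tv_usec < limit;
}

int main(void)
{
        struct inject_time t = { 0, 500000000 };        /* 0.5 s if nanoseconds */

        printf("valid as usec: %d, valid as nsec: %d\n",
               inject_offset_valid(&t, false), inject_offset_valid(&t, true));
        return 0;
}
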
index 31d11ac9fa4739789728c44470b82115ec307d11..f2826c35e9185f60586f06dff53f51ca3324c659 100644 (file)
@@ -760,7 +760,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
            (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
                timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
 
-       remaining = ktime_sub(hrtimer_get_expires(timer), now);
+       remaining = __hrtimer_expires_remaining_adjusted(timer, now);
        /* Return 0 only, when the timer is expired and not pending */
        if (remaining.tv64 <= 0) {
                /*
index 9d7a053545f5aca7a324f3530ee52ca9dac413f8..0b17424349eb4dafd76a2cf588748988ce51a295 100644 (file)
  */
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
-/*
- * The time, when the last jiffy update happened. Protected by jiffies_lock.
- */
-static ktime_t last_jiffies_update;
-
 struct tick_sched *tick_get_tick_sched(int cpu)
 {
        return &per_cpu(tick_cpu_sched, cpu);
 }
 
+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+/*
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
+ */
+static ktime_t last_jiffies_update;
+
 /*
  * Must be called with interrupts disabled !
  */
@@ -151,6 +152,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
        update_process_times(user_mode(regs));
        profile_tick(CPU_PROFILING);
 }
+#endif
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
@@ -993,9 +995,9 @@ static void tick_nohz_switch_to_nohz(void)
        /* Get the next period */
        next = tick_init_jiffy_update();
 
-       hrtimer_forward_now(&ts->sched_timer, tick_period);
        hrtimer_set_expires(&ts->sched_timer, next);
-       tick_program_event(next, 1);
+       hrtimer_forward_now(&ts->sched_timer, tick_period);
+       tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
        tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
 
index f75e35b6014900da71ffa438a507c34f19e36e3a..ba7d8b288bb37250ce9e9c0e5f562b45832c4ca9 100644 (file)
@@ -69,7 +69,7 @@ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
        print_name_offset(m, taddr);
        SEQ_printf(m, ", ");
        print_name_offset(m, timer->function);
-       SEQ_printf(m, ", S:%02lx", timer->state);
+       SEQ_printf(m, ", S:%02x", timer->state);
 #ifdef CONFIG_TIMER_STATS
        SEQ_printf(m, ", ");
        print_name_offset(m, timer->start_site);
index 45dd798bcd37e7bd529e61932e51a30366590626..326a75e884dbf3f46513538861a028c7be459f09 100644 (file)
@@ -191,14 +191,17 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
        struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct perf_event *event;
+       struct file *file;
 
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       event = (struct perf_event *)array->ptrs[index];
-       if (!event)
+       file = (struct file *)array->ptrs[index];
+       if (unlikely(!file))
                return -ENOENT;
 
+       event = file->private_data;
+
        /* make sure event is local and doesn't have pmu::count */
        if (event->oncpu != smp_processor_id() ||
            event->pmu->count)
@@ -228,6 +231,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
        void *data = (void *) (long) r4;
        struct perf_sample_data sample_data;
        struct perf_event *event;
+       struct file *file;
        struct perf_raw_record raw = {
                .size = size,
                .data = data,
@@ -236,10 +240,12 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       event = (struct perf_event *)array->ptrs[index];
-       if (unlikely(!event))
+       file = (struct file *)array->ptrs[index];
+       if (unlikely(!file))
                return -ENOENT;
 
+       event = file->private_data;
+
        if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
                     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
                return -EINVAL;
index eb4220a132ecd1be9002cf35edf82870969d6c82..81b87451c0ea145f93ba41e47dacc6a2acec8189 100644 (file)
@@ -15,4 +15,5 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle);
 
index 87fb9801bd9eead0d6e45bef49bc1eefc324c6a7..d9293402ee685ae59007fe1ade71866e121a0882 100644 (file)
@@ -1751,7 +1751,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 {
        __buffer_unlock_commit(buffer, event);
 
-       ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
+       ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
index dda9e6742950305f36fbe920f9fe0c6f68d83fbf..202df6cffccaab333c02facc56b28082b09e5e42 100644 (file)
@@ -125,6 +125,13 @@ check_stack(unsigned long ip, unsigned long *stack)
                        break;
        }
 
+       /*
+        * Some archs may not have the passed in ip in the dump.
+        * If that happens, we need to show everything.
+        */
+       if (i == stack_trace_max.nr_entries)
+               i = 0;
+
        /*
         * Now find where in the stack these are.
         */
index 61a0264e28f9b5917c0e8a60bb9668b21e59c4b2..7ff5dc7d2ac5f47395bc3be8484c642c5d577b6b 100644 (file)
@@ -301,7 +301,23 @@ static DEFINE_SPINLOCK(wq_mayday_lock);    /* protects wq->maydays list */
 static LIST_HEAD(workqueues);          /* PR: list of all workqueues */
 static bool workqueue_freezing;                /* PL: have wqs started freezing? */
 
-static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */
+/* PL: allowable cpus for unbound wqs and work items */
+static cpumask_var_t wq_unbound_cpumask;
+
+/* CPU where unbound work was last round robin scheduled from this CPU */
+static DEFINE_PER_CPU(int, wq_rr_cpu_last);
+
+/*
+ * Local execution of unbound work items is no longer guaranteed.  The
+ * following always forces round-robin CPU selection on unbound work items
+ * to uncover usages which depend on it.
+ */
+#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
+static bool wq_debug_force_rr_cpu = true;
+#else
+static bool wq_debug_force_rr_cpu = false;
+#endif
+module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
@@ -570,6 +586,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                  int node)
 {
        assert_rcu_or_wq_mutex_or_pool_mutex(wq);
+
+       /*
+        * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
+        * delayed item is pending.  The plan is to keep CPU -> NODE
+        * mapping valid and stable across CPU on/offlines.  Once that
+        * happens, this workaround can be removed.
+        */
+       if (unlikely(node == NUMA_NO_NODE))
+               return wq->dfl_pwq;
+
        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 }
 
@@ -1298,6 +1324,39 @@ static bool is_chained_work(struct workqueue_struct *wq)
        return worker && worker->current_pwq->wq == wq;
 }
 
+/*
+ * When queueing an unbound work item to a wq, prefer local CPU if allowed
+ * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
+ * avoid perturbing sensitive tasks.
+ */
+static int wq_select_unbound_cpu(int cpu)
+{
+       static bool printed_dbg_warning;
+       int new_cpu;
+
+       if (likely(!wq_debug_force_rr_cpu)) {
+               if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+                       return cpu;
+       } else if (!printed_dbg_warning) {
+               pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
+               printed_dbg_warning = true;
+       }
+
+       if (cpumask_empty(wq_unbound_cpumask))
+               return cpu;
+
+       new_cpu = __this_cpu_read(wq_rr_cpu_last);
+       new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
+       if (unlikely(new_cpu >= nr_cpu_ids)) {
+               new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
+               if (unlikely(new_cpu >= nr_cpu_ids))
+                       return cpu;
+       }
+       __this_cpu_write(wq_rr_cpu_last, new_cpu);
+
+       return new_cpu;
+}
+
 static void __queue_work(int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
@@ -1323,7 +1382,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                return;
 retry:
        if (req_cpu == WORK_CPU_UNBOUND)
-               cpu = raw_smp_processor_id();
+               cpu = wq_select_unbound_cpu(raw_smp_processor_id());
 
        /* pwq which will be used unless @work is executing elsewhere */
        if (!(wq->flags & WQ_UNBOUND))
@@ -1464,13 +1523,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        timer_stats_timer_set_start_info(&dwork->timer);
 
        dwork->wq = wq;
-       /* timer isn't guaranteed to run in this cpu, record earlier */
-       if (cpu == WORK_CPU_UNBOUND)
-               cpu = raw_smp_processor_id();
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
-       add_timer_on(timer, cpu);
+       if (unlikely(cpu != WORK_CPU_UNBOUND))
+               add_timer_on(timer, cpu);
+       else
+               add_timer(timer);
 }
 
 /**
@@ -2355,7 +2414,8 @@ static void check_flush_dependency(struct workqueue_struct *target_wq,
        WARN_ONCE(current->flags & PF_MEMALLOC,
                  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
                  current->pid, current->comm, target_wq->name, target_func);
-       WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM),
+       WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
+                             (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
                  "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
                  worker->current_pwq->wq->name, worker->current_func,
                  target_wq->name, target_func);
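
The workqueue hunks above add wq_select_unbound_cpu(): unbound work prefers the local CPU when wq_unbound_cpumask allows it and otherwise round-robins over the allowed CPUs. A self-contained simulation of that selection policy; a plain bool array stands in for the cpumask, and the cpu_online_mask intersection and per-CPU last-choice storage of the real code are deliberately omitted:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static bool allowed[NR_CPUS] = { false, false, true, false, true, true, false, false };
static int  rr_last = -1;       /* per-CPU in the kernel; a single global here */

/* Pick the submitting CPU if it is allowed, else round-robin over the
 * allowed set, wrapping back to the first allowed CPU. */
static int select_unbound_cpu(int this_cpu)
{
        if (allowed[this_cpu])
                return this_cpu;

        for (int i = 1; i <= NR_CPUS; i++) {
                int cpu = (rr_last + i) % NR_CPUS;

                if (allowed[cpu]) {
                        rr_last = cpu;
                        return cpu;
                }
        }
        return this_cpu;        /* allowed mask empty: fall back to local */
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("queued from cpu0 -> runs on cpu%d\n", select_unbound_cpu(0));
        printf("queued from cpu4 -> runs on cpu%d\n", select_unbound_cpu(4));
        return 0;
}

With CPUs 2, 4 and 5 allowed, work queued from the disallowed CPU 0 cycles 2, 4, 5, 2, 4, while work queued from the allowed CPU 4 stays local.
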
index ecb9e75614bf87f1e368074d6ef84bfe3f1003b8..8bfd1aca7a3d01a9887700b3f90d1d5697a9dab4 100644 (file)
@@ -1400,6 +1400,21 @@ config RCU_EQS_DEBUG
 
 endmenu # "RCU Debugging"
 
+config DEBUG_WQ_FORCE_RR_CPU
+       bool "Force round-robin CPU selection for unbound work items"
+       depends on DEBUG_KERNEL
+       default n
+       help
+         Workqueue used to implicitly guarantee that work items queued
+         without explicit CPU specified are put on the local CPU.  This
+         guarantee is no longer true and while local CPU is still
+         preferred work items may be put on foreign CPUs.  Kernel
+         preferred, work items may be put on foreign CPUs.  Kernel
+         round-robin CPU selection to flush out usages which depend on the
+         now broken guarantee.  This config option enables the debug
+         feature by default.  When enabled, memory and cache locality will
+         be impacted.
+
 config DEBUG_BLOCK_EXT_DEVT
         bool "Force extended block device numbers and spread them"
        depends on DEBUG_KERNEL
index 547f7f923dbcbd24f8bb99e13ef7c13db947b8dd..519b5a10fd704dd412c7cc9c7327fcb2da3c6f0e 100644 (file)
@@ -21,7 +21,7 @@
 #define ODEBUG_HASH_BITS       14
 #define ODEBUG_HASH_SIZE       (1 << ODEBUG_HASH_BITS)
 
-#define ODEBUG_POOL_SIZE       512
+#define ODEBUG_POOL_SIZE       1024
 #define ODEBUG_POOL_MIN_LEVEL  256
 
 #define ODEBUG_CHUNK_SHIFT     PAGE_SHIFT
index 6745c6230db3403629048256968443f51b777655..c30d07e99dba4cc32be6aeb4d353d79058509b4b 100644 (file)
@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
 
 asmlinkage __visible void dump_stack(void)
 {
+       unsigned long flags;
        int was_locked;
        int old;
        int cpu;
@@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
         * Permit this cpu to perform nested stack dumps while serialising
         * against other CPUs
         */
-       preempt_disable();
-
 retry:
+       local_irq_save(flags);
        cpu = smp_processor_id();
        old = atomic_cmpxchg(&dump_lock, -1, cpu);
        if (old == -1) {
@@ -43,6 +43,7 @@ retry:
        } else if (old == cpu) {
                was_locked = 1;
        } else {
+               local_irq_restore(flags);
                cpu_relax();
                goto retry;
        }
@@ -52,7 +53,7 @@ retry:
        if (!was_locked)
                atomic_set(&dump_lock, -1);
 
-       preempt_enable();
+       local_irq_restore(flags);
 }
 #else
 asmlinkage __visible void dump_stack(void)
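
The dump_stack() change above switches from preempt_disable() to local_irq_save()/local_irq_restore() around the cmpxchg-based serialisation and re-enables interrupts before spinning on another CPU's dump, while still letting the CPU that already owns dump_lock nest its own dumps. A userspace sketch of just the nest-aware cmpxchg lock part, with a caller id in place of the CPU number; the names below are invented and this is not the kernel code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_lock = -1;       /* -1 == unowned, else owner id */

/* Returns 1 if the lock was already held by 'me' (a nested call). */
static int dump_lock_acquire(int me)
{
        int expected;

retry:
        expected = -1;
        if (atomic_compare_exchange_strong(&dump_lock, &expected, me))
                return 0;               /* freshly acquired */
        if (expected == me)
                return 1;               /* nested call by the current owner */
        goto retry;                     /* held by someone else: spin */
}

static void dump_lock_release(int me, int was_nested)
{
        if (!was_nested)
                atomic_store(&dump_lock, -1);
}

int main(void)
{
        int outer = dump_lock_acquire(0);       /* takes the lock */
        int inner = dump_lock_acquire(0);       /* same owner: nests */

        printf("outer=%d inner=%d\n", outer, inner);
        dump_lock_release(0, inner);
        dump_lock_release(0, outer);
        return 0;
}
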
index d74cf7a29afdb043112fee5c9ad7b37a631a9985..0507fa5d84c534917d0842a453bdbcaba386422a 100644 (file)
@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
                          struct klist_node *n)
 {
        i->i_klist = k;
-       i->i_cur = n;
-       if (n)
-               kref_get(&n->n_ref);
+       i->i_cur = NULL;
+       if (n && kref_get_unless_zero(&n->n_ref))
+               i->i_cur = n;
 }
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
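
The klist_iter_init_node() fix above only latches the starting node if kref_get_unless_zero() succeeds, so an iterator can no longer resurrect a node whose last reference is already gone. A generic sketch of the "increment unless zero" refcount primitive in C11 atomics; this is an illustration of the idea, not the kernel's kref implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Raise the count only if it has not already hit zero. */
static bool refcount_inc_not_zero(atomic_int *ref)
{
        int old = atomic_load(ref);

        while (old != 0) {
                if (atomic_compare_exchange_weak(ref, &old, old + 1))
                        return true;    /* raised from a live count */
                /* 'old' was reloaded by the failed CAS; re-check and retry */
        }
        return false;                   /* object is already on its way out */
}

int main(void)
{
        atomic_int live = 2, dead = 0;

        printf("live: %d, dead: %d\n",
               refcount_inc_not_zero(&live), refcount_inc_not_zero(&dead));
        return 0;
}
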
 
index fcf5d98574ce46871dca087d2c803dbfb67c0b81..6b79e9026e24894000a2bdf4180db80f8190b357 100644 (file)
@@ -1019,9 +1019,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                return 0;
 
        radix_tree_for_each_slot(slot, root, &iter, first_index) {
-               results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+               results[ret] = rcu_dereference_raw(*slot);
                if (!results[ret])
                        continue;
+               if (radix_tree_is_indirect_ptr(results[ret])) {
+                       slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
                if (++ret == max_items)
                        break;
        }
@@ -1098,9 +1102,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
                return 0;
 
        radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
-               results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+               results[ret] = rcu_dereference_raw(*slot);
                if (!results[ret])
                        continue;
+               if (radix_tree_is_indirect_ptr(results[ret])) {
+                       slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
                if (++ret == max_items)
                        break;
        }
index bafa9933fa768d6f76e4ade1296c873a3c38c139..004fc70fc56a3d06947f9e89c60e40e272ce551c 100644 (file)
@@ -598,9 +598,9 @@ EXPORT_SYMBOL(sg_miter_next);
  *
  * Description:
  *   Stops mapping iterator @miter.  @miter should have been started
- *   started using sg_miter_start().  A stopped iteration can be
- *   resumed by calling sg_miter_next() on it.  This is useful when
- *   resources (kmap) need to be released during iteration.
+ *   using sg_miter_start().  A stopped iteration can be resumed by
+ *   calling sg_miter_next() on it.  This is useful when resources (kmap)
+ *   need to be released during iteration.
  *
  * Context:
  *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
index 98866a770770c8bba0acf7bdbacea4699d9f14bf..25b5cbfb7615bd63da19677c877b9e49c3f519e3 100644 (file)
@@ -327,36 +327,67 @@ out:
 }
 
 #define string_get_size_maxbuf 16
-#define test_string_get_size_one(size, blk_size, units, exp_result)            \
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2)    \
        do {                                                                   \
-               BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf);    \
-               __test_string_get_size((size), (blk_size), (units),            \
-                                      (exp_result));                          \
+               BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf);  \
+               BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf);   \
+               __test_string_get_size((size), (blk_size), (exp_result10),     \
+                                      (exp_result2));                         \
        } while (0)
 
 
-static __init void __test_string_get_size(const u64 size, const u64 blk_size,
-                                         const enum string_size_units units,
-                                         const char *exp_result)
+static __init void test_string_get_size_check(const char *units,
+                                             const char *exp,
+                                             char *res,
+                                             const u64 size,
+                                             const u64 blk_size)
 {
-       char buf[string_get_size_maxbuf];
-
-       string_get_size(size, blk_size, units, buf, sizeof(buf));
-       if (!memcmp(buf, exp_result, strlen(exp_result) + 1))
+       if (!memcmp(res, exp, strlen(exp) + 1))
                return;
 
-       buf[sizeof(buf) - 1] = '\0';
-       pr_warn("Test 'test_string_get_size_one' failed!\n");
-       pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n",
+       res[string_get_size_maxbuf - 1] = '\0';
+
+       pr_warn("Test 'test_string_get_size' failed!\n");
+       pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
                size, blk_size, units);
-       pr_warn("expected: '%s', got '%s'\n", exp_result, buf);
+       pr_warn("expected: '%s', got '%s'\n", exp, res);
+}
+
+static __init void __test_string_get_size(const u64 size, const u64 blk_size,
+                                         const char *exp_result10,
+                                         const char *exp_result2)
+{
+       char buf10[string_get_size_maxbuf];
+       char buf2[string_get_size_maxbuf];
+
+       string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
+       string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+
+       test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
+                                  size, blk_size);
+
+       test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
+                                  size, blk_size);
 }
 
 static __init void test_string_get_size(void)
 {
-       test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB");
-       test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB");
-       test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B");
+       /* small values */
+       test_string_get_size_one(0, 512, "0 B", "0 B");
+       test_string_get_size_one(1, 512, "512 B", "512 B");
+       test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB");
+
+       /* normal values */
+       test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB");
+       test_string_get_size_one(500118192, 512, "256 GB", "238 GiB");
+       test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB");
+
+       /* weird block sizes */
+       test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB");
+
+       /* huge values */
+       test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB");
+       test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
 }
 
 static int __init test_string_helpers_init(void)
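
The reworked test above exercises string_get_size() for both STRING_UNITS_10 and STRING_UNITS_2 on the same input. A rough standalone illustration of the difference between the two unit systems; this is a simplified floating-point formatter, not the kernel helper, so rounding at the edges may differ from the expected strings in the test:

#include <stdint.h>
#include <stdio.h>

/* Render 'bytes' with the given unit step (1000 for SI, 1024 for binary). */
static void format_size(uint64_t bytes, unsigned int step, const char *const units[])
{
        double val = (double)bytes;
        int i = 0;

        while (val >= step && units[i + 1]) {
                val /= step;
                i++;
        }
        /* Roughly three significant digits, like the kernel helper. */
        printf("%.*f %s\n", val < 10 ? 2 : (val < 100 ? 1 : 0), val, units[i]);
}

int main(void)
{
        static const char *const si[]  = { "B", "kB", "MB", "GB", "TB", NULL };
        static const char *const bin[] = { "B", "KiB", "MiB", "GiB", "TiB", NULL };
        uint64_t size = 16384ULL * 512;         /* 8 MiB worth of 512-byte blocks */

        format_size(size, 1000, si);            /* prints "8.39 MB"  */
        format_size(size, 1024, bin);           /* prints "8.00 MiB" */
        return 0;
}
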
index 97a4e06b15c00dfd7f8b8bc0f1e1e4610b769938..03cbfa072f42a7378ed57b8b1060641b31e7d97e 100644 (file)
@@ -624,7 +624,7 @@ config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
        bool
 
 config DEFERRED_STRUCT_PAGE_INIT
-       bool "Defer initialisation of struct pages to kswapd"
+       bool "Defer initialisation of struct pages to kthreads"
        default n
        depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
        depends on MEMORY_HOTPLUG
@@ -633,9 +633,10 @@ config DEFERRED_STRUCT_PAGE_INIT
          single thread. On very large machines this can take a considerable
          amount of time. If this option is set, large machines will bring up
          a subset of memmap at boot and then initialise the rest in parallel
-         when kswapd starts. This has a potential performance impact on
-         processes running early in the lifetime of the systemm until kswapd
-         finishes the initialisation.
+         by starting a one-off "pgdatinitX" kernel thread for each node X. This
+         has a potential performance impact on processes running early in the
+         lifetime of the system until these kthreads finish the
+         initialisation.
 
 config IDLE_PAGE_TRACKING
        bool "Enable idle page tracking"
index cc5d29d2da9b4b36be3fde383267ff10c59e1422..926c76d56388d12ea723b68add1e5b7ab6613842 100644 (file)
@@ -989,7 +989,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
                 * here rather than calling cond_resched().
                 */
                if (current->flags & PF_WQ_WORKER)
-                       schedule_timeout(1);
+                       schedule_timeout_uninterruptible(1);
                else
                        cond_resched();
 
index 8fc50811119b28b54f15ac875ddc956217acc570..ba5d8f3e6d68a3769589c372adc3183b4addfb40 100644 (file)
@@ -22,7 +22,7 @@
  * cleancache_ops is set by cleancache_register_ops to contain the pointers
  * to the cleancache "backend" implementation functions.
  */
-static struct cleancache_ops *cleancache_ops __read_mostly;
+static const struct cleancache_ops *cleancache_ops __read_mostly;
 
 /*
  * Counters available via /sys/kernel/debug/cleancache (if debugfs is
@@ -49,7 +49,7 @@ static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
 /*
  * Register operations for cleancache. Returns 0 on success.
  */
-int cleancache_register_ops(struct cleancache_ops *ops)
+int cleancache_register_ops(const struct cleancache_ops *ops)
 {
        if (cmpxchg(&cleancache_ops, NULL, ops))
                return -EBUSY;
index b64a36175884e07604b0e216bc2d545a2892dcb7..7bf19ffa21999c13fa1f24dc01a6bda77217688c 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -430,10 +430,8 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
                         * Anon pages in shared mappings are surprising: now
                         * just reject it.
                         */
-                       if (!is_cow_mapping(vm_flags)) {
-                               WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
+                       if (!is_cow_mapping(vm_flags))
                                return -EFAULT;
-                       }
                }
        } else if (!(vm_flags & VM_READ)) {
                if (!(gup_flags & FOLL_FORCE))
index fd3a07b3e6f4e086b2111c312a78512bed927f9b..aea8f7a42df97d7185f626d5bbc445c64f376eb1 100644 (file)
@@ -138,9 +138,6 @@ static struct khugepaged_scan khugepaged_scan = {
        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
-static DEFINE_SPINLOCK(split_queue_lock);
-static LIST_HEAD(split_queue);
-static unsigned long split_queue_len;
 static struct shrinker deferred_split_shrinker;
 
 static void set_recommended_min_free_kbytes(void)
@@ -861,7 +858,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                return false;
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
-       pgtable_trans_huge_deposit(mm, pmd, pgtable);
+       if (pgtable)
+               pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        atomic_long_inc(&mm->nr_ptes);
        return true;
@@ -1039,13 +1037,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        spinlock_t *dst_ptl, *src_ptl;
        struct page *src_page;
        pmd_t pmd;
-       pgtable_t pgtable;
+       pgtable_t pgtable = NULL;
        int ret;
 
-       ret = -ENOMEM;
-       pgtable = pte_alloc_one(dst_mm, addr);
-       if (unlikely(!pgtable))
-               goto out;
+       if (!vma_is_dax(vma)) {
+               ret = -ENOMEM;
+               pgtable = pte_alloc_one(dst_mm, addr);
+               if (unlikely(!pgtable))
+                       goto out;
+       }
 
        dst_ptl = pmd_lock(dst_mm, dst_pmd);
        src_ptl = pmd_lockptr(src_mm, src_pmd);
@@ -1076,7 +1076,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                goto out_unlock;
        }
 
-       if (pmd_trans_huge(pmd)) {
+       if (!vma_is_dax(vma)) {
                /* thp accounting separate from pmd_devmap accounting */
                src_page = pmd_page(pmd);
                VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
@@ -2860,6 +2860,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        young = pmd_young(*pmd);
        dirty = pmd_dirty(*pmd);
 
+       pmdp_huge_split_prepare(vma, haddr, pmd);
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);
 
@@ -3358,6 +3359,7 @@ int total_mapcount(struct page *page)
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct page *head = compound_head(page);
+       struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
        struct anon_vma *anon_vma;
        int count, mapcount, ret;
        bool mlocked;
@@ -3401,19 +3403,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                lru_add_drain();
 
        /* Prevent deferred_split_scan() touching ->_count */
-       spin_lock_irqsave(&split_queue_lock, flags);
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        count = page_count(head);
        mapcount = total_mapcount(head);
        if (!mapcount && count == 1) {
                if (!list_empty(page_deferred_list(head))) {
-                       split_queue_len--;
+                       pgdata->split_queue_len--;
                        list_del(page_deferred_list(head));
                }
-               spin_unlock_irqrestore(&split_queue_lock, flags);
+               spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
                __split_huge_page(page, list);
                ret = 0;
        } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-               spin_unlock_irqrestore(&split_queue_lock, flags);
+               spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
                pr_alert("total_mapcount: %u, page_count(): %u\n",
                                mapcount, count);
                if (PageTail(page))
@@ -3421,7 +3423,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                dump_page(page, "total_mapcount(head) > 0");
                BUG();
        } else {
-               spin_unlock_irqrestore(&split_queue_lock, flags);
+               spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
                unfreeze_page(anon_vma, head);
                ret = -EBUSY;
        }
@@ -3436,64 +3438,65 @@ out:
 
 void free_transhuge_page(struct page *page)
 {
+       struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
        unsigned long flags;
 
-       spin_lock_irqsave(&split_queue_lock, flags);
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        if (!list_empty(page_deferred_list(page))) {
-               split_queue_len--;
+               pgdata->split_queue_len--;
                list_del(page_deferred_list(page));
        }
-       spin_unlock_irqrestore(&split_queue_lock, flags);
+       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
        free_compound_page(page);
 }
 
 void deferred_split_huge_page(struct page *page)
 {
+       struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
        unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 
-       spin_lock_irqsave(&split_queue_lock, flags);
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        if (list_empty(page_deferred_list(page))) {
-               list_add_tail(page_deferred_list(page), &split_queue);
-               split_queue_len++;
+               list_add_tail(page_deferred_list(page), &pgdata->split_queue);
+               pgdata->split_queue_len++;
        }
-       spin_unlock_irqrestore(&split_queue_lock, flags);
+       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
                struct shrink_control *sc)
 {
-       /*
-        * Split a page from split_queue will free up at least one page,
-        * at most HPAGE_PMD_NR - 1. We don't track exact number.
-        * Let's use HPAGE_PMD_NR / 2 as ballpark.
-        */
-       return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
+       struct pglist_data *pgdata = NODE_DATA(sc->nid);
+       return ACCESS_ONCE(pgdata->split_queue_len);
 }
 
 static unsigned long deferred_split_scan(struct shrinker *shrink,
                struct shrink_control *sc)
 {
+       struct pglist_data *pgdata = NODE_DATA(sc->nid);
        unsigned long flags;
        LIST_HEAD(list), *pos, *next;
        struct page *page;
        int split = 0;
 
-       spin_lock_irqsave(&split_queue_lock, flags);
-       list_splice_init(&split_queue, &list);
-
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        /* Take pin on all head pages to avoid freeing them under us */
-       list_for_each_safe(pos, next, &list) {
+       list_for_each_safe(pos, next, &pgdata->split_queue) {
                page = list_entry((void *)pos, struct page, mapping);
                page = compound_head(page);
-               /* race with put_compound_page() */
-               if (!get_page_unless_zero(page)) {
+               if (get_page_unless_zero(page)) {
+                       list_move(page_deferred_list(page), &list);
+               } else {
+                       /* We lost race with put_compound_page() */
                        list_del_init(page_deferred_list(page));
-                       split_queue_len--;
+                       pgdata->split_queue_len--;
                }
+               if (!--sc->nr_to_scan)
+                       break;
        }
-       spin_unlock_irqrestore(&split_queue_lock, flags);
+       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
        list_for_each_safe(pos, next, &list) {
                page = list_entry((void *)pos, struct page, mapping);
@@ -3505,17 +3508,24 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
                put_page(page);
        }
 
-       spin_lock_irqsave(&split_queue_lock, flags);
-       list_splice_tail(&list, &split_queue);
-       spin_unlock_irqrestore(&split_queue_lock, flags);
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+       list_splice_tail(&list, &pgdata->split_queue);
+       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
-       return split * HPAGE_PMD_NR / 2;
+       /*
+        * Stop shrinker if we didn't split any page, and the queue is empty.
+        * This can happen if pages were freed under us.
+        */
+       if (!split && list_empty(&pgdata->split_queue))
+               return SHRINK_STOP;
+       return split;
 }
 
 static struct shrinker deferred_split_shrinker = {
        .count_objects = deferred_split_count,
        .scan_objects = deferred_split_scan,
        .seeks = DEFAULT_SEEKS,
+       .flags = SHRINKER_NUMA_AWARE,
 };
 
 #ifdef CONFIG_DEBUG_FS
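
The hunks above move the deferred-split queue from a single global list into the per-node pglist_data and mark the shrinker SHRINKER_NUMA_AWARE; the scan pins what it can under the per-node lock, moves those entries onto a private list, drops the lock for the actual splitting, and splices survivors back. Below is a minimal user-space sketch of that "detach under lock, work unlocked" pattern, with an illustrative node count and a pthread mutex standing in for the spinlock; the splice-back of survivors is omitted and every name is invented for illustration, not kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_NODES 2

struct item { struct item *next; int id; };

/* Per-node deferred queue, mirroring the per-node split_queue in spirit. */
struct node_queue {
        pthread_mutex_t lock;
        struct item *head;
        unsigned long len;
} nodes[NR_NODES];

static void enqueue(int nid, struct item *it)
{
        pthread_mutex_lock(&nodes[nid].lock);
        it->next = nodes[nid].head;
        nodes[nid].head = it;
        nodes[nid].len++;
        pthread_mutex_unlock(&nodes[nid].lock);
}

/* Scan one node: detach up to nr_to_scan items under the lock,
 * then process them with the lock dropped. */
static unsigned long scan_node(int nid, unsigned long nr_to_scan)
{
        struct node_queue *nq = &nodes[nid];
        struct item *batch = NULL, *it;
        unsigned long done = 0;

        pthread_mutex_lock(&nq->lock);
        while (nq->head && nr_to_scan--) {
                it = nq->head;
                nq->head = it->next;
                nq->len--;
                it->next = batch;
                batch = it;
        }
        pthread_mutex_unlock(&nq->lock);

        while (batch) {         /* expensive work happens without the lock */
                it = batch;
                batch = it->next;
                printf("node %d: split item %d\n", nid, it->id);
                free(it);
                done++;
        }
        return done;
}

int main(void)
{
        for (int nid = 0; nid < NR_NODES; nid++)
                pthread_mutex_init(&nodes[nid].lock, NULL);

        for (int i = 0; i < 6; i++) {
                struct item *it = malloc(sizeof(*it));
                it->id = i;
                enqueue(i % NR_NODES, it);
        }
        for (int nid = 0; nid < NR_NODES; nid++)
                printf("node %d processed %lu items\n", nid, scan_node(nid, 4));
        return 0;
}
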
index 12908dcf58316afd3f4b14d379a8e139249cd396..06ae13e869d0f088cdddc0862aa315ce8d488443 100644 (file)
@@ -1001,7 +1001,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
                nr_nodes--)
 
-#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
 static void destroy_compound_gigantic_page(struct page *page,
                                        unsigned int order)
 {
@@ -1214,8 +1214,8 @@ void free_huge_page(struct page *page)
 
        set_page_private(page, 0);
        page->mapping = NULL;
-       BUG_ON(page_count(page));
-       BUG_ON(page_mapcount(page));
+       VM_BUG_ON_PAGE(page_count(page), page);
+       VM_BUG_ON_PAGE(page_mapcount(page), page);
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);
 
@@ -1286,6 +1286,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
                set_page_count(p, 0);
                set_compound_head(p, page);
        }
+       atomic_set(compound_mapcount_ptr(page), -1);
 }
 
 /*
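
The added atomic_set(compound_mapcount_ptr(page), -1) relies on the convention that a page's mapcount is stored biased by one, so a freshly initialised value of -1 reads back as zero mappings. A tiny stand-alone sketch of that biased-counter convention follows; the type and helper names are illustrative, not the kernel's.

#include <stdio.h>

/* Illustrative biased counter: stored value is (real count - 1),
 * so -1 encodes "not mapped at all". */
struct mapcount { int stored; };

static void mapcount_init(struct mapcount *m)        { m->stored = -1; }
static void mapcount_inc(struct mapcount *m)         { m->stored++; }
static void mapcount_dec(struct mapcount *m)         { m->stored--; }
static int  mapcount_read(const struct mapcount *m)  { return m->stored + 1; }

int main(void)
{
        struct mapcount m;

        mapcount_init(&m);
        printf("after init: %d mappings\n", mapcount_read(&m));     /* 0 */
        mapcount_inc(&m);
        mapcount_inc(&m);
        printf("after two maps: %d mappings\n", mapcount_read(&m)); /* 2 */
        mapcount_dec(&m);
        printf("after one unmap: %d mappings\n", mapcount_read(&m));/* 1 */
        return 0;
}
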
index ed8b5ffcf9b16fbfcf3ccba0d182957980ad45ab..a38a21ebddb454540fc1dcae83be516ffc7cdf26 100644 (file)
@@ -216,6 +216,37 @@ static inline bool is_cow_mapping(vm_flags_t flags)
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+/*
+ * These three helpers classify VMAs for virtual memory accounting.
+ */
+
+/*
+ * Executable code area - executable, not writable, not stack
+ */
+static inline bool is_exec_mapping(vm_flags_t flags)
+{
+       return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
+}
+
+/*
+ * Stack area - automatically grows in one direction
+ *
+ * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
+ * do_mmap() forbids all other combinations.
+ */
+static inline bool is_stack_mapping(vm_flags_t flags)
+{
+       return (flags & VM_STACK) == VM_STACK;
+}
+
+/*
+ * Data area - private, writable, not stack
+ */
+static inline bool is_data_mapping(vm_flags_t flags)
+{
+       return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
+}
+
 /* mm/util.c */
 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent);
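
The three new helpers above classify a VMA purely from its flag bits: executable but neither writable nor stack, stack (the VM_GROWSUP/VM_GROWSDOWN case), or private writable non-stack data; together with is_cow_mapping() they are plain mask-and-compare tests. A stand-alone sketch of the same mask logic follows, with made-up flag values (the real VM_* constants differ), so the bit assignments are assumptions for illustration only.

#include <stdio.h>

/* Illustrative flag bits; the kernel's VM_* values differ. */
#define VM_READ     0x1u
#define VM_WRITE    0x2u
#define VM_EXEC     0x4u
#define VM_SHARED   0x8u
#define VM_MAYWRITE 0x10u
#define VM_STACK    0x20u       /* stands in for VM_GROWSUP/VM_GROWSDOWN */

typedef unsigned int vm_flags_t;

static int is_cow_mapping(vm_flags_t f)
{
        /* private (not shared) but allowed to become writable */
        return (f & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
static int is_exec_mapping(vm_flags_t f)
{
        return (f & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}
static int is_stack_mapping(vm_flags_t f)
{
        return (f & VM_STACK) == VM_STACK;
}
static int is_data_mapping(vm_flags_t f)
{
        return (f & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

int main(void)
{
        vm_flags_t text  = VM_READ | VM_EXEC;
        vm_flags_t stack = VM_READ | VM_WRITE | VM_MAYWRITE | VM_STACK;
        vm_flags_t heap  = VM_READ | VM_WRITE | VM_MAYWRITE;

        printf("text:  exec=%d stack=%d data=%d cow=%d\n",
               is_exec_mapping(text), is_stack_mapping(text),
               is_data_mapping(text), is_cow_mapping(text));
        printf("stack: exec=%d stack=%d data=%d cow=%d\n",
               is_exec_mapping(stack), is_stack_mapping(stack),
               is_data_mapping(stack), is_cow_mapping(stack));
        printf("heap:  exec=%d stack=%d data=%d cow=%d\n",
               is_exec_mapping(heap), is_stack_mapping(heap),
               is_data_mapping(heap), is_cow_mapping(heap));
        return 0;
}
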
index d2ed81e59a94c3f81674ead9e84d51ca8ebd9950..dd7989929f13ae16e265bff30fd958f1f13993cb 100644 (file)
@@ -1448,7 +1448,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
  * Remaining API functions
  */
 
-phys_addr_t __init memblock_phys_mem_size(void)
+phys_addr_t __init_memblock memblock_phys_mem_size(void)
 {
        return memblock.memory.total_size;
 }
index 30991f83d0bf54dc537f1927a10883aa5787dd97..635451abc8f7c9b84663535678a26a9b0848c89d 100644 (file)
@@ -1591,10 +1591,15 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
         * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
         * without pte special, it would there be refcounted as a normal page.
         */
-       if (!HAVE_PTE_SPECIAL && pfn_t_valid(pfn)) {
+       if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
                struct page *page;
 
-               page = pfn_t_to_page(pfn);
+               /*
+                * At this point we are committed to insert_page()
+                * regardless of whether the caller specified flags that
+                * result in pfn_t_has_page() == false.
+                */
+               page = pfn_to_page(pfn_t_to_pfn(pfn));
                return insert_page(vma, addr, page, vma->vm_page_prot);
        }
        return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
@@ -2232,11 +2237,6 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 
        page_cache_get(old_page);
 
-       /*
-        * Only catch write-faults on shared writable pages,
-        * read-only shared pages can get COWed by
-        * get_user_pages(.write=1, .force=1).
-        */
        if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
                int tmp;
 
index 27d135408a22057a5b166e7eb2bf2e080bbd33f2..4c4187c0e1deeb25bdbf5eb383132d27feb9be90 100644 (file)
@@ -548,8 +548,7 @@ retry:
                        goto retry;
                }
 
-               if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-                       migrate_page_add(page, qp->pagelist, flags);
+               migrate_page_add(page, qp->pagelist, flags);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
@@ -625,7 +624,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
        unsigned long endvma = vma->vm_end;
        unsigned long flags = qp->flags;
 
-       if (vma->vm_flags & VM_PFNMAP)
+       if (!vma_migratable(vma))
                return 1;
 
        if (endvma > end)
@@ -644,16 +643,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 
        if (flags & MPOL_MF_LAZY) {
                /* Similar to task_numa_work, skip inaccessible VMAs */
-               if (vma_migratable(vma) &&
-                       vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
+               if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
                        change_prot_numa(vma, start, endvma);
                return 1;
        }
 
-       if ((flags & MPOL_MF_STRICT) ||
-           ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
-            vma_migratable(vma)))
-               /* queue pages from current vma */
+       /* queue pages from current vma */
+       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                return 0;
        return 1;
 }
index 84b12624ceb01d83762172634179825b086961fd..2f2415a7a688da595a996a01d0ae0e1f0f955454 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -42,6 +42,7 @@
 #include <linux/memory.h>
 #include <linux/printk.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/moduleparam.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -69,6 +70,8 @@ const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
 #endif
 
+static bool ignore_rlimit_data = true;
+core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
 
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -387,8 +390,9 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_DEBUG_VM_RB
-static int browse_rb(struct rb_root *root)
+static int browse_rb(struct mm_struct *mm)
 {
+       struct rb_root *root = &mm->mm_rb;
        int i = 0, j, bug = 0;
        struct rb_node *nd, *pn = NULL;
        unsigned long prev = 0, pend = 0;
@@ -411,12 +415,14 @@ static int browse_rb(struct rb_root *root)
                                  vma->vm_start, vma->vm_end);
                        bug = 1;
                }
+               spin_lock(&mm->page_table_lock);
                if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
                        pr_emerg("free gap %lx, correct %lx\n",
                               vma->rb_subtree_gap,
                               vma_compute_subtree_gap(vma));
                        bug = 1;
                }
+               spin_unlock(&mm->page_table_lock);
                i++;
                pn = nd;
                prev = vma->vm_start;
@@ -453,12 +459,16 @@ static void validate_mm(struct mm_struct *mm)
        struct vm_area_struct *vma = mm->mmap;
 
        while (vma) {
+               struct anon_vma *anon_vma = vma->anon_vma;
                struct anon_vma_chain *avc;
 
-               vma_lock_anon_vma(vma);
-               list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-                       anon_vma_interval_tree_verify(avc);
-               vma_unlock_anon_vma(vma);
+               if (anon_vma) {
+                       anon_vma_lock_read(anon_vma);
+                       list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+                               anon_vma_interval_tree_verify(avc);
+                       anon_vma_unlock_read(anon_vma);
+               }
+
                highest_address = vma->vm_end;
                vma = vma->vm_next;
                i++;
@@ -472,7 +482,7 @@ static void validate_mm(struct mm_struct *mm)
                          mm->highest_vm_end, highest_address);
                bug = 1;
        }
-       i = browse_rb(&mm->mm_rb);
+       i = browse_rb(mm);
        if (i != mm->map_count) {
                if (i != -1)
                        pr_emerg("map_count %d rb %d\n", mm->map_count, i);
@@ -2139,32 +2149,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
-       int error;
+       int error = 0;
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
-       /*
-        * We must make sure the anon_vma is allocated
-        * so that the anon_vma locking is not a noop.
-        */
+       /* Guard against wrapping around to address 0. */
+       if (address < PAGE_ALIGN(address+4))
+               address = PAGE_ALIGN(address+4);
+       else
+               return -ENOMEM;
+
+       /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
-       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
-        * Also guard against wrapping around to address 0.
         */
-       if (address < PAGE_ALIGN(address+4))
-               address = PAGE_ALIGN(address+4);
-       else {
-               vma_unlock_anon_vma(vma);
-               return -ENOMEM;
-       }
-       error = 0;
+       anon_vma_lock_write(vma->anon_vma);
 
        /* Somebody else might have raced and expanded it already */
        if (address > vma->vm_end) {
@@ -2182,7 +2187,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                 * updates, but we only hold a shared mmap_sem
                                 * lock here, so we need to protect against
                                 * concurrent vma expansions.
-                                * vma_lock_anon_vma() doesn't help here, as
+                                * anon_vma_lock_write() doesn't help here, as
                                 * we don't guarantee that all growable vmas
                                 * in a mm share the same root anon vma.
                                 * So, we reuse mm->page_table_lock to guard
@@ -2205,7 +2210,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                        }
                }
        }
-       vma_unlock_anon_vma(vma);
+       anon_vma_unlock_write(vma->anon_vma);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
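
The reshuffled expand_upwards() above now performs the wrap-around check before any locking: the stack may grow only if PAGE_ALIGN(address + 4) still lies above address; if the arithmetic wraps past the top of the address space, the aligned result is not above address and the expansion is refused with -ENOMEM. A stand-alone sketch of that overflow test, assuming a 4 KiB page size for illustration:

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
/* Round up to the next page boundary; wraps to 0 on overflow. */
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

/* Mirror of the guard: grow only if aligning (address + 4) moves forward. */
static int grow_ok(unsigned long address, unsigned long *new_end)
{
        unsigned long aligned = PAGE_ALIGN(address + 4);

        if (address < aligned) {
                *new_end = aligned;     /* no wrap: safe to expand up to here */
                return 1;
        }
        return 0;                       /* wrapped past the top of the address space */
}

int main(void)
{
        unsigned long new_end = 0;
        unsigned long normal = 0x7ffff000UL;
        unsigned long top = ~0UL - 2;   /* address + 4 wraps around */
        int ok = grow_ok(normal, &new_end);

        printf("normal: ok=%d new_end=0x%lx\n", ok, new_end);
        printf("top:    ok=%d\n", grow_ok(top, &new_end));
        return 0;
}
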
@@ -2221,25 +2226,21 @@ int expand_downwards(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        int error;
 
-       /*
-        * We must make sure the anon_vma is allocated
-        * so that the anon_vma locking is not a noop.
-        */
-       if (unlikely(anon_vma_prepare(vma)))
-               return -ENOMEM;
-
        address &= PAGE_MASK;
        error = security_mmap_addr(address);
        if (error)
                return error;
 
-       vma_lock_anon_vma(vma);
+       /* We must make sure the anon_vma is allocated. */
+       if (unlikely(anon_vma_prepare(vma)))
+               return -ENOMEM;
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
         */
+       anon_vma_lock_write(vma->anon_vma);
 
        /* Somebody else might have raced and expanded it already */
        if (address < vma->vm_start) {
@@ -2257,7 +2258,7 @@ int expand_downwards(struct vm_area_struct *vma,
                                 * updates, but we only hold a shared mmap_sem
                                 * lock here, so we need to protect against
                                 * concurrent vma expansions.
-                                * vma_lock_anon_vma() doesn't help here, as
+                                * anon_vma_lock_write() doesn't help here, as
                                 * we don't guarantee that all growable vmas
                                 * in a mm share the same root anon vma.
                                 * So, we reuse mm->page_table_lock to guard
@@ -2278,7 +2279,7 @@ int expand_downwards(struct vm_area_struct *vma,
                        }
                }
        }
-       vma_unlock_anon_vma(vma);
+       anon_vma_unlock_write(vma->anon_vma);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
@@ -2982,9 +2983,17 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
        if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
                return false;
 
-       if ((flags & (VM_WRITE | VM_SHARED | (VM_STACK_FLAGS &
-                               (VM_GROWSUP | VM_GROWSDOWN)))) == VM_WRITE)
-               return mm->data_vm + npages <= rlimit(RLIMIT_DATA);
+       if (is_data_mapping(flags) &&
+           mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
+               if (ignore_rlimit_data)
+                       pr_warn_once("%s (%d): VmData %lu exceed data ulimit "
+                                    "%lu. Will be forbidden soon.\n",
+                                    current->comm, current->pid,
+                                    (mm->data_vm + npages) << PAGE_SHIFT,
+                                    rlimit(RLIMIT_DATA));
+               else
+                       return false;
+       }
 
        return true;
 }
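
may_expand_vm() above compares page counts against byte-valued rlimits, so RLIMIT_DATA has to be shifted down by PAGE_SHIFT before the comparison, and the warning path shifts the page count back up to report bytes. A small sketch of that unit conversion, assuming 4 KiB pages and hypothetical numbers:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, for illustration */

/* Hypothetical check: would mapping npages more data pages stay
 * within a byte-valued data limit? */
static int data_fits(unsigned long data_vm_pages, unsigned long npages,
                     unsigned long rlimit_data_bytes)
{
        return data_vm_pages + npages <= (rlimit_data_bytes >> PAGE_SHIFT);
}

int main(void)
{
        unsigned long limit = 8UL << 20;        /* 8 MiB data limit */
        unsigned long data_vm = 1500;           /* pages already accounted */
        unsigned long want = 600;               /* pages to add */

        /* 8 MiB is 2048 pages; 1500 + 600 = 2100 > 2048, so this is refused. */
        printf("fits: %d (would use %lu bytes of %lu)\n",
               data_fits(data_vm, want, limit),
               (data_vm + want) << PAGE_SHIFT, limit);
        return 0;
}
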
@@ -2993,11 +3002,11 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
 {
        mm->total_vm += npages;
 
-       if ((flags & (VM_EXEC | VM_WRITE)) == VM_EXEC)
+       if (is_exec_mapping(flags))
                mm->exec_vm += npages;
-       else if (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN)))
+       else if (is_stack_mapping(flags))
                mm->stack_vm += npages;
-       else if ((flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+       else if (is_data_mapping(flags))
                mm->data_vm += npages;
 }
 
index 63358d9f9aa98eff0848879b0503b5d80b9ea8d0..838ca8bb64f7376062fc6670c759a27d7f59c7e3 100644 (file)
@@ -5209,6 +5209,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
        spin_lock_init(&pgdat->numabalancing_migrate_lock);
        pgdat->numabalancing_migrate_nr_pages = 0;
        pgdat->numabalancing_migrate_next_window = jiffies;
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       spin_lock_init(&pgdat->split_queue_lock);
+       INIT_LIST_HEAD(&pgdat->split_queue);
+       pgdat->split_queue_len = 0;
 #endif
        init_waitqueue_head(&pgdat->kswapd_wait);
        init_waitqueue_head(&pgdat->pfmemalloc_wait);
@@ -6615,7 +6620,7 @@ bool is_pageblock_removable_nolock(struct page *page)
        return !has_unmovable_pages(zone, page, 0, true);
 }
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 
 static unsigned long pfn_max_align_down(unsigned long pfn)
 {
index c108a6542d05d3e1c880c1f6ab540ced62156e54..4fb14ca5a41967696a6f769189eb5a59c6a91c0f 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -230,36 +230,11 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /* Check if the vma is being used as a stack by this task */
-static int vm_is_stack_for_task(struct task_struct *t,
-                               struct vm_area_struct *vma)
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
 {
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
-/*
- * Check if the vma is being used as a stack.
- * If is_group is non-zero, check in the entire thread group or else
- * just check in the current task. Returns the task_struct of the task
- * that the vma is stack for. Must be called under rcu_read_lock().
- */
-struct task_struct *task_of_stack(struct task_struct *task,
-                               struct vm_area_struct *vma, bool in_group)
-{
-       if (vm_is_stack_for_task(task, vma))
-               return task;
-
-       if (in_group) {
-               struct task_struct *t;
-
-               for_each_thread(task, t) {
-                       if (vm_is_stack_for_task(t, vma))
-                               return t;
-               }
-       }
-
-       return NULL;
-}
-
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
index 9a6c0704211c856c300f67057fc202e25c9657ab..149fdf6c5c56f927f3613538c61b3c5831c80af9 100644 (file)
@@ -248,9 +248,8 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
 
        if (tree) {
                spin_lock(&vmpr->sr_lock);
-               vmpr->tree_scanned += scanned;
+               scanned = vmpr->tree_scanned += scanned;
                vmpr->tree_reclaimed += reclaimed;
-               scanned = vmpr->scanned;
                spin_unlock(&vmpr->sr_lock);
 
                if (scanned < vmpressure_win)
index eb3dd37ccd7c727dcc0b6030f62c183097b956bb..71b1c29948dba30aab0a894ddc7c84eb62acde2b 100644 (file)
@@ -1443,7 +1443,7 @@ int isolate_lru_page(struct page *page)
        int ret = -EBUSY;
 
        VM_BUG_ON_PAGE(!page_count(page), page);
-       VM_BUG_ON_PAGE(PageTail(page), page);
+       WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
 
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
index 40b2c74ddf16d69abc52574c469d9f22dd49cf03..084c6725b3734430483e7ea4fcf74e2ab67f7bfa 100644 (file)
@@ -1396,10 +1396,15 @@ static void vmstat_update(struct work_struct *w)
                 * Counters were updated so we expect more updates
                 * to occur in the future. Keep on running the
                 * update worker thread.
+                * If we were marked on cpu_stat_off clear the flag
+                * so that vmstat_shepherd doesn't schedule us again.
                 */
-               queue_delayed_work_on(smp_processor_id(), vmstat_wq,
-                       this_cpu_ptr(&vmstat_work),
-                       round_jiffies_relative(sysctl_stat_interval));
+               if (!cpumask_test_and_clear_cpu(smp_processor_id(),
+                                               cpu_stat_off)) {
+                       queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+                               this_cpu_ptr(&vmstat_work),
+                               round_jiffies_relative(sysctl_stat_interval));
+               }
        } else {
                /*
                 * We did not update any counters so the app may be in
@@ -1417,18 +1422,6 @@ static void vmstat_update(struct work_struct *w)
  * until the diffs stay at zero. The function is used by NOHZ and can only be
  * invoked when tick processing is not active.
  */
-void quiet_vmstat(void)
-{
-       if (system_state != SYSTEM_RUNNING)
-               return;
-
-       do {
-               if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
-                       cancel_delayed_work(this_cpu_ptr(&vmstat_work));
-
-       } while (refresh_cpu_vm_stats(false));
-}
-
 /*
  * Check if the diffs for a certain cpu indicate that
  * an update is needed.
@@ -1452,6 +1445,30 @@ static bool need_update(int cpu)
        return false;
 }
 
+void quiet_vmstat(void)
+{
+       if (system_state != SYSTEM_RUNNING)
+               return;
+
+       /*
+        * If we are already in hands of the shepherd then there
+        * is nothing for us to do here.
+        */
+       if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
+               return;
+
+       if (!need_update(smp_processor_id()))
+               return;
+
+       /*
+        * Just refresh counters and do not care about the pending delayed
+        * vmstat_update. It doesn't fire that often to matter and canceling
+        * it would be too expensive from this path.
+        * vmstat_shepherd will take care about that for us.
+        */
+       refresh_cpu_vm_stats(false);
+}
+
 
 /*
  * Shepherd worker thread that checks the
@@ -1469,18 +1486,25 @@ static void vmstat_shepherd(struct work_struct *w)
 
        get_online_cpus();
        /* Check processors whose vmstat worker threads have been disabled */
-       for_each_cpu(cpu, cpu_stat_off)
-               if (need_update(cpu) &&
-                       cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
-
-                       queue_delayed_work_on(cpu, vmstat_wq,
-                               &per_cpu(vmstat_work, cpu), 0);
+       for_each_cpu(cpu, cpu_stat_off) {
+               struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
+               if (need_update(cpu)) {
+                       if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
+                               queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+               } else {
+                       /*
+                        * Cancel the work if quiet_vmstat has put this
+                        * cpu on cpu_stat_off because the work item might
+                        * be still scheduled
+                        */
+                       cancel_delayed_work(dw);
+               }
+       }
        put_online_cpus();
 
        schedule_delayed_work(&shepherd,
                round_jiffies_relative(sysctl_stat_interval));
-
 }
 
 static void __init start_shepherd_timer(void)
@@ -1488,7 +1512,7 @@ static void __init start_shepherd_timer(void)
        int cpu;
 
        for_each_possible_cpu(cpu)
-               INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
+               INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
                        vmstat_update);
 
        if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
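
The vmstat rework above coordinates quiet_vmstat(), vmstat_update() and vmstat_shepherd() through the cpu_stat_off mask: cpumask_test_and_set_cpu() lets exactly one caller park a CPU and cpumask_test_and_clear_cpu() lets exactly one caller take it back, so only the winner of the bit operation touches the delayed work. A minimal sketch of that test-and-set / test-and-clear handoff using C11 atomics on a plain bitmask; the function names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

/* One bit per CPU: set means "vmstat work is parked on this CPU". */
static atomic_ulong cpu_stat_off = 0;

/* Return the previous value of the bit, then set it. */
static int test_and_set_cpu(int cpu)
{
        unsigned long bit = 1UL << cpu;
        return (atomic_fetch_or(&cpu_stat_off, bit) & bit) != 0;
}

/* Return the previous value of the bit, then clear it. */
static int test_and_clear_cpu(int cpu)
{
        unsigned long bit = 1UL << cpu;
        return (atomic_fetch_and(&cpu_stat_off, ~bit) & bit) != 0;
}

static void quiet_cpu(int cpu)
{
        /* Already parked: nothing to do, mirrors the early return above. */
        if (test_and_set_cpu(cpu)) {
                printf("cpu%d: already parked\n", cpu);
                return;
        }
        printf("cpu%d: parked, no more periodic work\n", cpu);
}

static void shepherd_check(int cpu, int needs_update)
{
        /* Only the caller that clears the bit may requeue the work. */
        if (needs_update && test_and_clear_cpu(cpu))
                printf("cpu%d: shepherd requeues vmstat work\n", cpu);
        else
                printf("cpu%d: shepherd leaves it parked\n", cpu);
}

int main(void)
{
        quiet_cpu(1);
        quiet_cpu(1);           /* second call sees the bit already set */
        shepherd_check(1, 1);   /* takes ownership back exactly once */
        shepherd_check(1, 1);   /* bit already cleared: nothing to requeue */
        return 0;
}
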
index c6fc8f756c9aa500d1efa61de95b5757d2dccee8..2dd40e5ea030a1bbebdfbcad89ccdda78dfd36e9 100644 (file)
@@ -12,7 +12,7 @@ config BATMAN_ADV
           B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
           a routing protocol for multi-hop ad-hoc mesh networks. The
           networks may be wired or wireless. See
-          http://www.open-mesh.org/ for more information and user space
+          https://www.open-mesh.org/ for more information and user space
           tools.
 
 config BATMAN_ADV_BLA
index 21434ab79d2ce7ad4a959be69aebcaac7771233e..207e2af316c7bcfc64955a711bc4dc756ab46b9c 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
 #
index 4e59cf3eb079ec0c1d1836a0be39e95901146b91..a7485d676088c367eb56b98a7030aca21c29374d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index df625de55ef2226fcc79a9dca11dc978ebe6d7df..3266bcb5bb06a550fedfeaca6b5962821e549193 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -88,7 +88,7 @@ static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
  * in the given ring buffer
  * @lq_recv: pointer to the ring buffer
  *
- * Returns computed average value.
+ * Return: computed average value.
  */
 static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
 {
@@ -132,7 +132,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  *
- * Returns 0 on success, a negative error code otherwise.
+ * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
                                     int max_if_num)
@@ -180,7 +180,7 @@ unlock:
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
  *
- * Returns 0 on success, a negative error code otherwise.
+ * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
                                     int max_if_num, int del_if_num)
@@ -246,7 +246,7 @@ unlock:
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of the originator
  *
- * Returns the originator object corresponding to the passed mac address or NULL
+ * Return: the originator object corresponding to the passed mac address or NULL
  * on failure.
  * If the object does not exists it is created an initialised.
  */
@@ -396,7 +396,14 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
        return new_tq;
 }
 
-/* is there another aggregated packet here? */
+/**
+ * batadv_iv_ogm_aggr_packet - checks if there is another OGM attached
+ * @buff_pos: current position in the skb
+ * @packet_len: total length of the skb
+ * @tvlv_len: tvlv length of the previously considered OGM
+ *
+ * Return: true if there is enough space for another OGM, false otherwise.
+ */
 static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
                                      __be16 tvlv_len)
 {
@@ -522,7 +529,7 @@ out:
  * @if_outgoing: interface for which the retransmission should be considered
  * @forw_packet: the forwarded packet which should be checked
  *
- * Returns true if new_packet can be aggregated with forw_packet
+ * Return: true if new_packet can be aggregated with forw_packet
  */
 static bool
 batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
@@ -1125,7 +1132,7 @@ out:
  * @if_incoming: interface where the packet was received
  * @if_outgoing: interface for which the retransmission should be considered
  *
- * Returns 1 if the link can be considered bidirectional, 0 otherwise
+ * Return: 1 if the link can be considered bidirectional, 0 otherwise
  */
 static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
                                 struct batadv_orig_node *orig_neigh_node,
@@ -1269,7 +1276,7 @@ out:
  * @if_incoming: interface on which the OGM packet was received
  * @if_outgoing: interface for which the retransmission should be considered
  *
- * Returns duplicate status as enum batadv_dup_status
+ * Return: duplicate status as enum batadv_dup_status
  */
 static enum batadv_dup_status
 batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
@@ -1929,7 +1936,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
  * @neigh2: the second neighbor object of the comparison
  * @if_outgoing2: outgoing interface for the second neighbor
  *
- * Returns a value less, equal to or greater than 0 if the metric via neigh1 is
+ * Return: a value less, equal to or greater than 0 if the metric via neigh1 is
  * lower, the same as or higher than the metric via neigh2
  */
 static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
@@ -1970,7 +1977,7 @@ out:
  * @neigh2: the second neighbor object of the comparison
  * @if_outgoing2: outgoing interface for the second neighbor
  *
- * Returns true if the metric via neigh1 is equally good or better than
+ * Return: true if the metric via neigh1 is equally good or better than
  * the metric via neigh2, false otherwise.
  */
 static bool
index 25cbc36e997adab14dca5e3be3ee832ce57acccb..b56bb000a0abcbb2fe547612d12009a18e69d530 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -29,10 +29,16 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
        bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
 }
 
-/* receive and process one packet within the sequence number window.
+/**
+ * batadv_bit_get_packet - receive and process one packet within the sequence
+ *  number window
+ * @priv: the bat priv with all the soft interface information
+ * @seq_bits: pointer to the sequence number receive packet
+ * @seq_num_diff: difference between the current/received sequence number and
+ *  the last sequence number
+ * @set_mark: whether this packet should be marked in seq_bits
  *
- * returns:
- *  1 if the window was moved (either new or very old)
+ * Return: 1 if the window was moved (either new or very old),
  *  0 if the window was not moved/shifted.
  */
 int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
index 0226b220fe5b0bda455eb81ccc6a7e495bf296e7..3e41bb80eb81ac34dce4c3c34fa33a3643b19bdc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
 #include <linux/compiler.h>
 #include <linux/types.h>
 
-/* Returns 1 if the corresponding bit in the given seq_bits indicates true
+/**
+ * batadv_test_bit - check if bit is set in the current window
+ *
+ * @seq_bits: pointer to the sequence number receive packet
+ * @last_seqno: latest sequence number in seq_bits
+ * @curr_seqno: sequence number to test for
+ *
+ * Return: 1 if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno. Otherwise returns 0.
  */
 static inline int batadv_test_bit(const unsigned long *seq_bits,
@@ -48,9 +55,6 @@ static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
        set_bit(n, seq_bits); /* turn the position on */
 }
 
-/* receive and process one packet, returns 1 if received seq_num is considered
- * new, 0 if old
- */
 int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
                          int set_mark);
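
batadv_test_bit(), documented above, answers whether curr_seqno falls inside the receive window ending at last_seqno and whether its bit is already set in seq_bits. A stand-alone sketch of the same window test with a 64-entry window kept in a single uint64_t; the window size and bitmap layout are assumptions for illustration, not the protocol's actual values.

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 64  /* illustrative; not the protocol's actual value */

/* Bit i tracks the packet with sequence number (last_seqno - i). */
static int window_test_bit(uint64_t seq_bits, uint32_t last_seqno,
                           uint32_t curr_seqno)
{
        int32_t diff = (int32_t)(last_seqno - curr_seqno);

        if (diff < 0 || diff >= WINDOW_SIZE)
                return 0;                       /* outside the window */
        return (seq_bits >> diff) & 1;          /* already received? */
}

static void window_mark(uint64_t *seq_bits, uint32_t last_seqno,
                        uint32_t curr_seqno)
{
        int32_t diff = (int32_t)(last_seqno - curr_seqno);

        if (diff >= 0 && diff < WINDOW_SIZE)
                *seq_bits |= 1ULL << diff;
}

int main(void)
{
        uint64_t seq_bits = 0;
        uint32_t last_seqno = 1000;

        window_mark(&seq_bits, last_seqno, 998);
        printf("seen 998? %d\n", window_test_bit(seq_bits, last_seqno, 998));   /* 1 */
        printf("seen 999? %d\n", window_test_bit(seq_bits, last_seqno, 999));   /* 0 */
        printf("seen 1001? %d\n", window_test_bit(seq_bits, last_seqno, 1001)); /* 0: newer */
        printf("seen 900? %d\n", window_test_bit(seq_bits, last_seqno, 900));   /* 0: too old */
        return 0;
}
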
 
index c24c481b666f776c864e31eefe92fdc2ca5fd779..77916093484464d20111f7b6b3cbf8e15926220c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -58,7 +58,13 @@ static void
 batadv_bla_send_announce(struct batadv_priv *bat_priv,
                         struct batadv_bla_backbone_gw *backbone_gw);
 
-/* return the index of the claim */
+/**
+ * batadv_choose_claim - choose the right bucket for a claim.
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Return: the hash index of the claim
+ */
 static inline u32 batadv_choose_claim(const void *data, u32 size)
 {
        struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
@@ -70,7 +76,13 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
        return hash % size;
 }
 
-/* return the index of the backbone gateway */
+/**
+ * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Return: the hash index of the backbone gateway
+ */
 static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
 {
        const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
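
The kernel-doc added above for batadv_choose_claim() and batadv_choose_backbone_gw() describes bucket selection: hash the claim's address and VLAN id, then reduce the result with hash % size. A stand-alone sketch of that bucket selection with an invented key struct and a simple FNV-style fold standing in for the kernel's hash function:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical claim key: client MAC plus VLAN id (the real structs differ). */
struct claim_key {
        uint8_t addr[6];
        uint16_t vid;
};

/* Simple FNV-1a style fold over the key bytes; it only stands in
 * to show how a hash value is turned into a bucket index. */
static uint32_t hash_bytes(const uint8_t *data, size_t len, uint32_t hash)
{
        for (size_t i = 0; i < len; i++) {
                hash ^= data[i];
                hash *= 16777619u;
        }
        return hash;
}

static uint32_t choose_bucket(const struct claim_key *key, uint32_t size)
{
        uint32_t hash = 2166136261u;

        hash = hash_bytes(key->addr, sizeof(key->addr), hash);
        hash = hash_bytes((const uint8_t *)&key->vid, sizeof(key->vid), hash);
        return hash % size;     /* same final step as the functions above */
}

int main(void)
{
        struct claim_key a = { { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 }, 100 };
        struct claim_key b = { { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 }, 200 };

        printf("bucket(a) = %u\n", choose_bucket(&a, 64));
        printf("bucket(b) = %u\n", choose_bucket(&b, 64)); /* same MAC, other VLAN */
        return 0;
}
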
@@ -82,7 +94,13 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
        return hash % size;
 }
 
-/* compares address and vid of two backbone gws */
+/**
+ * batadv_compare_backbone_gw - compare address and vid of two backbone gws
+ * @node: list node of the first entry to compare
+ * @data2: pointer to the second backbone gateway
+ *
+ * Return: 1 if the backbones have the same data, 0 otherwise
+ */
 static int batadv_compare_backbone_gw(const struct hlist_node *node,
                                      const void *data2)
 {
@@ -100,7 +118,13 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node,
        return 1;
 }
 
-/* compares address and vid of two claims */
+/**
+ * batadv_compare_claim - compare address and vid of two claims
+ * @node: list node of the first entry to compare
+ * @data2: pointer to the second claim
+ *
+ * Return: 1 if the claims have the same data, 0 otherwise
+ */
 static int batadv_compare_claim(const struct hlist_node *node,
                                const void *data2)
 {
@@ -118,7 +142,10 @@ static int batadv_compare_claim(const struct hlist_node *node,
        return 1;
 }
 
-/* free a backbone gw */
+/**
+ * batadv_backbone_gw_free_ref - decrement the backbone gw refcounter and possibly release it
+ * @backbone_gw: backbone gateway to be free'd
+ */
 static void
 batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
 {
@@ -126,14 +153,22 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
                kfree_rcu(backbone_gw, rcu);
 }
 
-/* finally deinitialize the claim */
+/**
+ * batadv_claim_release - release claim from lists and queue for free after rcu
+ *  grace period
+ * @claim: claim to be released
+ */
 static void batadv_claim_release(struct batadv_bla_claim *claim)
 {
        batadv_backbone_gw_free_ref(claim->backbone_gw);
        kfree_rcu(claim, rcu);
 }
 
-/* free a claim, call claim_free_rcu if its the last reference */
+/**
+ * batadv_claim_free_ref - decrement the claim refcounter and possibly
+ *  release it
+ * @claim: claim to be free'd
+ */
 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 {
        if (atomic_dec_and_test(&claim->refcount))
@@ -141,12 +176,11 @@ static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_claim_hash_find
+ * batadv_claim_hash_find - looks for a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @data: search data (may be local/static data)
  *
- * looks for a claim in the hash, and returns it if found
- * or NULL otherwise.
+ * Return: claim if found or NULL otherwise.
  */
 static struct batadv_bla_claim
 *batadv_claim_hash_find(struct batadv_priv *bat_priv,
@@ -181,12 +215,12 @@ static struct batadv_bla_claim
 }
 
 /**
- * batadv_backbone_hash_find - looks for a claim in the hash
+ * batadv_backbone_hash_find - looks for a backbone gateway in the hash
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  * @vid: the VLAN ID
  *
- * Returns claim if found or NULL otherwise.
+ * Return: backbone gateway if found or NULL otherwise
  */
 static struct batadv_bla_backbone_gw *
 batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
@@ -224,7 +258,10 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
        return backbone_gw_tmp;
 }
 
-/* delete all claims for a backbone */
+/**
+ * batadv_bla_del_backbone_claims - delete all claims for a backbone
+ * @backbone_gw: backbone gateway where the claims should be removed
+ */
 static void
 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 {
@@ -372,14 +409,13 @@ out:
 }
 
 /**
- * batadv_bla_get_backbone_gw
+ * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
  * @own_backbone: set if the requested backbone is local
  *
- * searches for the backbone gw or creates a new one if it could not
- * be found.
+ * Return: the (possibly created) backbone gateway or NULL on error
  */
 static struct batadv_bla_backbone_gw *
 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
@@ -445,7 +481,13 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
        return entry;
 }
 
-/* update or add the own backbone gw to make sure we announce
+/**
+ * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface
+ * @vid: VLAN identifier
+ *
+ * update or add the own backbone gw to make sure we announce
  * where we receive other backbone gws
  */
 static void
@@ -542,12 +584,9 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_announce
+ * batadv_bla_send_announce - Send an announcement frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: our backbone gateway which should be announced
- *
- * This function sends an announcement. It is called from multiple
- * places.
  */
 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
                                     struct batadv_bla_backbone_gw *backbone_gw)
@@ -637,8 +676,11 @@ claim_free_ref:
        batadv_claim_free_ref(claim);
 }
 
-/* Delete a claim from the claim hash which has the
- * given mac address and vid.
+/**
+ * batadv_bla_del_claim - delete a claim from the claim hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: mac address of the claim to be removed
+ * @vid: VLAN id for the claim to be removed
  */
 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid)
@@ -666,7 +708,15 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_claim_free_ref(claim);
 }
 
-/* check for ANNOUNCE frame, return 1 if handled */
+/**
+ * batadv_handle_announce - check for ANNOUNCE frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @an_addr: announcement mac address (ARP Sender HW address)
+ * @backbone_addr: originator address of the sender (Ethernet source MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: 1 if handled
+ */
 static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
                                  u8 *backbone_addr, unsigned short vid)
 {
@@ -716,7 +766,16 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
        return 1;
 }
 
-/* check for REQUEST frame, return 1 if handled */
+/**
+ * batadv_handle_request - check for REQUEST frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
+ * @ethhdr: ethernet header of a packet
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: 1 if handled
+ */
 static int batadv_handle_request(struct batadv_priv *bat_priv,
                                 struct batadv_hard_iface *primary_if,
                                 u8 *backbone_addr, struct ethhdr *ethhdr,
@@ -740,7 +799,16 @@ static int batadv_handle_request(struct batadv_priv *bat_priv,
        return 1;
 }
 
-/* check for UNCLAIM frame, return 1 if handled */
+/**
+ * batadv_handle_unclaim - check for UNCLAIM frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: originator address of the backbone (Ethernet source)
+ * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: 1 if handled
+ */
 static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
                                 struct batadv_hard_iface *primary_if,
                                 u8 *backbone_addr, u8 *claim_addr,
@@ -769,7 +837,16 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
        return 1;
 }
 
-/* check for CLAIM frame, return 1 if handled */
+/**
+ * batadv_handle_claim - check for CLAIM frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: originator address of the backbone (Ethernet Source)
+ * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: 1 if handled
+ */
 static int batadv_handle_claim(struct batadv_priv *bat_priv,
                               struct batadv_hard_iface *primary_if,
                               u8 *backbone_addr, u8 *claim_addr,
@@ -798,7 +875,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_check_claim_group
+ * batadv_check_claim_group - check for claim group membership
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary interface of this batman interface
  * @hw_src: the Hardware source in the ARP Header
@@ -809,7 +886,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
  * This function also applies the group ID of the sender
  * if it is in the same mesh.
  *
- * returns:
+ * Return:
  *     2  - if it is a claim packet and on the same group
  *     1  - if is a claim packet from another group
  *     0  - if it is not a claim packet
@@ -873,14 +950,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_process_claim
+ * batadv_bla_process_claim - Check if this is a claim frame, and process it
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
  *
- * Check if this is a claim frame, and process it accordingly.
- *
- * returns 1 if it was a claim frame, otherwise return 0 to
+ * Return: 1 if it was a claim frame, otherwise return 0 to
  * tell the caller that it can use the frame on its own.
  */
 static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
@@ -1011,7 +1086,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        return 1;
 }
 
-/* Check when we last heard from other nodes, and remove them in case of
+/**
+ * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
+ *  immediately
+ * @bat_priv: the bat priv with all the soft interface information
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from other nodes, and remove them in case of
  * a time out, or clean all backbone gws if now is set.
  */
 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
@@ -1059,7 +1140,7 @@ purge_now:
 }
 
 /**
- * batadv_bla_purge_claims
+ * batadv_bla_purge_claims - Remove claims after a timeout or immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface, may be NULL if now is set
  * @now: whether the whole hash shall be wiped now
@@ -1108,12 +1189,11 @@ purge_now:
 }
 
 /**
- * batadv_bla_update_orig_address
+ * batadv_bla_update_orig_address - Update the backbone gateways when the own
+ *  originator address changes
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the new selected primary_if
  * @oldif: the old primary interface, may be NULL
- *
- * Update the backbone gateways when the own orig address changes.
  */
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
@@ -1184,7 +1264,11 @@ void batadv_bla_status_update(struct net_device *net_dev)
        batadv_hardif_free_ref(primary_if);
 }
 
-/* periodic work to do:
+/**
+ * batadv_bla_periodic_work - performs periodic bla work
+ * @work: kernel work struct
+ *
+ * periodic work to do:
  *  * purge structures when they are too old
  *  * send announcements
  */
@@ -1265,7 +1349,12 @@ out:
 static struct lock_class_key batadv_claim_hash_lock_class_key;
 static struct lock_class_key batadv_backbone_hash_lock_class_key;
 
-/* initialize all bla structures */
+/**
+ * batadv_bla_init - initialize all bla structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success, < 0 on error.
+ */
 int batadv_bla_init(struct batadv_priv *bat_priv)
 {
        int i;
@@ -1320,7 +1409,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_check_bcast_duplist
+ * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: contains the bcast_packet to be checked
  *
@@ -1332,6 +1421,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
  * with a good chance that it is the same packet. If it is furthermore
  * sent by another host, drop it. We allow equal packets from
  * the same host however as this might be intended.
+ *
+ * Return: 1 if a packet is in the duplicate list, 0 otherwise.
  */
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb)
@@ -1390,14 +1481,13 @@ out:
 }
 
 /**
- * batadv_bla_is_backbone_gw_orig
+ * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
+ *  the VLAN identified by vid.
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
  * @vid: VLAN identifier
  *
- * Check if the originator is a gateway for the VLAN identified by vid.
- *
- * Returns true if orig is a backbone for this vid, false otherwise.
+ * Return: true if orig is a backbone for this vid, false otherwise.
  */
 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
                                    unsigned short vid)
@@ -1431,14 +1521,13 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_is_backbone_gw
+ * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
  * @skb: the frame to be checked
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
  *
- * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
- * if the orig_node is also a gateway on the soft interface, otherwise it
- * returns 0.
+ * Return: 1 if the orig_node is also a gateway on the soft interface, otherwise
+ * it returns 0.
  */
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node, int hdr_size)
@@ -1465,7 +1554,12 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
        return 1;
 }
 
-/* free all bla structures (for softinterface free or module unload) */
+/**
+ * batadv_bla_free - free all bla structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * for softinterface free or module unload
+ */
 void batadv_bla_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hard_iface *primary_if;
@@ -1488,18 +1582,19 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_rx
+ * batadv_bla_rx - check packets coming from the mesh.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
  * @is_bcast: the packet came in a broadcast packet type.
  *
- * bla_rx avoidance checks if:
+ * batadv_bla_rx avoidance checks if:
  *  * we have to race for a claim
  *  * if the frame is allowed on the LAN
  *
- * in these cases, the skb is further handled by this function and
- * returns 1, otherwise it returns 0 and the caller shall further
+ * in these cases, the skb is further handled by this function
+ *
+ * Return: 1 if handled, otherwise it returns 0 and the caller shall further
  * process the skb.
  */
 int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
@@ -1583,20 +1678,21 @@ out:
 }
 
 /**
- * batadv_bla_tx
+ * batadv_bla_tx - check packets going into the mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
  *
- * bla_tx checks if:
+ * batadv_bla_tx checks if:
  *  * a claim was received which has to be processed
  *  * the frame is allowed on the mesh
  *
- * in these cases, the skb is further handled by this function and
- * returns 1, otherwise it returns 0 and the caller shall further
- * process the skb.
+ * in these cases, the skb is further handled by this function.
  *
  * This call might reallocate skb data.
+ *
+ * Return: 1 if handled, otherwise it returns 0 and the caller shall further
+ * process the skb.
  */
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                  unsigned short vid)
@@ -1670,6 +1766,13 @@ out:
        return ret;
 }
 
+/**
+ * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1719,6 +1822,14 @@ out:
        return 0;
 }
 
+/**
+ * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
+ *  file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
index 7ea199b8b5ab2ab64dba9e56c4ccfba272e80c37..579f0fa6fe6a47c7fd1c7bba73dd4496cec24ca7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
index 037ad0a5f485e133a571aeea8d1d9534730612e9..48253cf8341bd82d70d0cd218571741b9257fa37 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -281,6 +281,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file)
  *  originator table of an hard interface
  * @inode: inode pointer to debugfs file
  * @file: pointer to the seq_file
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 static int batadv_originators_hardif_open(struct inode *inode,
                                          struct file *file)
@@ -329,6 +331,8 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
  * batadv_dat_cache_open - Prepare file handler for reads from dat_chache
  * @inode: inode which was opened
  * @file: file handle to be initialized
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 static int batadv_dat_cache_open(struct inode *inode, struct file *file)
 {
@@ -483,6 +487,8 @@ void batadv_debugfs_destroy(void)
  * batadv_debugfs_add_hardif - creates the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which should be added.
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
 {
index 80ab8d6f0ab3c0e70cb468bc9dc2c8d7e8e17815..1ab4e2e63afc885d124a16d50c10707e9ab9615a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index a49c705fb86b861f5595c8c0cfb7b8b1e1010589..017fffe9a5b8d125d3f1f2ec9ffa1e964767891c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
@@ -76,7 +76,7 @@ static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
  * batadv_dat_to_purge - check whether a dat_entry has to be purged or not
  * @dat_entry: the entry to check
  *
- * Returns true if the entry has to be purged now, false otherwise.
+ * Return: true if the entry has to be purged now, false otherwise.
  */
 static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
 {
@@ -151,7 +151,7 @@ static void batadv_dat_purge(struct work_struct *work)
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Returns 1 if the two entries are the same, 0 otherwise.
+ * Return: 1 if the two entries are the same, 0 otherwise.
  */
 static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
 {
@@ -166,7 +166,7 @@ static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the hw_src field in the ARP packet.
+ * Return: the value of the hw_src field in the ARP packet.
  */
 static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 {
@@ -183,7 +183,7 @@ static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the ip_src field in the ARP packet.
+ * Return: the value of the ip_src field in the ARP packet.
  */
 static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
 {
@@ -195,7 +195,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the hw_dst field in the ARP packet.
+ * Return: the value of the hw_dst field in the ARP packet.
  */
 static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 {
@@ -207,7 +207,7 @@ static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the ip_dst field in the ARP packet.
+ * Return: the value of the ip_dst field in the ARP packet.
  */
 static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
 {
@@ -219,7 +219,7 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
  * @data: data to hash
  * @size: size of the hash table
  *
- * Returns the selected index in the hash table for the given data.
+ * Return: the selected index in the hash table for the given data.
  */
 static u32 batadv_hash_dat(const void *data, u32 size)
 {
@@ -256,7 +256,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
  * @ip: search key
  * @vid: VLAN identifier
  *
- * Returns the dat_entry if found, NULL otherwise.
+ * Return: the dat_entry if found, NULL otherwise.
  */
 static struct batadv_dat_entry *
 batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
@@ -440,7 +440,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * @candidate: orig_node under evaluation
  * @max_orig_node: last selected candidate
  *
- * Returns true if the node has been elected as next candidate or false
+ * Return: true if the node has been elected as next candidate or false
  * otherwise.
  */
 static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
@@ -558,7 +558,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * closest values (from the LEFT, with wrap around if needed) then the hash
  * value of the key. ip_dst is the key.
  *
- * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
+ * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
 batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
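The selection rule described above picks, for each computed position, the originator whose hash is the closest one not greater than the key, wrapping around to the highest hash when nothing smaller exists. A rough user-space sketch of that "closest from the LEFT, with wrap around" rule, assuming a sorted array of node positions (the names and the flat array are illustrative simplifications, not the in-kernel structures):

    #include <stdint.h>
    #include <stddef.h>

    /* @ring: node hash positions, sorted ascending; @n must be > 0 */
    static size_t pick_left_closest(const uint32_t *ring, size_t n, uint32_t key)
    {
            size_t best = n - 1;    /* wrap around: default to the largest position */
            size_t i;

            for (i = 0; i < n; i++) {
                    if (ring[i] <= key)
                            best = i;       /* keep the last (largest) match */
                    else
                            break;
            }
            return best;
    }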
@@ -602,7 +602,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  * This function copies the skb with pskb_copy() and is sent as unicast packet
  * to each of the selected candidates.
  *
- * Returns true if the packet is sent to at least one candidate, false
+ * Return: true if the packet is sent to at least one candidate, false
  * otherwise.
  */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
@@ -741,6 +741,8 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
 /**
  * batadv_dat_init - initialise the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 in case of success, a negative error code otherwise
  */
 int batadv_dat_init(struct batadv_priv *bat_priv)
 {
@@ -779,6 +781,8 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
  * batadv_dat_cache_seq_print_text - print the local DAT hash table
  * @seq: seq file to print on
  * @offset: not used
+ *
+ * Return: always 0
  */
 int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
 {
@@ -831,7 +835,7 @@ out:
  * @skb: packet to analyse
  * @hdr_size: size of the possible header before the ARP packet in the skb
  *
- * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise.
+ * Return: the ARP type if the skb contains a valid ARP packet, 0 otherwise.
  */
 static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
                               struct sk_buff *skb, int hdr_size)
@@ -904,8 +908,9 @@ out:
  * @skb: the buffer containing the packet to extract the VID from
  * @hdr_size: the size of the batman-adv header encapsulating the packet
  *
- * If the packet embedded in the skb is vlan tagged this function returns the
- * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ * Return: If the packet embedded in the skb is vlan tagged this function
+ * returns the VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS
+ * is returned.
  */
 static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
 {
@@ -930,7 +935,7 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  *
- * Returns true if the message has been sent to the dht candidates, false
+ * Return: true if the message has been sent to the dht candidates, false
  * otherwise. In case of a positive return value the message has to be enqueued
  * to permit the fallback.
  */
@@ -1020,7 +1025,7 @@ out:
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
  *
- * Returns true if the request has been answered, false otherwise.
+ * Return: true if the request has been answered, false otherwise.
  */
 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb, int hdr_size)
@@ -1143,7 +1148,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
  *
- * Returns true if the packet was snooped and consumed by DAT. False if the
+ * Return: true if the packet was snooped and consumed by DAT. False if the
  * packet has to be delivered to the interface
  */
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
@@ -1200,7 +1205,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the broadcast packet
  *
- * Returns true if the node can drop the packet, false otherwise.
+ * Return: true if the node can drop the packet, false otherwise.
  */
 bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
                                      struct batadv_forw_packet *forw_packet)
index 26d4a525a798ec37b644e789b5176475e0cf0d92..813ecea96cf9334700fa219a41fe1cf5a20f791d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
index 20d9282f895b2d3115f056f09b81090b5d0956a0..55656e84bc7e1a143615ad097ea5a492e2451474 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
@@ -85,7 +85,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
 /**
  * batadv_frag_size_limit - maximum possible size of packet to be fragmented
  *
- * Returns the maximum size of payload that can be fragmented.
+ * Return: the maximum size of payload that can be fragmented.
  */
 static int batadv_frag_size_limit(void)
 {
@@ -107,7 +107,7 @@ static int batadv_frag_size_limit(void)
  *
  * Caller must hold chain->lock.
  *
- * Returns true if chain is empty and caller can just insert the new fragment
+ * Return: true if chain is empty and caller can just insert the new fragment
  * without searching for the right position.
  */
 static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
@@ -136,7 +136,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
  * Insert a new fragment into the reverse ordered chain in the right table
  * entry. The hash table entry is cleared if "old" fragments exist in it.
  *
- * Returns true if skb is buffered, false on error. If the chain has all the
+ * Return: true if skb is buffered, false on error. If the chain has all the
  * fragments needed to merge the packet, the chain is moved to the passed head
  * to avoid locking the chain in the table.
  */
@@ -242,12 +242,11 @@ err:
 /**
  * batadv_frag_merge_packets - merge a chain of fragments
  * @chain: head of chain with fragments
- * @skb: packet with total size of skb after merging
  *
  * Expand the first skb in the chain and copy the content of the remaining
  * skb's into the expanded one. After doing so, clear the chain.
  *
- * Returns the merged skb or NULL on error.
+ * Return: the merged skb or NULL on error.
  */
 static struct sk_buff *
 batadv_frag_merge_packets(struct hlist_head *chain)
@@ -307,6 +306,9 @@ free:
  * There are three possible outcomes: 1) Packet is merged: Return true and
  * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
  * to NULL; 3) Error: Return false and leave skb as is.
+ *
+ * Return: true when packet is merged or buffered, false when skb is not
+ * used.
  */
 bool batadv_frag_skb_buffer(struct sk_buff **skb,
                            struct batadv_orig_node *orig_node_src)
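The three outcomes above give the caller a tri-state contract: merged (true, *skb points at the full packet), buffered (true, *skb set to NULL), or error (false, skb untouched). A self-contained sketch of how a caller might branch on such a contract, with a stub helper and plain buffers standing in for the real code and sk_buffs (every name below is hypothetical):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct pkt { const char *data; size_t len; };

    /* stub for the real helper; this version always "buffers" the fragment */
    static bool frag_buffer(struct pkt **pkt)
    {
            *pkt = NULL;    /* pretend the fragment was queued */
            return true;
    }

    static void rx_fragment(struct pkt *pkt)
    {
            if (!frag_buffer(&pkt)) {
                    printf("error: fragment dropped, pkt left untouched\n");
                    return;
            }
            if (!pkt) {
                    printf("fragment buffered, waiting for the rest\n");
                    return;
            }
            printf("merged packet of %zu bytes ready for delivery\n", pkt->len);
    }

    int main(void)
    {
            struct pkt p = { "frag", 4 };

            rx_fragment(&p);
            return 0;
    }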
@@ -344,7 +346,7 @@ out_err:
  * will exceed the MTU towards the next-hop. If so, the fragment is forwarded
  * without merging it.
  *
- * Returns true if the fragment is consumed/forwarded, false otherwise.
+ * Return: true if the fragment is consumed/forwarded, false otherwise.
  */
 bool batadv_frag_skb_fwd(struct sk_buff *skb,
                         struct batadv_hard_iface *recv_if,
@@ -399,7 +401,7 @@ out:
  * passed mtu and the old one with the rest. The new skb contains data from the
  * tail of the old skb.
  *
- * Returns the new fragment, NULL on error.
+ * Return: the new fragment, NULL on error.
  */
 static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
                                          struct batadv_frag_packet *frag_head,
@@ -433,7 +435,7 @@ err:
  * @orig_node: final destination of the created fragments
  * @neigh_node: next-hop of the created fragments
  *
- * Returns true on success, false otherwise.
+ * Return: true on success, false otherwise.
  */
 bool batadv_frag_send_packet(struct sk_buff *skb,
                             struct batadv_orig_node *orig_node,
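The creation and send paths above split an oversized packet so that every piece, plus the fragment header, fits the next hop's MTU. A small arithmetic sketch of the resulting fragment count; it ignores details such as the minimum fragment size and buffer reuse, so treat it as an approximation only:

    #include <stddef.h>

    /* fragments needed when each one may carry at most (mtu - hdr_len)
     * payload bytes; assumes mtu > hdr_len and len > 0 */
    static size_t frag_count(size_t len, size_t mtu, size_t hdr_len)
    {
            size_t per_frag = mtu - hdr_len;

            return (len + per_frag - 1) / per_frag; /* round up */
    }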
index 8b9877e70b95eaa9307e66749446ab12441dfbb2..9ff77c7ef7c7719aab1376675cf449b9e250b4ab 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
@@ -42,7 +42,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
  * batadv_frag_check_entry - check if a list of fragments has timed out
  * @frags_entry: table entry to check
  *
- * Returns true if the frags entry has timed out, false otherwise.
+ * Return: true if the frags entry has timed out, false otherwise.
  */
 static inline bool
 batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
index e6c8382c79ba86dfea5078a37f692f74ebffb01c..5950974de7b1fa6a20d350bfb2a6a7f1a94141a6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -456,7 +456,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  *
- * Returns gateway node if found or NULL otherwise.
+ * Return: gateway node if found or NULL otherwise.
  */
 static struct batadv_gw_node *
 batadv_gw_node_get(struct batadv_priv *bat_priv,
@@ -655,13 +655,13 @@ out:
  * @chaddr: buffer where the client address will be stored. Valid
  *  only if the function returns BATADV_DHCP_TO_CLIENT
  *
- * Returns:
+ * This function may re-allocate the data buffer of the skb passed as argument.
+ *
+ * Return:
  * - BATADV_DHCP_NO if the packet is not a dhcp message or if there was an error
  *   while parsing it
  * - BATADV_DHCP_TO_SERVER if this is a message going to the DHCP server
  * - BATADV_DHCP_TO_CLIENT if this is a message going to a DHCP client
- *
- * This function may re-allocate the data buffer of the skb passed as argument.
  */
 enum batadv_dhcp_recipient
 batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
@@ -776,11 +776,11 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
  * server. Due to topology changes it may be the case that the GW server
  * previously selected is not the best one anymore.
  *
- * Returns true if the packet destination is unicast and it is not the best gw,
- * false otherwise.
- *
  * This call might reallocate skb data.
  * Must be invoked only when the DHCP packet is going TO a DHCP SERVER.
+ *
+ * Return: true if the packet destination is unicast and it is not the best gw,
+ * false otherwise.
  */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
                            struct sk_buff *skb)
index fa9527785ed3c62aaf3fcd37c7e2439e01498d0a..582dd8c413c838a4958f726cec8c1de69c164d37 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index b51bface8bdd72d2ad7e9f397684cd45d8b26316..5ee04f7140af7bf9d459488b8c886d8df8873cd3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -38,7 +38,7 @@
  * @description: text shown when throughput string cannot be parsed
  * @throughput: pointer holding the returned throughput information
  *
- * Returns false on parse error and true otherwise.
+ * Return: false on parse error and true otherwise.
  */
 static bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
                                    const char *description, u32 *throughput)
index ab893e3182292b5b67ec9fb821aaae9157c79913..b58346350024dbe3eab5f365d3a59073953b59a3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 01acccc4d2185806ae6dd37b2f0f091d9a0b92a0..db90022c00a4d0858923065896fbbdda40e61c55 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -85,7 +85,7 @@ out:
  * This function recursively checks all the fathers of the device passed as
  * argument looking for a batman-adv soft interface.
  *
- * Returns true if the device is descendant of a batman-adv mesh interface (or
+ * Return: true if the device is descendant of a batman-adv mesh interface (or
  * if it is a batman-adv interface itself), false otherwise
  */
 static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
@@ -136,7 +136,7 @@ static int batadv_is_valid_iface(const struct net_device *net_dev)
  *  interface
  * @net_device: the device to check
  *
- * Returns true if the net device is a 802.11 wireless device, false otherwise.
+ * Return: true if the net device is a 802.11 wireless device, false otherwise.
  */
 bool batadv_is_wifi_netdev(struct net_device *net_device)
 {
@@ -401,7 +401,8 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
  *
  * Invoke ndo_del_slave on master passing slave as argument. In this way slave
  * is free'd and master can correctly change its internal state.
- * Return 0 on success, a negative value representing the error otherwise
+ *
+ * Return: 0 on success, a negative value representing the error otherwise
  */
 static int batadv_master_del_slave(struct batadv_hard_iface *slave,
                                   struct net_device *master)
index 7b12ea8ea29d1a398f649b40c97106705bcd2289..4d6b5e12331f25d5d603a44783db434142257cd1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 2ea6a18d793fe9f184af18322885e20d115815c6..a0a0fdb8580513215a59f971dd199aa00dc10d0f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 377626250ac7129e1cf65d1768e0c2ee4643b59e..9bb57b87447cc0ca43c8c9bf31625c3a1f00303c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
 struct lock_class_key;
 
 /* callback to a compare function.  should compare 2 element data items for their
- * keys, return 0 if same and not 0 if not same
+ * keys
+ *
+ * Return: 0 if same and not 0 if not same
  */
 typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
                                          const void *);
 
-/* the hashfunction, should return an index
- * based on the key in the data of the first
- * argument and the size the second
+/* the hashfunction
+ *
+ * Return: an index based on the key in the data of the first argument and the
+ * size given as the second argument
  */
 typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
 typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
@@ -96,7 +99,7 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash,
  *     @data: data passed to the aforementioned callbacks as argument
  *     @data_node: to be added element
  *
- *     Returns 0 on success, 1 if the element already is in the hash
+ *     Return: 0 on success, 1 if the element already is in the hash
  *     and -1 on error.
  */
 static inline int batadv_hash_add(struct batadv_hashtable *hash,
@@ -139,10 +142,11 @@ out:
        return ret;
 }
 
-/* removes data from hash, if found. returns pointer do data on success, so you
- * can remove the used structure yourself, or NULL on error .  data could be the
- * structure you use with just the key filled, we just need the key for
- * comparing.
+/* removes data from hash, if found. data could be the structure you use with
+ * just the key filled, we just need the key for comparing.
+ *
+ * Return: pointer to data on success, so you can remove the used
+ * structure yourself, or NULL on error
  */
 static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
                                       batadv_hashdata_compare_cb compare,
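The add and remove helpers above document a compact convention: add returns 0 on success, 1 for a duplicate and -1 on error, while remove hands the stored pointer back so the caller can dispose of it. A toy user-space set that follows the same return contract (purely an illustration, not the hash table used here):

    #include <stddef.h>
    #include <string.h>

    #define SET_MAX 16

    struct str_set {
            const char *items[SET_MAX];
            int count;
    };

    /* Return: 0 on success, 1 if the element already is in the set, -1 on error */
    static int set_add(struct str_set *set, const char *item)
    {
            int i;

            for (i = 0; i < set->count; i++)
                    if (strcmp(set->items[i], item) == 0)
                            return 1;
            if (set->count == SET_MAX)
                    return -1;
            set->items[set->count++] = item;
            return 0;
    }

    /* Return: the stored pointer if found, NULL otherwise */
    static const char *set_remove(struct str_set *set, const char *item)
    {
            int i;

            for (i = 0; i < set->count; i++) {
                    if (strcmp(set->items[i], item) == 0) {
                            const char *found = set->items[i];

                            set->items[i] = set->items[--set->count];
                            return found;
                    }
            }
            return NULL;
    }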
index bcabb5e3f4d3a2c3478999b3deb80b1a7595500b..a69da37bbad57dbcfc01d75406d0011ea83ec805 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index e937143f0b102d36682f30b778c06a24d1c876a5..618d5de06f202b8ea630c90513d88541f1503014 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 4b5d61fbadb1fb77b2f8484f74abd231dd789346..5f319fd6ecd76da333c7aceaa9c6af1b0c7ebbd3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -233,7 +233,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address to check
  *
- * Returns 'true' if the mac address was found, false otherwise.
+ * Return: 'true' if the mac address was found, false otherwise.
  */
 bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
 {
@@ -262,7 +262,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
  *  function that requires the primary interface
  * @seq: debugfs table seq_file struct
  *
- * Returns primary interface if found or NULL otherwise.
+ * Return: primary interface if found or NULL otherwise.
  */
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq)
@@ -297,7 +297,7 @@ out:
  * batadv_max_header_len - calculate maximum encapsulation overhead for a
  *  payload packet
  *
- * Return the maximum encapsulation overhead in bytes.
+ * Return: the maximum encapsulation overhead in bytes.
  */
 int batadv_max_header_len(void)
 {
@@ -599,6 +599,8 @@ int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
  *
  * payload_ptr must always point to an address in the skb head buffer and not to
  * a fragment.
+ *
+ * Return: big endian crc32c of the checksummed data
  */
 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
 {
@@ -640,7 +642,7 @@ batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
  * @type: tvlv handler type to look for
  * @version: tvlv handler version to look for
  *
- * Returns tvlv handler if found or NULL otherwise.
+ * Return: tvlv handler if found or NULL otherwise.
  */
 static struct batadv_tvlv_handler
 *batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
@@ -688,7 +690,7 @@ static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
  * Has to be called with the appropriate locks being acquired
  * (tvlv.container_list_lock).
  *
- * Returns tvlv container if found or NULL otherwise.
+ * Return: tvlv container if found or NULL otherwise.
  */
 static struct batadv_tvlv_container
 *batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
@@ -720,7 +722,7 @@ static struct batadv_tvlv_container
  * Has to be called with the appropriate locks being acquired
  * (tvlv.container_list_lock).
  *
- * Returns size of all currently registered tvlv containers in bytes.
+ * Return: size of all currently registered tvlv containers in bytes.
  */
 static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
 {
@@ -826,7 +828,7 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
  * @additional_packet_len: requested additional packet size on top of minimum
  *  size
  *
- * Returns true of the packet buffer could be changed to the requested size,
+ * Return: true if the packet buffer could be changed to the requested size,
  * false otherwise.
  */
 static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
@@ -862,7 +864,7 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
  * The ogm packet might be enlarged or shrunk depending on the current size
  * and the size of the to-be-appended tvlv containers.
  *
- * Returns size of all appended tvlv containers in bytes.
+ * Return: size of all appended tvlv containers in bytes.
  */
 u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
                                     unsigned char **packet_buff,
@@ -915,7 +917,7 @@ end:
  * @tvlv_value: tvlv content
  * @tvlv_value_len: tvlv content length
  *
- * Returns success if handler was not found or the return value of the handler
+ * Return: success if handler was not found or the return value of the handler
  * callback.
  */
 static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
@@ -968,7 +970,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
  * @tvlv_value: tvlv content
  * @tvlv_value_len: tvlv content length
  *
- * Returns success when processing an OGM or the return value of all called
+ * Return: success when processing an OGM or the return value of all called
  * handler callbacks.
  */
 int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
@@ -1190,8 +1192,8 @@ out:
  * @skb: the buffer containing the packet
  * @header_len: length of the batman header preceding the ethernet header
  *
- * If the packet embedded in the skb is vlan tagged this function returns the
- * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ * Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in the
+ * skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
  */
 unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
 {
@@ -1218,7 +1220,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
  * @vid: the VLAN identifier for which the AP isolation attributed as to be
  *  looked up
  *
- * Returns true if AP isolation is on for the VLAN idenfied by vid, false
+ * Return: true if AP isolation is on for the VLAN identified by vid, false
  * otherwise
  */
 bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
index 9dbd9107e7e1333abc996e5b9da54de9c97e480b..a7dc41a2709bd0746f0b27fae109ad832c8883d3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -273,9 +273,14 @@ static inline void _batadv_dbg(int type __always_unused,
                pr_err("%s: " fmt, _netdev->name, ## arg);              \
        } while (0)
 
-/* returns 1 if they are the same ethernet addr
+/**
+ * batadv_compare_eth - Compare two not u16 aligned Ethernet addresses
+ * @data1: Pointer to a six-byte array containing the Ethernet address
+ * @data2: Pointer to the other six-byte array containing the Ethernet address
  *
  * note: can't use ether_addr_equal() as it requires aligned memory
+ *
+ * Return: 1 if they are the same ethernet addr
  */
 static inline bool batadv_compare_eth(const void *data1, const void *data2)
 {
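As the note above says, ether_addr_equal() requires 2-byte aligned addresses, so this comparison has to work on arbitrarily aligned memory. A standalone sketch of the alignment-agnostic alternative, relying on memcmp() which imposes no alignment requirements (the constant and function name below are only for the sketch):

    #include <stdbool.h>
    #include <string.h>

    #define ETH_ALEN 6      /* bytes in an Ethernet address */

    static bool mac_equal(const void *addr1, const void *addr2)
    {
            return memcmp(addr1, addr2, ETH_ALEN) == 0;
    }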
@@ -287,7 +292,7 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2)
  * @timestamp:         base value to compare with (in jiffies)
  * @timeout:           added to base value before comparing (in milliseconds)
  *
- * Returns true if current time is after timestamp + timeout
+ * Return: true if current time is after timestamp + timeout
  */
 static inline bool batadv_has_timed_out(unsigned long timestamp,
                                        unsigned int timeout)
@@ -326,7 +331,13 @@ static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
 
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
 
-/* Sum and return the cpu-local counters for index 'idx' */
+/**
+ * batadv_sum_counter - Sum the cpu-local counters for index 'idx'
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: index of counter to sum up
+ *
+ * Return: sum of all cpu-local counters
+ */
 static inline u64 batadv_sum_counter(struct batadv_priv *bat_priv,  size_t idx)
 {
        u64 *counters, sum = 0;
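batadv_sum_counter above folds per-CPU counters into a single value for readers, so writers never contend on a shared counter variable. A user-space sketch of the same pattern, with a plain array standing in for the kernel's percpu allocation (NR_CPUS and all names are assumptions of the sketch):

    #include <stdint.h>
    #include <stddef.h>

    #define NR_CPUS 4

    /* one slot per CPU; a real implementation would also pad to cache lines */
    static uint64_t counters[NR_CPUS];

    static uint64_t sum_counter(void)
    {
            uint64_t sum = 0;
            size_t cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    sum += counters[cpu];
            return sum;
    }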
index 75fa5013af724e9bb5e00aa7e98cae74774cc319..155565e0fecce4237eb0ed4304fd99923215c9d1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2016 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
@@ -55,7 +55,7 @@
  * Collect multicast addresses of the local multicast listeners
  * on the given soft interface, dev, in the given mcast_list.
  *
- * Returns -ENOMEM on memory allocation error or the number of
+ * Return: -ENOMEM on memory allocation error or the number of
  * items added to the mcast_list otherwise.
  */
 static int batadv_mcast_mla_softif_get(struct net_device *dev,
@@ -87,7 +87,7 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
  * @mcast_addr: the multicast address to check
  * @mcast_list: the list with multicast addresses to search in
  *
- * Returns true if the given address is already in the given list.
+ * Return: true if the given address is already in the given list.
  * Otherwise returns false.
  */
 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
@@ -195,8 +195,9 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
  * batadv_mcast_has_bridge - check whether the soft-iface is bridged
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Checks whether there is a bridge on top of our soft interface. Returns
- * true if so, false otherwise.
+ * Checks whether there is a bridge on top of our soft interface.
+ *
+ * Return: true if there is a bridge, false otherwise.
  */
 static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
 {
@@ -218,7 +219,7 @@ static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
  * Updates the own multicast tvlv with our current multicast related settings,
  * capabilities and inabilities.
  *
- * Returns true if the tvlv container is registered afterwards. Otherwise
+ * Return: true if the tvlv container is registered afterwards. Otherwise
  * returns false.
  */
 static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
@@ -289,8 +290,8 @@ out:
  * Checks whether the given IPv4 packet has the potential to be forwarded with a
  * mode more optimal than classic flooding.
  *
- * If so then returns 0. Otherwise -EINVAL is returned or -ENOMEM in case of
- * memory allocation failure.
+ * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
+ * allocation failure.
  */
 static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
                                             struct sk_buff *skb,
@@ -327,8 +328,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
  * Checks whether the given IPv6 packet has the potential to be forwarded with a
  * mode more optimal than classic flooding.
  *
- * If so then returns 0. Otherwise -EINVAL is returned or -ENOMEM if we are out
- * of memory.
+ * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory
  */
 static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
                                             struct sk_buff *skb,
@@ -366,8 +366,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
  * Checks whether the given multicast ethernet frame has the potential to be
  * forwarded with a mode more optimal than classic flooding.
  *
- * If so then returns 0. Otherwise -EINVAL is returned or -ENOMEM if we are out
- * of memory.
+ * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory
  */
 static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
                                        struct sk_buff *skb,
@@ -398,7 +397,7 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: ethernet header of a packet
  *
- * Returns the number of nodes which want all IPv4 multicast traffic if the
+ * Return: the number of nodes which want all IPv4 multicast traffic if the
  * given ethhdr is from an IPv4 packet or the number of nodes which want all
  * IPv6 traffic if it matches an IPv6 packet.
  */
@@ -421,7 +420,7 @@ static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the ether header containing the multicast destination
  *
- * Returns an orig_node matching the multicast address provided by ethhdr
+ * Return: an orig_node matching the multicast address provided by ethhdr
  * via a translation table lookup. This increases the returned nodes refcount.
  */
 static struct batadv_orig_node *
@@ -436,7 +435,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
  * batadv_mcast_want_forw_ipv4_node_get - get a node with an ipv4 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
+ * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
  * increases its refcount.
  */
 static struct batadv_orig_node *
@@ -463,7 +462,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
  * batadv_mcast_want_forw_ipv6_node_get - get a node with an ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
+ * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
  * and increases its refcount.
  */
 static struct batadv_orig_node *
@@ -491,7 +490,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: an ethernet header to determine the protocol family from
  *
- * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
+ * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
  * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
  * increases its refcount.
  */
@@ -514,7 +513,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
  * batadv_mcast_want_forw_unsnoop_node_get - get a node with an unsnoopable flag
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
+ * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
  * set and increases its refcount.
  */
 static struct batadv_orig_node *
@@ -543,7 +542,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
  * @skb: The multicast packet to check
  * @orig: an originator to be set to forward the skb to
  *
- * Returns the forwarding mode as enum batadv_forw_mode and in case of
+ * Return: the forwarding mode as enum batadv_forw_mode and in case of
  * BATADV_FORW_SINGLE set the orig to the single originator the skb
  * should be forwarded to.
  */
index 8f3cb04b9f13f3e56b360781afc238dc64aeb27a..80bceec55592a7e7347bc09140b177a88bdccf2b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2016 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
@@ -23,7 +23,7 @@
 struct sk_buff;
 
 /**
- * batadv_forw_mode - the way a packet should be forwarded as
+ * enum batadv_forw_mode - the way a packet should be forwarded as
  * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
  *  flooding)
  * @BATADV_FORW_SINGLE: forward the packet to a single node (currently via the
index cc63b44f0d2e2fdc61b41be26e745dea7567f28c..0b30c15eee5fc7d97fb3531b9991088aa6e64908 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
@@ -64,6 +64,8 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 
 /**
  * batadv_nc_init - one-time initialization for network coding
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 int __init batadv_nc_init(void)
 {
@@ -142,6 +144,8 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 /**
  * batadv_nc_mesh_init - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
@@ -251,7 +255,7 @@ static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet)
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_node: the nc node to check
  *
- * Returns true if the entry has to be purged now, false otherwise
+ * Return: true if the entry has to be purged now, false otherwise
  */
 static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
                                       struct batadv_nc_node *nc_node)
@@ -267,7 +271,7 @@ static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
- * Returns true if the entry has to be purged now, false otherwise
+ * Return: true if the entry has to be purged now, false otherwise
  */
 static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
                                              struct batadv_nc_path *nc_path)
@@ -287,7 +291,7 @@ static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
- * Returns true if the entry has to be purged now, false otherwise
+ * Return: true if the entry has to be purged now, false otherwise
  */
 static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
                                                struct batadv_nc_path *nc_path)
@@ -470,7 +474,7 @@ static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
  * @data: data to hash
  * @size: size of the hash table
  *
- * Returns the selected index in the hash table for the given data.
+ * Return: the selected index in the hash table for the given data.
  */
 static u32 batadv_nc_hash_choose(const void *data, u32 size)
 {
@@ -489,7 +493,7 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size)
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Returns 1 if the two entry are the same, 0 otherwise
+ * Return: 1 if the two entries are the same, 0 otherwise
  */
 static int batadv_nc_hash_compare(const struct hlist_node *node,
                                  const void *data2)
@@ -516,7 +520,7 @@ static int batadv_nc_hash_compare(const struct hlist_node *node,
  * @hash: hash table containing the nc path
  * @data: search key
  *
- * Returns the nc_path if found, NULL otherwise.
+ * Return: the nc_path if found, NULL otherwise.
  */
 static struct batadv_nc_path *
 batadv_nc_hash_find(struct batadv_hashtable *hash,
@@ -571,7 +575,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
  * timeout. If so, the packet is no longer kept and the entry deleted from the
  * queue. Has to be called with the appropriate locks.
  *
- * Returns false as soon as the entry in the fifo queue has not been timed out
+ * Return: false as soon as the entry in the fifo queue has not been timed out
  * yet and true otherwise.
  */
 static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
@@ -610,7 +614,7 @@ out:
  * packet is no longer delayed, immediately sent and the entry deleted from the
  * queue. Has to be called with the appropriate locks.
  *
- * Returns false as soon as the entry in the fifo queue has not been timed out
+ * Return: false as soon as the entry in the fifo queue has not been timed out
  * yet and true otherwise.
  */
 static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
@@ -731,7 +735,7 @@ static void batadv_nc_worker(struct work_struct *work)
  * @orig_node: neighboring orig node which may be used as nc candidate
  * @ogm_packet: incoming ogm packet also used for the checks
  *
- * Returns true if:
+ * Return: true if:
  *  1) The OGM must have the most recent sequence number.
  *  2) The TTL must be decremented by one and only one.
  *  3) The OGM must be received from the first hop from orig_node.
@@ -772,7 +776,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
  *  (can be equal to orig_node)
  * @in_coding: traverse incoming or outgoing network coding list
  *
- * Returns the nc_node if found, NULL otherwise.
+ * Return: the nc_node if found, NULL otherwise.
  */
 static struct batadv_nc_node
 *batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
@@ -814,7 +818,7 @@ static struct batadv_nc_node
  *  (can be equal to orig_node)
  * @in_coding: traverse incoming or outgoing network coding list
  *
- * Returns the nc_node if found or created, NULL in case of an error.
+ * Return: the nc_node if found or created, NULL in case of an error.
  */
 static struct batadv_nc_node
 *batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
@@ -932,7 +936,7 @@ out:
  * @src: ethernet source address - first half of the nc path search key
  * @dst: ethernet destination address - second half of the nc path search key
  *
- * Returns pointer to nc_path if the path was found or created, returns NULL
+ * Return: pointer to nc_path if the path was found or created, returns NULL
  * on error.
  */
 static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
@@ -989,6 +993,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
  * batadv_nc_random_weight_tq - scale the receivers TQ-value to avoid unfair
  *  selection of a receiver with slightly lower TQ than the other
  * @tq: to be weighted tq value
+ *
+ * Return: scaled tq value
  */
 static u8 batadv_nc_random_weight_tq(u8 tq)
 {
@@ -1029,7 +1035,7 @@ static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
  * @nc_packet: structure containing the packet to the skb can be coded with
  * @neigh_node: next hop to forward packet to
  *
- * Returns true if both packets are consumed, false otherwise.
+ * Return: true if both packets are consumed, false otherwise.
  */
 static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb,
@@ -1228,7 +1234,7 @@ out:
  * Since the source encoded the packet we can be certain it has all necessary
  * decode information.
  *
- * Returns true if coding of a decoded packet is allowed.
+ * Return: true if coding of a decoded packet is allowed.
  */
 static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
 {
@@ -1246,7 +1252,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
  * @skb: data skb to forward
  * @eth_dst: next hop mac address of skb
  *
- * Returns true if coding of a decoded skb is allowed.
+ * Return: true if coding of a decoded skb is allowed.
  */
 static struct batadv_nc_packet *
 batadv_nc_path_search(struct batadv_priv *bat_priv,
@@ -1314,7 +1320,7 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
  * @eth_src: source mac address of skb
  * @in_nc_node: pointer to skb next hop's neighbor nc node
  *
- * Returns an nc packet if a suitable coding packet was found, NULL otherwise.
+ * Return: an nc packet if a suitable coding packet was found, NULL otherwise.
  */
 static struct batadv_nc_packet *
 batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
@@ -1397,7 +1403,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
  * next hop that potentially sent a packet which our next hop also received
  * (overheard) and has stored for later decoding.
  *
- * Returns true if the skb was consumed (encoded packet sent) or false otherwise
+ * Return: true if the skb was consumed (encoded packet sent) or false otherwise
  */
 static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
                                     struct batadv_neigh_node *neigh_node,
@@ -1451,7 +1457,7 @@ static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
  * @neigh_node: next hop to forward packet to
  * @packet_id: checksum to identify packet
  *
- * Returns true if the packet was buffered or false in case of an error.
+ * Return: true if the packet was buffered or false in case of an error.
  */
 static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
                                      struct batadv_nc_path *nc_path,
@@ -1485,7 +1491,7 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
  *
- * Returns true if the skb was consumed (encoded packet sent) or false otherwise
+ * Return: true if the skb was consumed (encoded packet sent) or false otherwise
  */
 bool batadv_nc_skb_forward(struct sk_buff *skb,
                           struct batadv_neigh_node *neigh_node)
@@ -1624,7 +1630,7 @@ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
  * @skb: unicast skb to decode
  * @nc_packet: decode data needed to decode the skb
  *
- * Returns pointer to decoded unicast packet if the packet was decoded or NULL
+ * Return: pointer to decoded unicast packet if the packet was decoded or NULL
  * in case of an error.
  */
 static struct batadv_unicast_packet *
@@ -1718,7 +1724,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * @ethhdr: pointer to the ethernet header inside the coded packet
  * @coded: coded packet we try to find decode data for
  *
- * Returns pointer to nc packet if the needed data was found or NULL otherwise.
+ * Return: pointer to nc packet if the needed data was found or NULL otherwise.
  */
 static struct batadv_nc_packet *
 batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
@@ -1781,6 +1787,9 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
  *  resulting unicast packet
  * @skb: incoming coded packet
  * @recv_if: pointer to interface this packet was received on
+ *
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
  */
 static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if)
@@ -1865,6 +1874,8 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
  * batadv_nc_nodes_seq_print_text - print the nc node information
  * @seq: seq file to print on
  * @offset: not used
+ *
+ * Return: always 0
  */
 int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset)
 {
@@ -1927,6 +1938,8 @@ out:
 /**
  * batadv_nc_init_debugfs - create nc folder and related files in debugfs
  * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 int batadv_nc_init_debugfs(struct batadv_priv *bat_priv)
 {
index 8f6d4ad8778ade1766f026e8bbd520ce37a95451..d6d7fb4ec5d595ae996b983309e6ffc1246338c9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
index fe578f75c39137c451fcc307e729046bd1d1c3c0..d4a30db0158a25908e4d13c51068c9c02eaf6fbd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -47,7 +47,13 @@ static struct lock_class_key batadv_orig_hash_lock_class_key;
 
 static void batadv_purge_orig(struct work_struct *work);
 
-/* returns 1 if they are the same originator */
+/**
+ * batadv_compare_orig - comparing function used in the originator hash table
+ * @node: node in the local table
+ * @data2: second object to compare the node to
+ *
+ * Return: 1 if they are the same originator
+ */
 int batadv_compare_orig(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_orig_node,
@@ -61,7 +67,7 @@ int batadv_compare_orig(const struct hlist_node *node, const void *data2)
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
  *
- * Returns the vlan object identified by vid and belonging to orig_node or NULL
+ * Return: the vlan object identified by vid and belonging to orig_node or NULL
  * if it does not exist.
  */
 struct batadv_orig_node_vlan *
@@ -93,7 +99,7 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
  *
- * Returns NULL in case of failure or the vlan object identified by vid and
+ * Return: NULL in case of failure or the vlan object identified by vid and
  * belonging to orig_node otherwise. The object is created and added to the list
  * if it does not exist.
  *
@@ -266,7 +272,7 @@ void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
  *
- * Returns the neighbor which should be router for this orig_node/iface.
+ * Return: the neighbor which should be router for this orig_node/iface.
  *
  * The object is returned with refcounter increased by 1.
  */
@@ -298,7 +304,7 @@ batadv_orig_router_get(struct batadv_orig_node *orig_node,
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
- * Returns the requested orig_ifinfo or NULL if not found.
+ * Return: the requested orig_ifinfo or NULL if not found.
  *
  * The object is returned with refcounter increased by 1.
  */
@@ -330,7 +336,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
- * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
+ * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
  * interface otherwise. The object is created and added to the list
  * if it does not exist.
  *
@@ -375,12 +381,12 @@ out:
 
 /**
  * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
- * @neigh_node: the neigh node to be queried
+ * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
  * The object is returned with refcounter increased by 1.
  *
- * Returns the requested neigh_ifinfo or NULL if not found
+ * Return: the requested neigh_ifinfo or NULL if not found
  */
 struct batadv_neigh_ifinfo *
 batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
@@ -408,10 +414,10 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
 
 /**
  * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
- * @neigh_node: the neigh node to be queried
+ * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
- * Returns NULL in case of failure or the neigh_ifinfo object for the
+ * Return: NULL in case of failure or the neigh_ifinfo object for the
  * if_outgoing interface otherwise. The object is created and added to the list
  * if it does not exist.
  *
@@ -459,7 +465,8 @@ out:
  *
  * Looks for and possibly returns a neighbour belonging to this originator list
  * which is connected through the provided hard interface.
- * Returns NULL if the neighbour is not found.
+ *
+ * Return: neighbor when found. Otherwise NULL
  */
 static struct batadv_neigh_node *
 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
@@ -492,7 +499,7 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
  *
- * Returns the hardif neighbour node if found or created or NULL otherwise.
+ * Return: the hardif neighbour node if found or created or NULL otherwise.
  */
 static struct batadv_hardif_neigh_node *
 batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
@@ -540,7 +547,7 @@ out:
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
  *
- * Returns the hardif neighbour node if found or created or NULL otherwise.
+ * Return: the hardif neighbour node if found or created or NULL otherwise.
  */
 static struct batadv_hardif_neigh_node *
 batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
@@ -562,7 +569,8 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
  * @neigh_addr: the address of the neighbour
  *
  * Looks for and possibly returns a neighbour belonging to this hard interface.
- * Returns NULL if the neighbour is not found.
+ *
+ * Return: neighbor when found. Otherwise NULL
  */
 struct batadv_hardif_neigh_node *
 batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
@@ -594,7 +602,8 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
  * @neigh_addr: the mac address of the neighbour interface
  *
  * Allocates a new neigh_node object and initialises all the generic fields.
- * Returns the new object or NULL on failure.
+ *
+ * Return: the new object or NULL on failure.
  */
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_orig_node *orig_node,
@@ -656,7 +665,7 @@ out:
  * @seq: neighbour table seq_file struct
  * @offset: not used
  *
- * Always returns 0.
+ * Return: always 0
  */
 int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 {
@@ -820,7 +829,8 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
  *
  * Creates a new originator object and initialise all the generic fields.
  * The new object is not added to the originator list.
- * Returns the newly created object or NULL on failure.
+ *
+ * Return: the newly created object or NULL on failure.
  */
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const u8 *addr)
@@ -937,7 +947,7 @@ batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
- * Returns true if any ifinfo entry was purged, false otherwise.
+ * Return: true if any ifinfo entry was purged, false otherwise.
  */
 static bool
 batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
@@ -989,7 +999,7 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
- * Returns true if any neighbor was purged, false otherwise
+ * Return: true if any neighbor was purged, false otherwise
  */
 static bool
 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
@@ -1048,7 +1058,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
  * @orig_node: orig node which is to be checked
  * @if_outgoing: the interface for which the metric should be compared
  *
- * Returns the current best neighbor, with refcount increased.
+ * Return: the current best neighbor, with refcount increased.
  */
 static struct batadv_neigh_node *
 batadv_find_best_neighbor(struct batadv_priv *bat_priv,
@@ -1085,7 +1095,7 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv,
  * This function checks if the orig_node or substructures of it have become
  * obsolete, and purges this information if that's the case.
  *
- * Returns true if the orig_node is to be removed, false otherwise.
+ * Return: true if the orig_node is to be removed, false otherwise.
  */
 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                                   struct batadv_orig_node *orig_node)
@@ -1230,7 +1240,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
  * @seq: debugfs table seq_file struct
  * @offset: not used
  *
- * Returns 0
+ * Return: 0
  */
 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
 {
index cf0730414ed22e0a99a0415202536b13ad2ae479..745b4e4fcdc4d4b316ac8e9ab84c3e56a07a35fc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 0558e3237e0e7e38a42ff7cfc7f266dc90f69709..e7f915181abaedd51dfda248d7a9d08bf66fecb0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -158,7 +158,7 @@ enum batadv_tt_client_flags {
 };
 
 /**
- * batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
  * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
  */
 enum batadv_vlan_flags {
@@ -209,6 +209,11 @@ struct batadv_bla_claim_dst {
  * @version: batman-adv protocol version, part of the genereal header
  * @ttl: time to live for this packet, part of the genereal header
  * @flags: contains routing relevant flags - see enum batadv_iv_flags
+ * @seqno: sequence identification
+ * @orig: address of the source node
+ * @prev_sender: address of the previous sender
+ * @reserved: reserved byte for alignment
+ * @tq: transmission quality
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
@@ -230,7 +235,7 @@ struct batadv_ogm_packet {
 #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
 
 /**
- * batadv_icmp_header - common members among all the ICMP packets
+ * struct batadv_icmp_header - common members among all the ICMP packets
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the genereal header
  * @ttl: time to live for this packet, part of the genereal header
@@ -256,7 +261,7 @@ struct batadv_icmp_header {
 };
 
 /**
- * batadv_icmp_packet - ICMP packet
+ * struct batadv_icmp_packet - ICMP packet
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the genereal header
  * @ttl: time to live for this packet, part of the genereal header
@@ -282,7 +287,7 @@ struct batadv_icmp_packet {
 #define BATADV_RR_LEN 16
 
 /**
- * batadv_icmp_packet_rr - ICMP RouteRecord packet
+ * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the genereal header
  * @ttl: time to live for this packet, part of the genereal header
@@ -345,6 +350,7 @@ struct batadv_unicast_packet {
  * @u: common unicast packet header
  * @src: address of the source
  * @subtype: packet subtype
+ * @reserved: reserved byte for alignment
  */
 struct batadv_unicast_4addr_packet {
        struct batadv_unicast_packet u;
@@ -413,7 +419,6 @@ struct batadv_bcast_packet {
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the genereal header
  * @ttl: time to live for this packet, part of the genereal header
- * @reserved: Align following fields to 2-byte boundaries
  * @first_source: original source of first included packet
  * @first_orig_dest: original destinal of first included packet
  * @first_crc: checksum of first included packet
@@ -495,7 +500,7 @@ struct batadv_tvlv_gateway_data {
  * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
  * @flags: translation table flags (see batadv_tt_data_flags)
  * @ttvn: translation table version number
- * @vlan_num: number of announced VLANs. In the TVLV this struct is followed by
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
  *  one batadv_tvlv_tt_vlan_data object per announced vlan
  */
 struct batadv_tvlv_tt_data {
index e4f2646d92463a915b3a52a43b46c6f0736b5e52..f4b60b1fb50edb2d9a063047340eb008b9375244 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -140,9 +140,17 @@ out:
                batadv_neigh_node_free_ref(router);
 }
 
-/* checks whether the host restarted and is in the protection time.
- * returns:
- *  0 if the packet is to be accepted
+/**
+ * batadv_window_protected - checks whether the host restarted and is in the
+ *  protection time.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq_num_diff: difference between the current/received sequence number and
+ *  the last sequence number
+ * @last_reset: jiffies timestamp of the last reset, will be updated when reset
+ *  is detected
+ *
+ * Return:
+ *  0 if the packet is to be accepted.
  *  1 if the packet is to be ignored.
  */
 int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
@@ -198,7 +206,7 @@ bool batadv_check_management_packet(struct sk_buff *skb,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: icmp packet to process
  *
- * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
  * otherwise.
  */
 static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
@@ -398,10 +406,11 @@ out:
  * @skb: packet to check
  * @hdr_size: size of header to pull
  *
- * Check for short header and bad addresses in given packet. Returns negative
- * value when check fails and 0 otherwise. The negative value depends on the
- * reason: -ENODATA for bad header, -EBADR for broadcast destination or source,
- * and -EREMOTE for non-local (other host) destination.
+ * Check for short header and bad addresses in given packet.
+ *
+ * Return: negative value when check fails and 0 otherwise. The negative value
+ * depends on the reason: -ENODATA for bad header, -EBADR for broadcast
+ * destination or source, and -EREMOTE for non-local (other host) destination.
  */
 static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
                                       struct sk_buff *skb, int hdr_size)
@@ -435,7 +444,7 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
  * @orig_node: the destination node
  * @recv_if: pointer to interface this packet was received on
  *
- * Returns the router which should be used for this orig_node on
+ * Return: the router which should be used for this orig_node on
  * this interface, or NULL if not available.
  */
 struct batadv_neigh_node *
@@ -648,7 +657,7 @@ out:
  * the new corresponding information (originator address where the destination
  * client currently is and its known TTVN)
  *
- * Returns true if the packet header has been updated, false otherwise
+ * Return: true if the packet header has been updated, false otherwise
  */
 static bool
 batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
@@ -805,7 +814,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
  *
- * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
  * otherwise.
  */
 int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
@@ -904,9 +913,8 @@ rx_success:
  * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
- * @dst_addr: the payload destination
  *
- * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
  * otherwise.
  */
 int batadv_recv_unicast_tvlv(struct sk_buff *skb,
@@ -960,7 +968,7 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
  * the assembled packet will exceed our MTU; 2) Buffer fragment, if we till
  * lack further fragments; 3) Merge fragments, if we have all needed parts.
  *
- * Return NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
+ * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
  */
 int batadv_recv_frag_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if)
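
The kernel-doc rewrites above spell out the return convention of batadv_window_protected(): 0 means the packet is to be accepted, 1 means the post-restart protection window is still active and the packet should be ignored. A minimal caller sketch follows (illustration only, not part of this merge; the wrapper name and its use of the NET_RX_* codes are invented for the example, only batadv_window_protected() itself comes from the hunk above):

/* Illustration only: how the documented 0/1 convention is typically used. */
static int example_seqno_gate(struct batadv_priv *bat_priv, s32 seq_diff,
			      unsigned long *last_reset)
{
	/* non-zero while the host-restart protection window is active */
	if (batadv_window_protected(bat_priv, seq_diff, last_reset))
		return NET_RX_DROP;	/* ignore the packet */

	return NET_RX_SUCCESS;		/* accept the packet */
}
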
index 204bbe4952a6d27848f78fd932b8c077baea8382..c776e9655b9b4f0cb63888729a4132c378905902 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 782fa33ec296a85a2869e40fec9dabc6c89da923..d8b03fd604e0f7e6970a44f66615b145b22575e9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -111,7 +111,7 @@ send_skb_err:
  * host, NULL can be passed as recv_if and no interface alternating is
  * attempted.
  *
- * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
+ * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
  * NET_XMIT_POLICED if the skb is buffered for later transmit.
  */
 int batadv_send_skb_to_orig(struct sk_buff *skb,
@@ -165,7 +165,7 @@ out:
  * @hdr_size: amount of bytes to push at the beginning of the skb
  * @orig_node: the destination node
  *
- * Returns false if the buffer extension was not possible or true otherwise.
+ * Return: false if the buffer extension was not possible or true otherwise.
  */
 static bool
 batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
@@ -196,7 +196,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
- * Returns false if the payload could not be encapsulated or true otherwise.
+ * Return: false if the payload could not be encapsulated or true otherwise.
  */
 static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
                                            struct batadv_orig_node *orig_node)
@@ -211,10 +211,10 @@ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
  *  unicast 4addr header
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
+ * @orig: the destination node
  * @packet_subtype: the unicast 4addr packet subtype to use
  *
- * Returns false if the payload could not be encapsulated or true otherwise.
+ * Return: false if the payload could not be encapsulated or true otherwise.
  */
 bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb,
@@ -265,7 +265,7 @@ out:
  * as packet_type. Then send this frame to the given orig_node and release a
  * reference to this orig_node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
                            struct sk_buff *skb, int packet_type,
@@ -339,7 +339,7 @@ out:
  * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
  * to the according destination node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb, int packet_type,
@@ -373,7 +373,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
  * Look up the currently selected gateway. Wrap the given skb into a batman-adv
  * unicast header and send this frame to this gateway node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
                           unsigned short vid)
@@ -430,14 +430,19 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                           send_time);
 }
 
-/* add a broadcast packet to the queue and setup timers. broadcast packets
- * are sent multiple times to increase probability for being received.
+/**
+ * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
  *
- * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
- * errors.
+ * Add a broadcast packet to the queue and set up timers. Broadcast packets
+ * are sent multiple times to increase the probability of being received.
  *
  * The skb is not consumed, so the caller should make sure that the
  * skb is freed.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
  */
 int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
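
The reworked comment above makes two contract points explicit for batadv_add_bcast_packet_to_list(): it reports NETDEV_TX_OK or NETDEV_TX_BUSY, and it never consumes the caller's skb. A hedged caller sketch (not from this merge; the helper name, the one-jiffy delay and the logging are invented for illustration):

/* Illustration only: the queue takes its own copy, so the caller keeps
 * ownership of skb and must still release its reference.
 */
static void example_queue_broadcast(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
{
	if (batadv_add_bcast_packet_to_list(bat_priv, skb, 1) != NETDEV_TX_OK)
		pr_debug("batadv: broadcast queue full, packet not scheduled\n");

	consume_skb(skb);	/* caller's reference was not consumed above */
}
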
index 82059f259e4682d52997eb1a0f8692c627569ec9..7ff95cada2e743c6981fe443528133c590771b44 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -69,7 +69,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * header via the translation table. Wrap the given skb into a batman-adv
  * unicast header. Then send this frame to the according destination node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
                                         struct sk_buff *skb, u8 *dst_hint,
@@ -92,7 +92,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
  * unicast-4addr header. Then send this frame to the according destination
  * node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
                                               struct sk_buff *skb,
index ac4d08de5df46abc5c7986b29eb89627fe160fc3..4bf35b8c3d238ef51232276678a439e67734f0b9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -480,7 +480,7 @@ out:
 /**
  * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and
  *  possibly free it
- * @softif_vlan: the vlan object to release
+ * @vlan: the vlan object to release
  */
 void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
@@ -501,7 +501,7 @@ void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the identifier of the vlan object to retrieve
  *
- * Returns the private data of the vlan matching the vid passed as argument or
+ * Return: the private data of the vlan matching the vid passed as argument or
  * NULL otherwise. The refcounter of the returned object is incremented by 1.
  */
 struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
@@ -530,7 +530,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
- * Returns 0 on success, a negative error otherwise.
+ * Return: 0 on success, a negative error otherwise.
  */
 int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 {
@@ -594,12 +594,13 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
 /**
  * batadv_interface_add_vid - ndo_add_vid API implementation
  * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id
  * @vid: identifier of the new vlan
  *
  * Set up all the internal structures for handling the new vlan on top of the
  * mesh interface
  *
- * Returns 0 on success or a negative error code in case of failure.
+ * Return: 0 on success or a negative error code in case of failure.
  */
 static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
                                    unsigned short vid)
@@ -651,12 +652,13 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 /**
  * batadv_interface_kill_vid - ndo_kill_vid API implementation
  * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id
  * @vid: identifier of the deleted vlan
  *
  * Destroy all the internal structures used to handle the vlan identified by vid
  * on top of the mesh interface
  *
- * Returns 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
+ * Return: 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
  * or -ENOENT if the specified vlan id wasn't registered.
  */
 static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
@@ -745,7 +747,7 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
  * batadv_softif_init_late - late stage initialization of soft interface
  * @dev: registered network device to modify
  *
- * Returns error code on failures
+ * Return: error code on failures
  */
 static int batadv_softif_init_late(struct net_device *dev)
 {
@@ -847,7 +849,7 @@ free_bat_counters:
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should become the slave interface
  *
- * Return 0 if successful or error otherwise.
+ * Return: 0 if successful or error otherwise.
  */
 static int batadv_softif_slave_add(struct net_device *dev,
                                   struct net_device *slave_dev)
@@ -872,7 +874,7 @@ out:
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should be removed from the master interface
  *
- * Return 0 if successful or error otherwise.
+ * Return: 0 if successful or error otherwise.
  */
 static int batadv_softif_slave_del(struct net_device *dev,
                                   struct net_device *slave_dev)
index 8e82176f40b1f4f5705f4b213b94f2971f885ac8..d17cfbacf8093fb125ace2965ee8dcaec4c60a2d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index fe87777fda8a0a0e2074adffd9b833284c370006..964fc5986b2c1eb6521be386264f4702745a87b5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -64,7 +64,7 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
  * batadv_vlan_kobj_to_batpriv - convert a vlan kobj in the associated batpriv
  * @obj: kobject to covert
  *
- * Returns the associated batadv_priv struct.
+ * Return: the associated batadv_priv struct.
  */
 static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
 {
@@ -82,9 +82,10 @@ static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
 
 /**
  * batadv_kobj_to_vlan - convert a kobj in the associated softif_vlan struct
+ * @bat_priv: the bat priv with all the soft interface information
  * @obj: kobject to covert
  *
- * Returns the associated softif_vlan struct if found, NULL otherwise.
+ * Return: the associated softif_vlan struct if found, NULL otherwise.
  */
 static struct batadv_softif_vlan *
 batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
@@ -491,7 +492,7 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer that will contain the data to send back to the user
  *
- * Returns the number of bytes written into 'buff' on success or a negative
+ * Return: the number of bytes written into 'buff' on success or a negative
  * error code in case of failure
  */
 static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
@@ -511,7 +512,7 @@ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
  * @buff: the buffer containing the user data
  * @count: number of bytes in the buffer
  *
- * Returns 'count' on success or a negative error code in case of failure
+ * Return: 'count' on success or a negative error code in case of failure
  */
 static ssize_t batadv_store_isolation_mark(struct kobject *kobj,
                                           struct attribute *attr, char *buff,
@@ -620,9 +621,7 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
 
 BATADV_ATTR_VLAN_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
 
-/**
- * batadv_vlan_attrs - array of vlan specific sysfs attributes
- */
+/* array of vlan specific sysfs attributes */
 static struct batadv_attribute *batadv_vlan_attrs[] = {
        &batadv_attr_vlan_ap_isolation,
        NULL,
@@ -683,7 +682,7 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
  * @dev: netdev of the mesh interface
  * @vlan: private data of the newly added VLAN interface
  *
- * Returns 0 on success and -ENOMEM if any of the structure allocations fails.
+ * Return: 0 on success and -ENOMEM if any of the structure allocations fails.
  */
 int batadv_sysfs_add_vlan(struct net_device *dev,
                          struct batadv_softif_vlan *vlan)
index 61974428a7af3c9c9494f25f57d52fd2811bc923..c76021b4e1980a75bb8daa366b95d53e653094f4 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index cdfc85fa2743c78d4e0e3e269085ec5d6f1fb22f..0dc8a5ca33bf83367862303c17439e319da7864a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
@@ -68,7 +68,15 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                                 unsigned short vid, const char *message,
                                 bool roaming);
 
-/* returns 1 if they are the same mac addr and vid */
+/**
+ * batadv_compare_tt - check if two TT entries are the same
+ * @node: the list element pointer of the first TT entry
+ * @data2: pointer to the tt_common_entry of the second TT entry
+ *
+ * Compare the MAC address and the VLAN ID of the two TT entries and check if
+ * they are the same TT client.
+ * Return: 1 if the two TT clients are the same, 0 otherwise
+ */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_tt_common_entry,
@@ -84,7 +92,7 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
  * @data: pointer to the tt_common_entry object to map
  * @size: the size of the hash table
  *
- * Returns the hash index where the object represented by 'data' should be
+ * Return: the hash index where the object represented by 'data' should be
  * stored at.
  */
 static inline u32 batadv_choose_tt(const void *data, u32 size)
@@ -105,7 +113,7 @@ static inline u32 batadv_choose_tt(const void *data, u32 size)
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
  *
- * Returns a pointer to the tt_common struct belonging to the searched client if
+ * Return: a pointer to the tt_common struct belonging to the searched client if
  * found, NULL otherwise.
  */
 static struct batadv_tt_common_entry *
@@ -150,7 +158,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
  *
- * Returns a pointer to the corresponding tt_local_entry struct if the client is
+ * Return: a pointer to the corresponding tt_local_entry struct if the client is
  * found, NULL otherwise.
  */
 static struct batadv_tt_local_entry *
@@ -175,7 +183,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
  *
- * Returns a pointer to the corresponding tt_global_entry struct if the client
+ * Return: a pointer to the corresponding tt_global_entry struct if the client
  * is found, NULL otherwise.
  */
 static struct batadv_tt_global_entry *
@@ -217,11 +225,11 @@ batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
 
 /**
  * batadv_tt_global_hash_count - count the number of orig entries
- * @hash: hash table containing the tt entries
+ * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to count entries for
  * @vid: VLAN identifier
  *
- * Return the number of originators advertising the given address/data
+ * Return: the number of originators advertising the given address/data
  * (excluding ourself).
  */
 int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
@@ -286,9 +294,9 @@ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_size_mod - change the size by v of the local table
- *  identified by vid
- * @bat_priv: the bat priv with all the soft interface information
+ * batadv_tt_global_size_mod - change the size by v of the global table
+ *  for orig_node identified by vid
+ * @orig_node: the originator for which the table has to be modified
  * @vid: the VLAN identifier
  * @v: the amount to sum to the global table size
  */
@@ -435,7 +443,7 @@ unlock:
  * batadv_tt_len - compute length in bytes of given number of tt changes
  * @changes_num: number of tt changes
  *
- * Returns computed length in bytes.
+ * Return: computed length in bytes.
  */
 static int batadv_tt_len(int changes_num)
 {
@@ -446,7 +454,7 @@ static int batadv_tt_len(int changes_num)
  * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
  * @tt_len: available space
  *
- * Returns the number of entries.
+ * Return: the number of entries.
  */
 static u16 batadv_tt_entries(u16 tt_len)
 {
@@ -458,7 +466,7 @@ static u16 batadv_tt_entries(u16 tt_len)
  *  size when transmitted over the air
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Returns local translation table size in bytes.
+ * Return: local translation table size in bytes.
  */
 static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
 {
@@ -524,7 +532,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
  * @mark: the value contained in the skb->mark field of the received packet (if
  *  any)
  *
- * Returns true if the client was successfully added, false otherwise.
+ * Return: true if the client was successfully added, false otherwise.
  */
 bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
                         unsigned short vid, int ifindex, u32 mark)
@@ -719,12 +727,11 @@ out:
  *  function reserves the amount of space needed to send the entire global TT
  *  table. In case of success the value is updated with the real amount of
  *  reserved bytes
-
  * Allocate the needed amount of memory for the entire TT TVLV and write its
  * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
  * objects, one per active VLAN served by the originator node.
  *
- * Return the size of the allocated buffer or 0 in case of failure.
+ * Return: the size of the allocated buffer or 0 in case of failure.
  */
 static u16
 batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
@@ -798,7 +805,7 @@ out:
  * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
  * objects, one per active VLAN.
  *
- * Return the size of the allocated buffer or 0 in case of failure.
+ * Return: the size of the allocated buffer or 0 in case of failure.
  */
 static u16
 batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
@@ -1040,7 +1047,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
  * @message: message to append to the log on deletion
  * @roaming: true if the deletion is due to a roaming event
  *
- * Returns the flags assigned to the local entry before being deleted
+ * Return: the flags assigned to the local entry before being deleted
  */
 u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
                           unsigned short vid, const char *message,
@@ -1240,10 +1247,16 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
        spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 }
 
-/* retrieves the orig_tt_list_entry belonging to orig_node from the
+/**
+ * batadv_tt_global_orig_entry_find - find a TT orig_list_entry
+ * @entry: the TT global entry where the orig_list_entry has to be
+ *  extracted from
+ * @orig_node: the originator for which the orig_list_entry has to be found
+ *
+ * Retrieve the orig_tt_list_entry belonging to orig_node from the
  * batadv_tt_global_entry list
  *
- * returns it with an increased refcounter, NULL if not found
+ * Return: the orig_tt_list_entry with an increased refcounter, NULL if not found
  */
 static struct batadv_tt_orig_list_entry *
 batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
@@ -1268,8 +1281,15 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
        return orig_entry;
 }
 
-/* find out if an orig_node is already in the list of a tt_global_entry.
- * returns true if found, false otherwise
+/**
+ * batadv_tt_global_entry_has_orig - check if a TT global entry is also handled
+ *  by a given originator
+ * @entry: the TT global entry to check
+ * @orig_node: the originator to search in the list
+ *
+ * Find out if an orig_node is already in the list of a tt_global_entry.
+ *
+ * Return: true if found, false otherwise
  */
 static bool
 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
@@ -1341,7 +1361,7 @@ out:
  *
  * The caller must hold orig_node refcount.
  *
- * Return true if the new entry has been added, false otherwise
+ * Return: true if the new entry has been added, false otherwise
  */
 static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                                 struct batadv_orig_node *orig_node,
@@ -1499,7 +1519,7 @@ out:
  * @tt_global_entry: global translation table entry to be analyzed
  *
  * This functon assumes the caller holds rcu_read_lock().
- * Returns best originator list entry or NULL on errors.
+ * Return: best originator list entry or NULL on errors.
  */
 static struct batadv_tt_orig_list_entry *
 batadv_transtable_best_orig(struct batadv_priv *bat_priv,
@@ -2029,7 +2049,7 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
  * @addr: mac address of the destination client
  * @vid: VLAN identifier
  *
- * Returns a pointer to the originator that was selected as destination in the
+ * Return: a pointer to the originator that was selected as destination in the
  * mesh for contacting the client 'addr', NULL otherwise.
  * In case of multiple originators serving the same client, the function returns
  * the best one (best in terms of metric towards the destination node).
@@ -2104,7 +2124,7 @@ out:
  * because the XOR operation can combine them all while trying to reduce the
  * noise as much as possible.
  *
- * Returns the checksum of the global table of a given originator.
+ * Return: the checksum of the global table of a given originator.
  */
 static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                struct batadv_orig_node *orig_node,
@@ -2181,7 +2201,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
  * For details about the computation, please refer to the documentation for
  * batadv_tt_global_crc().
  *
- * Returns the checksum of the local table
+ * Return: the checksum of the local table
  */
 static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
                               unsigned short vid)
@@ -2287,7 +2307,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node this request is being issued for
  *
- * Returns the pointer to the new tt_req_node struct if no request
+ * Return: the pointer to the new tt_req_node struct if no request
  * has already been issued for this orig_node, NULL otherwise.
  */
 static struct batadv_tt_req_node *
@@ -2322,7 +2342,7 @@ unlock:
  * @entry_ptr: to be checked local tt entry
  * @data_ptr: not used but definition required to satisfy the callback prototype
  *
- * Returns 1 if the entry is a valid, 0 otherwise.
+ * Return: 1 if the entry is valid, 0 otherwise.
  */
 static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
 {
@@ -2406,9 +2426,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
  * @orig_node: originator for which the CRCs have to be checked
  * @tt_vlan: pointer to the first tvlv VLAN entry
  * @num_vlan: number of tvlv VLAN entries
- * @create: if true, create VLAN objects if not found
  *
- * Return true if all the received CRCs match the locally stored ones, false
+ * Return: true if all the received CRCs match the locally stored ones, false
  * otherwise
  */
 static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
@@ -2511,6 +2530,8 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
  * @num_vlan: number of tvlv VLAN entries
  * @full_table: ask for the entire translation table if true, while only for the
  *  last TT diff otherwise
+ *
+ * Return: true if the TT Request was sent, false otherwise
  */
 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
                                  struct batadv_orig_node *dst_orig_node,
@@ -2591,7 +2612,7 @@ out:
  * @req_src: mac address of tt request sender
  * @req_dst: mac address of tt request recipient
  *
- * Returns true if tt request reply was sent, false otherwise.
+ * Return: true if tt request reply was sent, false otherwise.
  */
 static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                                          struct batadv_tvlv_tt_data *tt_data,
@@ -2723,7 +2744,7 @@ out:
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
  *
- * Returns true if tt request reply was sent, false otherwise.
+ * Return: true if tt request reply was sent, false otherwise.
  */
 static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                                       struct batadv_tvlv_tt_data *tt_data,
@@ -2841,7 +2862,7 @@ out:
  * @req_src: mac address of tt request sender
  * @req_dst: mac address of tt request recipient
  *
- * Returns true if tt request reply was sent, false otherwise.
+ * Return: true if tt request reply was sent, false otherwise.
  */
 static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
                                    struct batadv_tvlv_tt_data *tt_data,
@@ -2936,7 +2957,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
  *
- * Returns true if the client is served by this node, false otherwise.
+ * Return: true if the client is served by this node, false otherwise.
  */
 bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
                         unsigned short vid)
@@ -3053,11 +3074,16 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
        spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
-/* This function checks whether the client already reached the
+/**
+ * batadv_tt_check_roam_count - check if a client has roamed too frequently
+ * @bat_priv: the bat priv with all the soft interface information
+ * @client: mac address of the roaming client
+ *
+ * This function checks whether the client already reached the
  * maximum number of possible roaming phases. In this case the ROAMING_ADV
  * will not be sent.
  *
- * returns true if the ROAMING_ADV can be sent, false otherwise
+ * Return: true if the ROAMING_ADV can be sent, false otherwise
  */
 static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
 {
@@ -3369,13 +3395,12 @@ out:
  * batadv_tt_update_orig - update global translation table with new tt
  *  information received via ogms
  * @bat_priv: the bat priv with all the soft interface information
- * @orig: the orig_node of the ogm
- * @tt_vlan: pointer to the first tvlv VLAN entry
+ * @orig_node: the orig_node of the ogm
+ * @tt_buff: pointer to the first tvlv VLAN entry
  * @tt_num_vlan: number of tvlv VLAN entries
  * @tt_change: pointer to the first entry in the TT buffer
  * @tt_num_changes: number of tt changes inside the tt buffer
  * @ttvn: translation table version number of this changeset
- * @tt_crc: crc32 checksum of orig node's translation table
  */
 static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                                  struct batadv_orig_node *orig_node,
@@ -3457,7 +3482,7 @@ request_table:
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
  *
- * Returns true if we know that the client has moved from its old originator
+ * Return: true if we know that the client has moved from its old originator
  * to another one. This entry is still kept for consistency purposes and will be
  * deleted later by a DEL or because of timeout
  */
@@ -3483,7 +3508,7 @@ out:
  * @addr: the mac address of the local client to query
  * @vid: VLAN identifier
  *
- * Returns true if the local client is known to be roaming (it is not served by
+ * Return: true if the local client is known to be roaming (it is not served by
  * this node anymore) or not. If yes, the client is still present in the table
  * to keep the latter consistent with the node TTVN
  */
@@ -3612,7 +3637,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
  * @tvlv_value: tvlv buffer containing the tt data
  * @tvlv_value_len: tvlv buffer length
  *
- * Returns NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
+ * Return: NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
  * otherwise.
  */
 static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
@@ -3693,7 +3718,7 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
  * @tvlv_value: tvlv buffer containing the tt data
  * @tvlv_value_len: tvlv buffer length
  *
- * Returns NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
+ * Return: NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
  * otherwise.
  */
 static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
@@ -3739,7 +3764,7 @@ out:
  * batadv_tt_init - initialise the translation table internals
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Return 0 on success or negative error number in case of failure.
+ * Return: 0 on success or negative error number in case of failure.
  */
 int batadv_tt_init(struct batadv_priv *bat_priv)
 {
@@ -3777,7 +3802,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
  * @addr: the mac address of the client
  * @vid: the identifier of the VLAN where this client is connected
  *
- * Returns true if the client is marked with the TT_CLIENT_ISOLA flag, false
+ * Return: true if the client is marked with the TT_CLIENT_ISOLA flag, false
  * otherwise
  */
 bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
index abd8e116e5fb0dad0ca4bd74d18ad8e77490d024..7c7e2c006bfe07d48ce81c5bfa7d08b495ce2245 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
index 3437b667a2cd670965cc12479c2b2169f7020803..8974bc0dc15cabd44926dba01ad8064d912fcb4a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -73,7 +73,7 @@ enum batadv_dhcp_recipient {
 #define BATADV_TT_SYNC_MASK    0x00F0
 
 /**
- * struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data
+ * struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
  * @ogm_buff: buffer holding the OGM packet
  * @ogm_buff_len: length of the OGM packet buffer
  * @ogm_seqno: OGM sequence number - used to identify each OGM
@@ -97,8 +97,8 @@ struct batadv_hard_iface_bat_iv {
  *  batman-adv for this interface
  * @soft_iface: the batman-adv interface which uses this network interface
  * @rcu: struct used for freeing in an RCU-safe manner
- * @bat_iv: BATMAN IV specific per hard interface data
- * @cleanup_work: work queue callback item for hard interface deinit
+ * @bat_iv: per hard-interface B.A.T.M.A.N. IV data
+ * @cleanup_work: work queue callback item for hard-interface deinit
  * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
  * @neigh_list: list of unique single hop neighbors via this interface
  * @neigh_list_lock: lock protecting neigh_list
@@ -125,7 +125,7 @@ struct batadv_hard_iface {
 /**
  * struct batadv_orig_ifinfo - originator info per outgoing interface
  * @list: list node for orig_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard interface
+ * @if_outgoing: pointer to outgoing hard-interface
  * @router: router that should be used to reach this originator
  * @last_real_seqno: last and best known sequence number
  * @last_ttl: ttl of last received packet
@@ -202,7 +202,7 @@ struct batadv_orig_node_vlan {
 
 /**
  * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: set of bitfields (one per hard interface) where each one counts
+ * @bcast_own: set of bitfields (one per hard-interface) where each one counts
  * the number of our OGMs this orig_node rebroadcasted "back" to us  (relative
  * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
  * @bcast_own_sum: sum of bcast_own
@@ -346,10 +346,11 @@ struct batadv_gw_node {
 };
 
 /**
- * batadv_hardif_neigh_node - unique neighbor per hard interface
+ * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
  * @list: list node for batadv_hard_iface::neigh_list
  * @addr: the MAC address of the neighboring interface
- * @if_incoming: pointer to incoming hard interface
+ * @if_incoming: pointer to incoming hard-interface
+ * @last_seen: when last packet via this neighbor was received
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in a RCU-safe manner
  */
@@ -369,7 +370,7 @@ struct batadv_hardif_neigh_node {
  * @addr: the MAC address of the neighboring interface
  * @ifinfo_list: list for routing metrics per outgoing interface
  * @ifinfo_lock: lock protecting private ifinfo members and list
- * @if_incoming: pointer to incoming hard interface
+ * @if_incoming: pointer to incoming hard-interface
  * @last_seen: when last packet via this neighbor was received
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -388,7 +389,7 @@ struct batadv_neigh_node {
 
 /**
  * struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
- *  interface for BATMAN IV
+ *  interface for B.A.T.M.A.N. IV
  * @tq_recv: ring buffer of received TQ values from this neigh node
  * @tq_index: ring buffer index
  * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
@@ -407,7 +408,7 @@ struct batadv_neigh_ifinfo_bat_iv {
 /**
  * struct batadv_neigh_ifinfo - neighbor information per outgoing interface
  * @list: list node for batadv_neigh_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard interface
+ * @if_outgoing: pointer to outgoing hard-interface
  * @bat_iv: B.A.T.M.A.N. IV private structure
  * @last_ttl: last received ttl from this neigh node
  * @refcount: number of contexts the object is used
@@ -771,6 +772,9 @@ struct batadv_softif_vlan {
  * @orig_interval: OGM broadcast interval in milliseconds
  * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
  * @log_level: configured log level (see batadv_dbg_level)
+ * @isolation_mark: the skb->mark value used to match packets for AP isolation
+ * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be used
+ *  for the isolation mark
  * @bcast_seqno: last sent broadcast packet sequence number
  * @bcast_queue_left: number of remaining buffered broadcast packet slots
  * @batman_queue_left: number of remaining OGM packet slots
@@ -783,8 +787,8 @@ struct batadv_softif_vlan {
  * @forw_bat_list_lock: lock protecting forw_bat_list
  * @forw_bcast_list_lock: lock protecting forw_bcast_list
  * @orig_work: work queue callback item for orig node purging
- * @cleanup_work: work queue callback item for soft interface deinit
- * @primary_if: one of the hard interfaces assigned to this mesh interface
+ * @cleanup_work: work queue callback item for soft-interface deinit
+ * @primary_if: one of the hard-interfaces assigned to this mesh interface
  *  becomes the primary interface
  * @bat_algo_ops: routing algorithm used by this mesh interface
  * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
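
The @isolation_mark and @isolation_mark_mask descriptions added above describe a masked skb->mark comparison. As a hedged illustration of that semantic (not part of this merge; the helper is invented, only the two batadv_priv members documented above are assumed):

/* Illustration only: matching a frame against the configured AP-isolation
 * mark, using the mask to select the relevant bits of skb->mark.
 */
static bool example_matches_isolation_mark(const struct batadv_priv *bat_priv,
					   const struct sk_buff *skb)
{
	return (skb->mark & bat_priv->isolation_mark_mask) ==
	       bat_priv->isolation_mark;
}
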
index d040365ba98e7f7cafcbedd20ac3b4c632603414..8a4cc2f7f0db2a277e4281d5be62d76c7446a716 100644 (file)
@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
        /* check that it's our buffer */
        if (lowpan_is_ipv6(*skb_network_header(skb))) {
+               /* Pull off the 1-byte of 6lowpan header. */
+               skb_pull(skb, 1);
+
                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
+               local_skb->dev = dev;
 
                skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
 
@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
                if (!local_skb)
                        goto drop;
 
+               local_skb->dev = dev;
+
                ret = iphc_decompress(local_skb, dev, chan);
                if (ret < 0) {
                        kfree_skb(local_skb);
@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
-               local_skb->dev = dev;
 
                if (give_skb_to_upper(local_skb, dev)
                                != NET_RX_SUCCESS) {
index 95d1a66ba03aa20095932a1c45f9f76af2cc1393..06c31b9a68b0bce3cc4f28224c3c23864f78e7c9 100644 (file)
@@ -69,6 +69,15 @@ config BT_6LOWPAN
        help
          IPv6 compression over Bluetooth Low Energy.
 
+config BT_LEDS
+       bool "Enable LED triggers"
+       depends on BT
+       depends on LEDS_CLASS
+       select LEDS_TRIGGERS
+       help
+         This option selects a few LED triggers for different
+         Bluetooth events.
+
 config BT_SELFTEST
        bool "Bluetooth self testing support"
        depends on BT && DEBUG_KERNEL
index 2b15ae8c1def06642682c488a9439f0e57feeb13..b3ff12eb9b6dcdaa590bacc997cbe2e0e566783e 100644 (file)
@@ -17,6 +17,7 @@ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
 
 bluetooth-$(CONFIG_BT_BREDR) += sco.o
 bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
+bluetooth-$(CONFIG_BT_LEDS) += leds.o
 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
index 47bcef7547967ce544ff0cf5ea7dabecdd080d90..541760fe53d4cf91553fb0b126de340007d453e5 100644 (file)
@@ -40,6 +40,7 @@
 #include "hci_request.h"
 #include "hci_debugfs.h"
 #include "smp.h"
+#include "leds.h"
 
 static void hci_rx_work(struct work_struct *work);
 static void hci_cmd_work(struct work_struct *work);
@@ -1395,6 +1396,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
+               hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
@@ -1532,6 +1534,8 @@ int hci_dev_do_close(struct hci_dev *hdev)
                return 0;
        }
 
+       hci_leds_update_powered(hdev, false);
+
        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);
@@ -3067,6 +3071,8 @@ int hci_register_dev(struct hci_dev *hdev)
        if (error < 0)
                goto err_wqueue;
 
+       hci_leds_init(hdev);
+
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
index 41b5f3813f02f1ed73ffecdc7d685645ded8c265..c78ee2dc93237475eb002ae61a737e0137c387f9 100644 (file)
@@ -688,21 +688,29 @@ static u8 update_white_list(struct hci_request *req)
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
-               struct hci_cp_le_del_from_white_list cp;
+               /* If the device is neither in pend_le_conns nor
+                * pend_le_reports then remove it from the whitelist.
+                */
+               if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
+                                              &b->bdaddr, b->bdaddr_type) &&
+                   !hci_pend_le_action_lookup(&hdev->pend_le_reports,
+                                              &b->bdaddr, b->bdaddr_type)) {
+                       struct hci_cp_le_del_from_white_list cp;
+
+                       cp.bdaddr_type = b->bdaddr_type;
+                       bacpy(&cp.bdaddr, &b->bdaddr);
 
-               if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
-                                             &b->bdaddr, b->bdaddr_type) ||
-                   hci_pend_le_action_lookup(&hdev->pend_le_reports,
-                                             &b->bdaddr, b->bdaddr_type)) {
-                       white_list_entries++;
+                       hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+                                   sizeof(cp), &cp);
                        continue;
                }
 
-               cp.bdaddr_type = b->bdaddr_type;
-               bacpy(&cp.bdaddr, &b->bdaddr);
+               if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
+                       /* White list can not be used with RPAs */
+                       return 0x00;
+               }
 
-               hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
-                           sizeof(cp), &cp);
+               white_list_entries++;
        }
 
        /* Since all no longer valid white list entries have been
index 39a5149f301085d45d37a94361e2b86486d852bd..eb4f5f24cbe36a1d4f524978d52d8558f4dbde80 100644 (file)
@@ -197,10 +197,20 @@ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
                chan->sport = psm;
                err = 0;
        } else {
-               u16 p;
+               u16 p, start, end, incr;
+
+               if (chan->src_type == BDADDR_BREDR) {
+                       start = L2CAP_PSM_DYN_START;
+                       end = L2CAP_PSM_AUTO_END;
+                       incr = 2;
+               } else {
+                       start = L2CAP_PSM_LE_DYN_START;
+                       end = L2CAP_PSM_LE_DYN_END;
+                       incr = 1;
+               }
 
                err = -EINVAL;
-               for (p = 0x1001; p < 0x1100; p += 2)
+               for (p = start; p <= end; p += incr)
                        if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
                                chan->psm   = cpu_to_le16(p);
                                chan->sport = cpu_to_le16(p);
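
The rewritten loop above allocates dynamic PSMs from transport-specific ranges instead of the hard-coded 0x1001..0x10ff BR/EDR window. The numeric values behind the new constants are not part of this hunk; the sketch below shows what they most likely expand to (an assumption inferred from the replaced literals and the Bluetooth LE_PSM numbering; the authoritative definitions live in include/net/bluetooth/l2cap.h):

/* Assumed values, for illustration only; verify against l2cap.h. */
#define L2CAP_PSM_DYN_START	0x1001	/* BR/EDR dynamic PSMs, odd values, step 2 */
#define L2CAP_PSM_AUTO_END	0x10ff
#define L2CAP_PSM_LE_DYN_START	0x0080	/* LE dynamic PSMs, step 1 */
#define L2CAP_PSM_LE_DYN_END	0x00ff

The PSM validators in l2cap_sock.c further down reuse the same constants, so the BR/EDR and LE ranges stay defined in one place.
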
index 1bb5515270449e8115a0c169ea5b6380594c40a4..e4cae72895a72bebc4c95b430d688e83cfebfada 100644 (file)
@@ -58,7 +58,7 @@ static int l2cap_validate_bredr_psm(u16 psm)
                return -EINVAL;
 
        /* Restrict usage of well-known PSMs */
-       if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE))
+       if (psm < L2CAP_PSM_DYN_START && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;
 
        return 0;
@@ -67,11 +67,11 @@ static int l2cap_validate_bredr_psm(u16 psm)
 static int l2cap_validate_le_psm(u16 psm)
 {
        /* Valid LE_PSM ranges are defined only until 0x00ff */
-       if (psm > 0x00ff)
+       if (psm > L2CAP_PSM_LE_DYN_END)
                return -EINVAL;
 
        /* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */
-       if (psm <= 0x007f && !capable(CAP_NET_BIND_SERVICE))
+       if (psm < L2CAP_PSM_LE_DYN_START && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;
 
        return 0;
@@ -125,6 +125,9 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                        goto done;
        }
 
+       bacpy(&chan->src, &la.l2_bdaddr);
+       chan->src_type = la.l2_bdaddr_type;
+
        if (la.l2_cid)
                err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
        else
@@ -156,9 +159,6 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                break;
        }
 
-       bacpy(&chan->src, &la.l2_bdaddr);
-       chan->src_type = la.l2_bdaddr_type;
-
        if (chan->psm && bdaddr_type_is_le(chan->src_type))
                chan->mode = L2CAP_MODE_LE_FLOWCTL;
 
diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c
new file mode 100644 (file)
index 0000000..8319c84
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015, Heiner Kallweit <hkallweit1@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "leds.h"
+
+struct hci_basic_led_trigger {
+       struct led_trigger      led_trigger;
+       struct hci_dev          *hdev;
+};
+
+#define to_hci_basic_led_trigger(arg) container_of(arg, \
+                       struct hci_basic_led_trigger, led_trigger)
+
+void hci_leds_update_powered(struct hci_dev *hdev, bool enabled)
+{
+       if (hdev->power_led)
+               led_trigger_event(hdev->power_led,
+                                 enabled ? LED_FULL : LED_OFF);
+}
+
+static void power_activate(struct led_classdev *led_cdev)
+{
+       struct hci_basic_led_trigger *htrig;
+       bool powered;
+
+       htrig = to_hci_basic_led_trigger(led_cdev->trigger);
+       powered = test_bit(HCI_UP, &htrig->hdev->flags);
+
+       led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF);
+}
+
+static struct led_trigger *led_allocate_basic(struct hci_dev *hdev,
+                       void (*activate)(struct led_classdev *led_cdev),
+                       const char *name)
+{
+       struct hci_basic_led_trigger *htrig;
+
+       htrig = devm_kzalloc(&hdev->dev, sizeof(*htrig), GFP_KERNEL);
+       if (!htrig)
+               return NULL;
+
+       htrig->hdev = hdev;
+       htrig->led_trigger.activate = activate;
+       htrig->led_trigger.name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+                                                "%s-%s", hdev->name,
+                                                name);
+       if (!htrig->led_trigger.name)
+               goto err_alloc;
+
+       if (devm_led_trigger_register(&hdev->dev, &htrig->led_trigger))
+               goto err_register;
+
+       return &htrig->led_trigger;
+
+err_register:
+       devm_kfree(&hdev->dev, (void *)htrig->led_trigger.name);
+err_alloc:
+       devm_kfree(&hdev->dev, htrig);
+       return NULL;
+}
+
+void hci_leds_init(struct hci_dev *hdev)
+{
+       /* initialize power_led */
+       hdev->power_led = led_allocate_basic(hdev, power_activate, "power");
+}
diff --git a/net/bluetooth/leds.h b/net/bluetooth/leds.h
new file mode 100644 (file)
index 0000000..a9c4d6e
--- /dev/null
+++ b/net/bluetooth/leds.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2015, Heiner Kallweit <hkallweit1@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if IS_ENABLED(CONFIG_BT_LEDS)
+void hci_leds_update_powered(struct hci_dev *hdev, bool enabled);
+void hci_leds_init(struct hci_dev *hdev);
+#else
+static inline void hci_leds_update_powered(struct hci_dev *hdev,
+                                          bool enabled) {}
+static inline void hci_leds_init(struct hci_dev *hdev) {}
+#endif
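
The new leds.c recovers its wrapper from the embedded led_trigger with container_of(), and leds.h compiles the hooks away when CONFIG_BT_LEDS is off. The stand-alone sketch below reproduces only the container_of() idiom; the structures are local stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

/* same layout trick as the kernel macro: member pointer minus offset */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct led_trigger {                    /* local stand-in, not the real struct */
	const char *name;
};

struct hci_basic_led_trigger {
	struct led_trigger led_trigger; /* embedded, as in leds.c above */
	int hdev_id;                    /* stand-in for the hci_dev pointer */
};

/* the LED core only ever hands back the embedded trigger pointer */
static void activate(struct led_trigger *trigger)
{
	struct hci_basic_led_trigger *htrig =
		container_of(trigger, struct hci_basic_led_trigger, led_trigger);

	printf("activate %s for hci%d\n", trigger->name, htrig->hdev_id);
}

int main(void)
{
	struct hci_basic_led_trigger power = {
		.led_trigger = { .name = "hci0-power" },
		.hdev_id = 0,
	};

	activate(&power.led_trigger);
	return 0;
}

The name built by devm_kasprintf() above ("%s-%s" of hdev->name and "power", e.g. hci0-power) is what identifies the trigger once devm_led_trigger_register() succeeds.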
index 78ad7d691fbee5b59860d0700be87ad3631360d3..50976a6481f3cd21e33e5ee75d74dacc06470584 100644 (file)
@@ -1065,22 +1065,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
                        hcon->dst_type = smp->remote_irk->addr_type;
                        queue_work(hdev->workqueue, &conn->id_addr_update_work);
                }
-
-               /* When receiving an indentity resolving key for
-                * a remote device that does not use a resolvable
-                * private address, just remove the key so that
-                * it is possible to use the controller white
-                * list for scanning.
-                *
-                * Userspace will have been told to not store
-                * this key at this point. So it is safe to
-                * just remove it.
-                */
-               if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
-                       list_del_rcu(&smp->remote_irk->list);
-                       kfree_rcu(smp->remote_irk, rcu);
-                       smp->remote_irk = NULL;
-               }
        }
 
        if (smp->csrk) {
index a1abe4936fe15299b7643b8944023050c5f938c1..3addc05b9a16676b07ca13bc6faf0cba62b3acfd 100644 (file)
@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
        .notifier_call = br_device_event
 };
 
+/* called with RTNL */
 static int br_switchdev_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
 {
@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
        struct switchdev_notifier_fdb_info *fdb_info;
        int err = NOTIFY_DONE;
 
-       rtnl_lock();
        p = br_port_get_rtnl(dev);
        if (!p)
                goto out;
@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
        }
 
 out:
-       rtnl_unlock();
        return err;
 }
 
index 30e105f57f0d9a59e1ce03de8531b90718ac8e29..ac089286526ef8b94f39b869275b1f5ff07834ad 100644 (file)
@@ -41,6 +41,14 @@ fail:
        return -EMSGSIZE;
 }
 
+static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
+{
+       e->state = flags & MDB_PG_FLAGS_PERMANENT;
+       e->flags = 0;
+       if (flags & MDB_PG_FLAGS_OFFLOAD)
+               e->flags |= MDB_FLAGS_OFFLOAD;
+}
+
 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev)
 {
@@ -85,8 +93,8 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                                        struct br_mdb_entry e;
                                        memset(&e, 0, sizeof(e));
                                        e.ifindex = port->dev->ifindex;
-                                       e.state = p->state;
                                        e.vid = p->addr.vid;
+                                       __mdb_entry_fill_flags(&e, p->flags);
                                        if (p->addr.proto == htons(ETH_P_IP))
                                                e.addr.u.ip4 = p->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -209,7 +217,7 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
 }
 
 static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
-                           int type)
+                           int type, struct net_bridge_port_group *pg)
 {
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
@@ -232,10 +240,13 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
 #endif
 
        mdb.obj.orig_dev = port_dev;
-       if (port_dev && type == RTM_NEWMDB)
-               switchdev_port_obj_add(port_dev, &mdb.obj);
-       else if (port_dev && type == RTM_DELMDB)
+       if (port_dev && type == RTM_NEWMDB) {
+               err = switchdev_port_obj_add(port_dev, &mdb.obj);
+               if (!err && pg)
+                       pg->flags |= MDB_PG_FLAGS_OFFLOAD;
+       } else if (port_dev && type == RTM_DELMDB) {
                switchdev_port_obj_del(port_dev, &mdb.obj);
+       }
 
        skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
        if (!skb)
@@ -253,21 +264,21 @@ errout:
        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
 }
 
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                  struct br_ip *group, int type, u8 state)
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
+                  int type)
 {
        struct br_mdb_entry entry;
 
        memset(&entry, 0, sizeof(entry));
-       entry.ifindex = port->dev->ifindex;
-       entry.addr.proto = group->proto;
-       entry.addr.u.ip4 = group->u.ip4;
+       entry.ifindex = pg->port->dev->ifindex;
+       entry.addr.proto = pg->addr.proto;
+       entry.addr.u.ip4 = pg->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-       entry.addr.u.ip6 = group->u.ip6;
+       entry.addr.u.ip6 = pg->addr.u.ip6;
 #endif
-       entry.state = state;
-       entry.vid = group->vid;
-       __br_mdb_notify(dev, &entry, type);
+       entry.vid = pg->addr.vid;
+       __mdb_entry_fill_flags(&entry, pg->flags);
+       __br_mdb_notify(dev, &entry, type, pg);
 }
 
 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -412,7 +423,8 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
-                           struct br_ip *group, unsigned char state)
+                           struct br_ip *group, unsigned char state,
+                           struct net_bridge_port_group **pg)
 {
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
@@ -443,6 +455,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
        if (unlikely(!p))
                return -ENOMEM;
        rcu_assign_pointer(*pp, p);
+       *pg = p;
        if (state == MDB_TEMPORARY)
                mod_timer(&p->timer, now + br->multicast_membership_interval);
 
@@ -450,7 +463,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 }
 
 static int __br_mdb_add(struct net *net, struct net_bridge *br,
-                       struct br_mdb_entry *entry)
+                       struct br_mdb_entry *entry,
+                       struct net_bridge_port_group **pg)
 {
        struct br_ip ip;
        struct net_device *dev;
@@ -479,7 +493,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 #endif
 
        spin_lock_bh(&br->multicast_lock);
-       ret = br_mdb_add_group(br, p, &ip, entry->state);
+       ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
        spin_unlock_bh(&br->multicast_lock);
        return ret;
 }
@@ -487,6 +501,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
+       struct net_bridge_port_group *pg;
        struct net_bridge_vlan_group *vg;
        struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
@@ -516,15 +531,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_vlan_enabled(br) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
-                       err = __br_mdb_add(net, br, entry);
+                       err = __br_mdb_add(net, br, entry, &pg);
                        if (err)
                                break;
-                       __br_mdb_notify(dev, entry, RTM_NEWMDB);
+                       __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
                }
        } else {
-               err = __br_mdb_add(net, br, entry);
+               err = __br_mdb_add(net, br, entry, &pg);
                if (!err)
-                       __br_mdb_notify(dev, entry, RTM_NEWMDB);
+                       __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
        }
 
        return err;
@@ -568,7 +583,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
                if (p->port->state == BR_STATE_DISABLED)
                        goto unlock;
 
-               entry->state = p->state;
+               __mdb_entry_fill_flags(entry, p->flags);
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
@@ -620,12 +635,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
                        entry->vid = v->vid;
                        err = __br_mdb_del(br, entry);
                        if (!err)
-                               __br_mdb_notify(dev, entry, RTM_DELMDB);
+                               __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
                }
        } else {
                err = __br_mdb_del(br, entry);
                if (!err)
-                       __br_mdb_notify(dev, entry, RTM_DELMDB);
+                       __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
        }
 
        return err;
index 03661d97463c0a185b6c688ee281309682209528..8b6e4249be1b09a37047988424c8c60c9d38290e 100644 (file)
@@ -283,8 +283,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
-               br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
-                             p->state);
+               br_mdb_notify(br->dev, p, RTM_DELMDB);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
                if (!mp->ports && !mp->mglist &&
@@ -304,7 +303,7 @@ static void br_multicast_port_group_expired(unsigned long data)
 
        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
-           hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
+           hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
                goto out;
 
        br_multicast_del_pg(br, pg);
@@ -649,7 +648,7 @@ struct net_bridge_port_group *br_multicast_new_port_group(
                        struct net_bridge_port *port,
                        struct br_ip *group,
                        struct net_bridge_port_group __rcu *next,
-                       unsigned char state)
+                       unsigned char flags)
 {
        struct net_bridge_port_group *p;
 
@@ -659,7 +658,7 @@ struct net_bridge_port_group *br_multicast_new_port_group(
 
        p->addr = *group;
        p->port = port;
-       p->state = state;
+       p->flags = flags;
        rcu_assign_pointer(p->next, next);
        hlist_add_head(&p->mglist, &port->mglist);
        setup_timer(&p->timer, br_multicast_port_group_expired,
@@ -702,11 +701,11 @@ static int br_multicast_add_group(struct net_bridge *br,
                        break;
        }
 
-       p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
+       p = br_multicast_new_port_group(port, group, *pp, 0);
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
-       br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
+       br_mdb_notify(br->dev, p, RTM_NEWMDB);
 
 found:
        mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -975,7 +974,7 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 
        spin_lock(&br->multicast_lock);
        hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
-               if (pg->state == MDB_TEMPORARY)
+               if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
                        br_multicast_del_pg(br, pg);
 
        if (!hlist_unhashed(&port->rlist)) {
@@ -1453,8 +1452,7 @@ br_multicast_leave_group(struct net_bridge *br,
                        hlist_del_init(&p->mglist);
                        del_timer(&p->timer);
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
-                       br_mdb_notify(br->dev, port, group, RTM_DELMDB,
-                                     p->state);
+                       br_mdb_notify(br->dev, p, RTM_DELMDB);
 
                        if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
index 216018c760187db31e45206225ce8c7594a849d5..302ab0a43725845c5ec7a1ffb30db59bfaf16db5 100644 (file)
@@ -150,6 +150,9 @@ struct net_bridge_fdb_entry
        struct rcu_head                 rcu;
 };
 
+#define MDB_PG_FLAGS_PERMANENT BIT(0)
+#define MDB_PG_FLAGS_OFFLOAD   BIT(1)
+
 struct net_bridge_port_group {
        struct net_bridge_port          *port;
        struct net_bridge_port_group __rcu *next;
@@ -157,7 +160,7 @@ struct net_bridge_port_group {
        struct rcu_head                 rcu;
        struct timer_list               timer;
        struct br_ip                    addr;
-       unsigned char                   state;
+       unsigned char                   flags;
 };
 
 struct net_bridge_mdb_entry
@@ -554,11 +557,11 @@ void br_multicast_free_pg(struct rcu_head *head);
 struct net_bridge_port_group *
 br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
                            struct net_bridge_port_group __rcu *next,
-                           unsigned char state);
+                           unsigned char flags);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                  struct br_ip *group, int type, u8 state);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
+                  int type);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
                   int type);
 
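The bridge hunks above replace the per-port-group state byte with a flags bitmask (MDB_PG_FLAGS_PERMANENT, MDB_PG_FLAGS_OFFLOAD) and translate it back into the netlink-visible entry in __mdb_entry_fill_flags(). The self-contained sketch below spells that translation out; the MDB_PERMANENT and MDB_FLAGS_OFFLOAD values are assumptions mirroring the uapi header, and the entry struct is reduced to the two fields being filled.

#include <stdio.h>

#define BIT(n)                  (1u << (n))

/* values taken from the br_private.h hunk above */
#define MDB_PG_FLAGS_PERMANENT  BIT(0)
#define MDB_PG_FLAGS_OFFLOAD    BIT(1)

/* assumed to mirror the uapi if_bridge.h definitions */
#define MDB_TEMPORARY           0
#define MDB_PERMANENT           1
#define MDB_FLAGS_OFFLOAD       BIT(0)

struct mdb_entry {              /* reduced to the two fields involved */
	unsigned char state;
	unsigned char flags;
};

static void mdb_entry_fill_flags(struct mdb_entry *e, unsigned char flags)
{
	/* temporary/permanent still travels in e->state (the kernel relies
	 * on MDB_PERMANENT sharing the value of BIT(0); spelled out here) */
	e->state = (flags & MDB_PG_FLAGS_PERMANENT) ? MDB_PERMANENT : MDB_TEMPORARY;

	/* offload status is reported through the new e->flags bitmask */
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

int main(void)
{
	struct mdb_entry e;

	mdb_entry_fill_flags(&e, MDB_PG_FLAGS_PERMANENT | MDB_PG_FLAGS_OFFLOAD);
	printf("state=%u flags=%#x\n", (unsigned)e.state, (unsigned)e.flags);
	return 0;
}
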
index 393bfb22d5bbafd83c20f5953b98c6709b637452..5fcfb98f309efb5018f84628902cd1711fc36705 100644 (file)
@@ -403,6 +403,7 @@ static int is_out(const struct crush_map *map,
  * @local_retries: localized retries
  * @local_fallback_retries: localized fallback retries
  * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
+ * @stable: stable mode starts rep=0 in the recursive call for all replicas
  * @vary_r: pass r to recursive calls
  * @out2: second output vector for leaf items (if @recurse_to_leaf)
  * @parent_r: r value passed from the parent
@@ -419,6 +420,7 @@ static int crush_choose_firstn(const struct crush_map *map,
                               unsigned int local_fallback_retries,
                               int recurse_to_leaf,
                               unsigned int vary_r,
+                              unsigned int stable,
                               int *out2,
                               int parent_r)
 {
@@ -433,13 +435,13 @@ static int crush_choose_firstn(const struct crush_map *map,
        int collide, reject;
        int count = out_size;
 
-       dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n",
+       dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n",
                recurse_to_leaf ? "_LEAF" : "",
                bucket->id, x, outpos, numrep,
                tries, recurse_tries, local_retries, local_fallback_retries,
-               parent_r);
+               parent_r, stable);
 
-       for (rep = outpos; rep < numrep && count > 0 ; rep++) {
+       for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) {
                /* keep trying until we get a non-out, non-colliding item */
                ftotal = 0;
                skip_rep = 0;
@@ -512,13 +514,14 @@ static int crush_choose_firstn(const struct crush_map *map,
                                                if (crush_choose_firstn(map,
                                                         map->buckets[-1-item],
                                                         weight, weight_max,
-                                                        x, outpos+1, 0,
+                                                        x, stable ? 1 : outpos+1, 0,
                                                         out2, outpos, count,
                                                         recurse_tries, 0,
                                                         local_retries,
                                                         local_fallback_retries,
                                                         0,
                                                         vary_r,
+                                                        stable,
                                                         NULL,
                                                         sub_r) <= outpos)
                                                        /* didn't get leaf */
@@ -816,6 +819,7 @@ int crush_do_rule(const struct crush_map *map,
        int choose_local_fallback_retries = map->choose_local_fallback_tries;
 
        int vary_r = map->chooseleaf_vary_r;
+       int stable = map->chooseleaf_stable;
 
        if ((__u32)ruleno >= map->max_rules) {
                dprintk(" bad ruleno %d\n", ruleno);
@@ -835,7 +839,8 @@ int crush_do_rule(const struct crush_map *map,
                case CRUSH_RULE_TAKE:
                        if ((curstep->arg1 >= 0 &&
                             curstep->arg1 < map->max_devices) ||
-                           (-1-curstep->arg1 < map->max_buckets &&
+                           (-1-curstep->arg1 >= 0 &&
+                            -1-curstep->arg1 < map->max_buckets &&
                             map->buckets[-1-curstep->arg1])) {
                                w[0] = curstep->arg1;
                                wsize = 1;
@@ -869,6 +874,11 @@ int crush_do_rule(const struct crush_map *map,
                                vary_r = curstep->arg1;
                        break;
 
+               case CRUSH_RULE_SET_CHOOSELEAF_STABLE:
+                       if (curstep->arg1 >= 0)
+                               stable = curstep->arg1;
+                       break;
+
                case CRUSH_RULE_CHOOSELEAF_FIRSTN:
                case CRUSH_RULE_CHOOSE_FIRSTN:
                        firstn = 1;
@@ -888,6 +898,7 @@ int crush_do_rule(const struct crush_map *map,
                        osize = 0;
 
                        for (i = 0; i < wsize; i++) {
+                               int bno;
                                /*
                                 * see CRUSH_N, CRUSH_N_MINUS macros.
                                 * basically, numrep <= 0 means relative to
@@ -900,6 +911,13 @@ int crush_do_rule(const struct crush_map *map,
                                                continue;
                                }
                                j = 0;
+                               /* make sure bucket id is valid */
+                               bno = -1 - w[i];
+                               if (bno < 0 || bno >= map->max_buckets) {
+                                       /* w[i] is probably CRUSH_ITEM_NONE */
+                                       dprintk("  bad w[i] %d\n", w[i]);
+                                       continue;
+                               }
                                if (firstn) {
                                        int recurse_tries;
                                        if (choose_leaf_tries)
@@ -911,7 +929,7 @@ int crush_do_rule(const struct crush_map *map,
                                                recurse_tries = choose_tries;
                                        osize += crush_choose_firstn(
                                                map,
-                                               map->buckets[-1-w[i]],
+                                               map->buckets[bno],
                                                weight, weight_max,
                                                x, numrep,
                                                curstep->arg2,
@@ -923,6 +941,7 @@ int crush_do_rule(const struct crush_map *map,
                                                choose_local_fallback_retries,
                                                recurse_to_leaf,
                                                vary_r,
+                                               stable,
                                                c+osize,
                                                0);
                                } else {
@@ -930,7 +949,7 @@ int crush_do_rule(const struct crush_map *map,
                                                    numrep : (result_max-osize));
                                        crush_choose_indep(
                                                map,
-                                               map->buckets[-1-w[i]],
+                                               map->buckets[bno],
                                                weight, weight_max,
                                                x, out_size, numrep,
                                                curstep->arg2,
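Besides threading the new chooseleaf_stable tunable through crush_choose_firstn(), the crush_do_rule() hunk adds a bucket-id sanity check: item ids below zero are buckets, and -1 - id turns them into an index that must stay inside map->max_buckets. A small stand-alone version of that check follows; CRUSH_ITEM_NONE is assumed to match the crush header's 0x7fffffff.

#include <stdio.h>

#define CRUSH_ITEM_NONE 0x7fffffff      /* assumed to match the crush header */

/* negative ids are buckets, non-negative ids are devices */
static int bucket_index(int id, int max_buckets)
{
	int bno = -1 - id;              /* -1 -> 0, -2 -> 1, ... */

	if (bno < 0 || bno >= max_buckets)
		return -1;              /* device id, CRUSH_ITEM_NONE or corrupt map */
	return bno;
}

int main(void)
{
	int max_buckets = 4;

	printf("%d\n", bucket_index(-1, max_buckets));              /*  0 */
	printf("%d\n", bucket_index(-4, max_buckets));              /*  3 */
	printf("%d\n", bucket_index(7, max_buckets));               /* -1: device */
	printf("%d\n", bucket_index(CRUSH_ITEM_NONE, max_buckets)); /* -1: hole */
	return 0;
}
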
index f8f235930d887adf1df94314b18c719638328df0..3534e12683d3b2b6efa9988faff9e9b3249e6fa6 100644 (file)
@@ -1770,6 +1770,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
        u32 osdmap_epoch;
        int already_completed;
        u32 bytes;
+       u8 decode_redir;
        unsigned int i;
 
        tid = le64_to_cpu(msg->hdr.tid);
@@ -1841,6 +1842,15 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
                p += 8 + 4; /* skip replay_version */
                p += 8; /* skip user_version */
 
+               if (le16_to_cpu(msg->hdr.version) >= 7)
+                       ceph_decode_8_safe(&p, end, decode_redir, bad_put);
+               else
+                       decode_redir = 1;
+       } else {
+               decode_redir = 0;
+       }
+
+       if (decode_redir) {
                err = ceph_redirect_decode(&p, end, &redir);
                if (err)
                        goto bad_put;
index 7d8f581d9f1f7987b8d7051160c34f42ad2f5e73..243574c8cf33807fcaf9374530358f1e44080764 100644 (file)
@@ -342,23 +342,32 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
         c->choose_local_tries = ceph_decode_32(p);
         c->choose_local_fallback_tries =  ceph_decode_32(p);
         c->choose_total_tries = ceph_decode_32(p);
-        dout("crush decode tunable choose_local_tries = %d",
+        dout("crush decode tunable choose_local_tries = %d\n",
              c->choose_local_tries);
-        dout("crush decode tunable choose_local_fallback_tries = %d",
+        dout("crush decode tunable choose_local_fallback_tries = %d\n",
              c->choose_local_fallback_tries);
-        dout("crush decode tunable choose_total_tries = %d",
+        dout("crush decode tunable choose_total_tries = %d\n",
              c->choose_total_tries);
 
        ceph_decode_need(p, end, sizeof(u32), done);
        c->chooseleaf_descend_once = ceph_decode_32(p);
-       dout("crush decode tunable chooseleaf_descend_once = %d",
+       dout("crush decode tunable chooseleaf_descend_once = %d\n",
             c->chooseleaf_descend_once);
 
        ceph_decode_need(p, end, sizeof(u8), done);
        c->chooseleaf_vary_r = ceph_decode_8(p);
-       dout("crush decode tunable chooseleaf_vary_r = %d",
+       dout("crush decode tunable chooseleaf_vary_r = %d\n",
             c->chooseleaf_vary_r);
 
+       /* skip straw_calc_version, allowed_bucket_algs */
+       ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
+       *p += sizeof(u8) + sizeof(u32);
+
+       ceph_decode_need(p, end, sizeof(u8), done);
+       c->chooseleaf_stable = ceph_decode_8(p);
+       dout("crush decode tunable chooseleaf_stable = %d\n",
+            c->chooseleaf_stable);
+
 done:
        dout("crush_decode success\n");
        return c;
index cc9e3652cf93a6306e6f614f966e0fe7cf17a10e..f1284835b8c9222ffca96f63cd3351d2fcb54170 100644 (file)
@@ -4154,7 +4154,10 @@ ncls:
                        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
 drop:
-               atomic_long_inc(&skb->dev->rx_dropped);
+               if (!deliver_exact)
+                       atomic_long_inc(&skb->dev->rx_dropped);
+               else
+                       atomic_long_inc(&skb->dev->rx_nohandler);
                kfree_skb(skb);
                /* Jamal, now you will not able to escape explaining
                 * me how you were going to use this. :-)
@@ -4351,6 +4354,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
 
                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= p->vlan_tci ^ skb->vlan_tci;
+               diffs |= skb_metadata_dst_cmp(p, skb);
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
                                                      skb_mac_header(skb));
@@ -4548,10 +4552,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
                break;
 
        case GRO_MERGED_FREE:
-               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
+                       skb_dst_drop(skb);
                        kmem_cache_free(skbuff_head_cache, skb);
-               else
+               } else {
                        __kfree_skb(skb);
+               }
                break;
 
        case GRO_HELD:
@@ -7250,24 +7256,31 @@ void netdev_run_todo(void)
        }
 }
 
-/* Convert net_device_stats to rtnl_link_stats64.  They have the same
- * fields in the same order, with only the type differing.
+/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
+ * all the same fields in the same order as net_device_stats, with only
+ * the type differing, but rtnl_link_stats64 may have additional fields
+ * at the end for newer counters.
  */
 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                             const struct net_device_stats *netdev_stats)
 {
 #if BITS_PER_LONG == 64
-       BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
+       BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
        memcpy(stats64, netdev_stats, sizeof(*stats64));
+       /* zero out counters that only exist in rtnl_link_stats64 */
+       memset((char *)stats64 + sizeof(*netdev_stats), 0,
+              sizeof(*stats64) - sizeof(*netdev_stats));
 #else
-       size_t i, n = sizeof(*stats64) / sizeof(u64);
+       size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
        const unsigned long *src = (const unsigned long *)netdev_stats;
        u64 *dst = (u64 *)stats64;
 
-       BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
-                    sizeof(*stats64) / sizeof(u64));
+       BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
        for (i = 0; i < n; i++)
                dst[i] = src[i];
+       /* zero out counters that only exist in rtnl_link_stats64 */
+       memset((char *)stats64 + n * sizeof(u64), 0,
+              sizeof(*stats64) - n * sizeof(u64));
 #endif
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
@@ -7297,6 +7310,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
        }
        storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
        storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
+       storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
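netdev_stats_to_stats64() above is relaxed so that rtnl_link_stats64 may be a superset of net_device_stats: the legacy counters are copied (or widened) one by one, and the trailing newer fields such as rx_nohandler are zeroed. The sketch below applies the same pattern to two local stand-in structs; the layouts are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct old_stats {                      /* stand-in for net_device_stats */
	unsigned long rx_packets;
	unsigned long tx_packets;
};

struct new_stats64 {                    /* stand-in for rtnl_link_stats64 */
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_nohandler;          /* newer counter, no legacy source */
};

static void stats_to_stats64(struct new_stats64 *dst, const struct old_stats *src)
{
	/* widen each legacy counter individually ... */
	size_t i, n = sizeof(*src) / sizeof(unsigned long);
	const unsigned long *s = (const unsigned long *)src;
	uint64_t *d = (uint64_t *)dst;

	for (i = 0; i < n; i++)
		d[i] = s[i];

	/* ... and zero everything that only exists in the 64-bit struct */
	memset((char *)dst + n * sizeof(uint64_t), 0,
	       sizeof(*dst) - n * sizeof(uint64_t));
}

int main(void)
{
	struct old_stats legacy = { .rx_packets = 10, .tx_packets = 20 };
	struct new_stats64 out = { .rx_nohandler = 0xdead };    /* stale value */

	stats_to_stats64(&out, &legacy);
	printf("rx=%llu tx=%llu nohandler=%llu\n",
	       (unsigned long long)out.rx_packets,
	       (unsigned long long)out.tx_packets,
	       (unsigned long long)out.rx_nohandler);
	return 0;
}
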
index daf04709dd3c695ff5c47f7d30c0f005397db00e..453c803f1c8713da0138041e2b59534aae6b8206 100644 (file)
@@ -632,7 +632,7 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
        return 0;
 }
 
-u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 
 void netdev_rss_key_fill(void *buffer, size_t len)
 {
index d79699c9d1b9eb9f250254e360b2d5a7b4ff6e34..eab81bc80e5cdff993a030b109cbffa133262bf0 100644 (file)
@@ -208,7 +208,6 @@ ip:
        case htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
-               __be32 flow_label;
 
 ipv6:
                iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
@@ -230,8 +229,12 @@ ipv6:
                        key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                }
 
-               flow_label = ip6_flowlabel(iph);
-               if (flow_label) {
+               if ((dissector_uses_key(flow_dissector,
+                                       FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
+                    (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
+                   ip6_flowlabel(iph)) {
+                       __be32 flow_label = ip6_flowlabel(iph);
+
                        if (dissector_uses_key(flow_dissector,
                                               FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
                                key_tags = skb_flow_dissector_target(flow_dissector,
index b6c8a6629b397134f5ea539adb7ef8ba80625225..da7dbc237a5f4c2f98aee3bf5f95a78f6768707e 100644 (file)
@@ -574,6 +574,7 @@ NETSTAT_ENTRY(tx_heartbeat_errors);
 NETSTAT_ENTRY(tx_window_errors);
 NETSTAT_ENTRY(rx_compressed);
 NETSTAT_ENTRY(tx_compressed);
+NETSTAT_ENTRY(rx_nohandler);
 
 static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
@@ -599,6 +600,7 @@ static struct attribute *netstat_attrs[] = {
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
+       &dev_attr_rx_nohandler.attr,
        NULL
 };
 
index d735e854f916040912fb12930cbc6a7950ace942..20d71358c14392901937b8c85c38164c247b046b 100644 (file)
@@ -804,6 +804,8 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
 
        a->rx_compressed = b->rx_compressed;
        a->tx_compressed = b->tx_compressed;
+
+       a->rx_nohandler = b->rx_nohandler;
 }
 
 static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
index 14596fb3717270d62fa70544b7ec2496de96e1ce..2696aefdc148887138d46f98dc0a678ce2699512 100644 (file)
@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                *fplp = fpl;
                fpl->count = 0;
                fpl->max = SCM_MAX_FD;
+               fpl->user = NULL;
        }
        fpp = &fpl->fp[fpl->count];
 
@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                *fpp++ = file;
                fpl->count++;
        }
+
+       if (!fpl->user)
+               fpl->user = get_uid(current_user());
+
        return num;
 }
 
@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
                scm->fp = NULL;
                for (i=fpl->count-1; i>=0; i--)
                        fput(fpl->fp[i]);
+               free_uid(fpl->user);
                kfree(fpl);
        }
 }
@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
                for (i = 0; i < fpl->count; i++)
                        get_file(fpl->fp[i]);
                new_fpl->max = new_fpl->count;
+               new_fpl->user = get_uid(fpl->user);
        }
        return new_fpl;
 }
index b2df375ec9c2173a8132b8efa1c3062f0510284b..5bf88f58bee7405ff65f80487a64339b92a91bcb 100644 (file)
@@ -79,6 +79,8 @@
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
 
 /**
  *     skb_panic - private function for out-of-line support
index 1df98c55744029de02522e34ed00c3cd2ca03af1..e92b759d906c1bbcad5ff3ecc977d6393df90361 100644 (file)
@@ -93,10 +93,17 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
  *  @sk2: Socket belonging to the existing reuseport group.
  *  May return ENOMEM and not add socket to group under memory pressure.
  */
-int reuseport_add_sock(struct sock *sk, const struct sock *sk2)
+int reuseport_add_sock(struct sock *sk, struct sock *sk2)
 {
        struct sock_reuseport *reuse;
 
+       if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
+               int err = reuseport_alloc(sk2);
+
+               if (err)
+                       return err;
+       }
+
        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock)),
index 95b6139d710c46825d1e43f825188d81fcb70f60..a6beb7b6ae556dff501413d6661d9c4655502e36 100644 (file)
@@ -26,6 +26,7 @@ static int zero = 0;
 static int one = 1;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
 
 static int net_msg_warn;       /* Unused, but still a sysctl */
 
@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "max_skb_frags",
+               .data           = &sysctl_max_skb_frags,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &max_skb_frags,
+       },
        { }
 };
 
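The new max_skb_frags entry sits in net_core_table, so it surfaces as /proc/sys/net/core/max_skb_frags, clamped between 1 and the compile-time MAX_SKB_FRAGS (typically 17 with 4 KiB pages, which is also the default). A quick user-space read of the knob:

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/core/max_skb_frags";
	FILE *f = fopen(path, "r");
	int val;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("max_skb_frags = %d\n", val);
	fclose(f);
	return 0;
}
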
index c22920525e5d844bd7e4210b233440b883c2681e..775824720b6b57d68460fc8e05915e479f221414 100644 (file)
@@ -353,6 +353,7 @@ config INET_ESP
        select CRYPTO_CBC
        select CRYPTO_SHA1
        select CRYPTO_DES
+       select CRYPTO_ECHAINIV
        ---help---
          Support for IPsec ESP.
 
index 7aea0ccb6be6e463393c737103bf1ad467ee1d39..d07fc076bea0a4bc96f68075fb3bb79b95007e63 100644 (file)
@@ -1394,9 +1394,10 @@ found:
                struct fib_info *fi = fa->fa_info;
                int nhsel, err;
 
-               if ((index >= (1ul << fa->fa_slen)) &&
-                   ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH)))
-                       continue;
+               if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
+                       if (index >= (1ul << fa->fa_slen))
+                               continue;
+               }
                if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
                        continue;
                if (fi->fib_dead)
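The fib_trie hunk above reorders the suffix-length test so that "1ul << fa->fa_slen" is only evaluated when the shift is well defined; with 32-bit longs and fa_slen equal to KEYLENGTH the old form shifted by the full word width. Here is the guarded check in isolation, with KEYLENGTH and the 32-bit assumption written out locally.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KEYLENGTH       32              /* IPv4 key width, as in fib_trie */
#define BITS_PER_LONG   32              /* pretend we are on a 32-bit box */

static bool index_out_of_range(uint32_t index, unsigned int slen)
{
	/* Only form "1ul << slen" when the shift is defined.  A shift by
	 * the full key width would stand for 2^32, which no 32-bit index
	 * can reach, so skipping the test there changes nothing. */
	if (BITS_PER_LONG > KEYLENGTH || slen < KEYLENGTH)
		return index >= (1ul << slen);
	return false;
}

int main(void)
{
	printf("%d\n", index_out_of_range(0x100, 8));               /* 1 */
	printf("%d\n", index_out_of_range(0x0ff, 8));               /* 0 */
	printf("%d\n", index_out_of_range(0xffffffffu, KEYLENGTH)); /* 0, no UB shift */
	return 0;
}
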
index 46b9c887bede0568ec378d3b809dda8fa463a166..9b17c1792dce6b1f93b7c1a0c4e5cf941220b9c2 100644 (file)
@@ -482,10 +482,6 @@ EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 #define AF_INET_FAMILY(fam) true
 #endif
 
-/* Only thing we need from tcp.h */
-extern int sysctl_tcp_synack_retries;
-
-
 /* Decide when to expire the request and when to resend SYN-ACK */
 static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
                                  const int max_retries,
@@ -557,6 +553,7 @@ static void reqsk_timer_handler(unsigned long data)
 {
        struct request_sock *req = (struct request_sock *)data;
        struct sock *sk_listener = req->rsk_listener;
+       struct net *net = sock_net(sk_listener);
        struct inet_connection_sock *icsk = inet_csk(sk_listener);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        int qlen, expire = 0, resend = 0;
@@ -566,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
        if (sk_state_load(sk_listener) != TCP_LISTEN)
                goto drop;
 
-       max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+       max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
        thresh = max_retries;
        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established socket) for first timeout.
index 8bb8e7ad85483234d8a852782515ca11c93af68d..6029157a19ed1632fbf642416c0199829bf6d7ff 100644 (file)
@@ -361,13 +361,20 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
                                 req->id.idiag_dport, req->id.idiag_src[0],
                                 req->id.idiag_sport, req->id.idiag_if);
 #if IS_ENABLED(CONFIG_IPV6)
-       else if (req->sdiag_family == AF_INET6)
-               sk = inet6_lookup(net, hashinfo,
-                                 (struct in6_addr *)req->id.idiag_dst,
-                                 req->id.idiag_dport,
-                                 (struct in6_addr *)req->id.idiag_src,
-                                 req->id.idiag_sport,
-                                 req->id.idiag_if);
+       else if (req->sdiag_family == AF_INET6) {
+               if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
+                   ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
+                       sk = inet_lookup(net, hashinfo, req->id.idiag_dst[3],
+                                        req->id.idiag_dport, req->id.idiag_src[3],
+                                        req->id.idiag_sport, req->id.idiag_if);
+               else
+                       sk = inet6_lookup(net, hashinfo,
+                                         (struct in6_addr *)req->id.idiag_dst,
+                                         req->id.idiag_dport,
+                                         (struct in6_addr *)req->id.idiag_src,
+                                         req->id.idiag_sport,
+                                         req->id.idiag_if);
+       }
 #endif
        else
                return ERR_PTR(-EINVAL);
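inet_diag_find_one_icsk() above starts routing v4-mapped IPv6 requests (::ffff:a.b.c.d) to the IPv4 lookup, since such sockets live in the IPv4 hash even when the diag request says AF_INET6. The same v4-mapped predicate can be exercised from user space:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
	struct in6_addr a6;
	const char *addrs[] = { "::ffff:192.0.2.1", "2001:db8::1" };

	for (int i = 0; i < 2; i++) {
		if (inet_pton(AF_INET6, addrs[i], &a6) != 1)
			return 1;
		printf("%-18s v4-mapped: %s\n", addrs[i],
		       IN6_IS_ADDR_V4MAPPED(&a6) ? "yes" : "no");
	}
	return 0;
}
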
index 3f00810b7288991f83ba147a65ac4f78e68ae594..187c6fcc3027779ba31d0721eb4a7389addaaf8a 100644 (file)
@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
        struct ipq *qp;
 
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+       skb_orphan(skb);
 
        /* Lookup (or create) queue header */
        qp = ip_find(net, ip_hdr(skb), user, vif);
index 7c51c4e1661f9aafc63829cf58b3b5ec28ed859e..56fdf4e0dce4ef4cb88e34096b4d06cae7de41c5 100644 (file)
@@ -1240,6 +1240,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
        err = ipgre_newlink(net, dev, tb, NULL);
        if (err < 0)
                goto out;
+
+       /* openvswitch users expect packet sizes to be unrestricted,
+        * so set the largest MTU we can.
+        */
+       err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
+       if (err)
+               goto out;
+
        return dev;
 out:
        free_netdev(dev);
index b1209b63381f6f9ae81cf258ad3724e1b6a6b9d8..d77eb0c3b6842bc5605a882f9ad46c609dcdeb50 100644 (file)
@@ -316,7 +316,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
 
-       if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
+       if (sysctl_ip_early_demux &&
+           !skb_dst(skb) &&
+           !skb->sk &&
+           !ip_is_fragment(iph)) {
                const struct net_protocol *ipprot;
                int protocol = iph->protocol;
 
index c7bd72e9b544848d10a490b010e0a30c4d5e4c21..89e8861e05fcb1d371c371c1784b89499b69d9df 100644 (file)
@@ -943,17 +943,31 @@ done:
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
 
-int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+       int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
 
-       if (new_mtu < 68 ||
-           new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
+       if (new_mtu < 68)
                return -EINVAL;
+
+       if (new_mtu > max_mtu) {
+               if (strict)
+                       return -EINVAL;
+
+               new_mtu = max_mtu;
+       }
+
        dev->mtu = new_mtu;
        return 0;
 }
+EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
+
+int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+       return __ip_tunnel_change_mtu(dev, new_mtu, true);
+}
 EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
 
 static void ip_tunnel_dev_free(struct net_device *dev)
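__ip_tunnel_change_mtu() above splits the old check into a strict variant that rejects oversized values and a non-strict one that clamps them, which is what the gretap/openvswitch path a few hunks earlier relies on to get the largest usable MTU. A reduced stand-alone version of the helper follows; the 68-byte floor matches the kernel, while the header overheads in main() are made-up example numbers.

#include <stdbool.h>
#include <stdio.h>

static int change_mtu(int *mtu, int new_mtu, int max_mtu, bool strict)
{
	if (new_mtu < 68)
		return -1;                      /* -EINVAL in the kernel */
	if (new_mtu > max_mtu) {
		if (strict)
			return -1;
		new_mtu = max_mtu;              /* non-strict callers get a clamp */
	}
	*mtu = new_mtu;
	return 0;
}

int main(void)
{
	int mtu = 1500;
	int max_mtu = 0xFFF8 - 14 - 24;         /* example header overheads */

	printf("strict:  %d (mtu=%d)\n", change_mtu(&mtu, 70000, max_mtu, true), mtu);
	printf("clamped: %d (mtu=%d)\n", change_mtu(&mtu, 70000, max_mtu, false), mtu);
	return 0;
}
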
index 67f7c9de0b16689dd1e7d075882b9477ac5e2bf9..2ed9dd2b5f2f5a6ea3ea52208e5ec1424a501fc5 100644 (file)
@@ -143,7 +143,11 @@ static char dhcp_client_identifier[253] __initdata;
 
 /* Persistent data: */
 
+#ifdef IPCONFIG_DYNAMIC
 static int ic_proto_used;                      /* Protocol used, if any */
+#else
+#define ic_proto_used 0
+#endif
 static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */
 static u8 ic_domain[64];               /* DNS (not NIS) domain name */
 
index 6fb869f646bf7a15b2f012cc1982c2a9f3d5935f..a04dee536b8ef8b5f4c1a085563d5d37b6fa118b 100644 (file)
@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
 {
        int err;
 
-       skb_orphan(skb);
-
        local_bh_disable();
        err = ip_defrag(net, skb, user);
        local_bh_enable();
index 643a86c490208cad3fa1e4098e5bc7c30d03eab8..ba0dcffada3b74cdaf8a4c1bf422704541f6d69f 100644 (file)
@@ -19,8 +19,6 @@
 #include <net/tcp.h>
 #include <net/route.h>
 
-extern int sysctl_tcp_syncookies;
-
 static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
 
 #define COOKIEBITS 24  /* Upper bits store count */
@@ -307,7 +305,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        __u8 rcv_wscale;
        struct flowi4 fl4;
 
-       if (!sysctl_tcp_syncookies || !th->ack || th->rst)
+       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
                goto out;
 
        if (tcp_synq_no_recent_overflow(sk))
index 4d367b4139a34fe04cc17dfdebab5dc42a53921a..44bb59824267c74f3aa2aa7067c1d8a6233f1b87 100644 (file)
@@ -291,22 +291,6 @@ static struct ctl_table ipv4_table[] = {
                .extra1         = &ip_ttl_min,
                .extra2         = &ip_ttl_max,
        },
-       {
-               .procname       = "tcp_syn_retries",
-               .data           = &sysctl_tcp_syn_retries,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &tcp_syn_retries_min,
-               .extra2         = &tcp_syn_retries_max
-       },
-       {
-               .procname       = "tcp_synack_retries",
-               .data           = &sysctl_tcp_synack_retries,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_max_orphans",
                .data           = &sysctl_tcp_max_orphans,
@@ -335,37 +319,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_retries1",
-               .data           = &sysctl_tcp_retries1,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra2         = &tcp_retr1_max
-       },
-       {
-               .procname       = "tcp_retries2",
-               .data           = &sysctl_tcp_retries2,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
-       {
-               .procname       = "tcp_fin_timeout",
-               .data           = &sysctl_tcp_fin_timeout,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-#ifdef CONFIG_SYN_COOKIES
-       {
-               .procname       = "tcp_syncookies",
-               .data           = &sysctl_tcp_syncookies,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
-#endif
        {
                .procname       = "tcp_fastopen",
                .data           = &sysctl_tcp_fastopen,
@@ -459,13 +412,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       {
-               .procname       = "tcp_orphan_retries",
-               .data           = &sysctl_tcp_orphan_retries,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_fack",
                .data           = &sysctl_tcp_fack,
@@ -480,13 +426,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-       {
-               .procname       = "tcp_reordering",
-               .data           = &sysctl_tcp_reordering,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_max_reordering",
                .data           = &sysctl_tcp_max_reordering,
@@ -516,13 +455,6 @@ static struct ctl_table ipv4_table[] = {
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &one,
        },
-       {
-               .procname       = "tcp_notsent_lowat",
-               .data           = &sysctl_tcp_notsent_lowat,
-               .maxlen         = sizeof(sysctl_tcp_notsent_lowat),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
        {
                .procname       = "tcp_rmem",
                .data           = &sysctl_tcp_rmem,
@@ -960,6 +892,74 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "tcp_syn_retries",
+               .data           = &init_net.ipv4.sysctl_tcp_syn_retries,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &tcp_syn_retries_min,
+               .extra2         = &tcp_syn_retries_max
+       },
+       {
+               .procname       = "tcp_synack_retries",
+               .data           = &init_net.ipv4.sysctl_tcp_synack_retries,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+#ifdef CONFIG_SYN_COOKIES
+       {
+               .procname       = "tcp_syncookies",
+               .data           = &init_net.ipv4.sysctl_tcp_syncookies,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+#endif
+       {
+               .procname       = "tcp_reordering",
+               .data           = &init_net.ipv4.sysctl_tcp_reordering,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_retries1",
+               .data           = &init_net.ipv4.sysctl_tcp_retries1,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra2         = &tcp_retr1_max
+       },
+       {
+               .procname       = "tcp_retries2",
+               .data           = &init_net.ipv4.sysctl_tcp_retries2,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_orphan_retries",
+               .data           = &init_net.ipv4.sysctl_tcp_orphan_retries,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_fin_timeout",
+               .data           = &init_net.ipv4.sysctl_tcp_fin_timeout,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       {
+               .procname       = "tcp_notsent_lowat",
+               .data           = &init_net.ipv4.sysctl_tcp_notsent_lowat,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        { }
 };
 
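The sysctl hunks above move tcp_syn_retries, tcp_synack_retries, tcp_syncookies, tcp_reordering, tcp_retries1/2, tcp_orphan_retries, tcp_fin_timeout and tcp_notsent_lowat from the global ipv4_table into ipv4_net_table, so each network namespace carries its own copy under /proc/sys/net/ipv4/. A rough user-space check (unshare needs CAP_SYS_ADMIN; the path is the usual proc location for ipv4_net_table entries):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int read_knob(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	const char *knob = "/proc/sys/net/ipv4/tcp_syn_retries";

	printf("current netns: %d\n", read_knob(knob));

	if (unshare(CLONE_NEWNET) == 0)
		/* the fresh namespace carries its own, independent copy */
		printf("fresh netns:   %d\n", read_knob(knob));
	else
		perror("unshare(CLONE_NEWNET)");
	return 0;
}
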
index 91ffef3a55d2f1861f07236b25d55eb25346345b..7d233201763bb2c8404ea952a9abb0b39998e788 100644 (file)
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#include <asm/unaligned.h>
 #include <net/busy_poll.h>
 
-int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
-
 int sysctl_tcp_min_tso_segs __read_mostly = 2;
 
 int sysctl_tcp_autocorking __read_mostly = 1;
@@ -405,7 +404,7 @@ void tcp_init_sock(struct sock *sk)
        tp->mss_cache = TCP_MSS_DEFAULT;
        u64_stats_init(&tp->syncp);
 
-       tp->reordering = sysctl_tcp_reordering;
+       tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
        tcp_enable_early_retrans(tp);
        tcp_assign_congestion_control(sk);
 
@@ -939,7 +938,7 @@ new_segment:
 
                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
-               if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+               if (!can_coalesce && i >= sysctl_max_skb_frags) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
@@ -1212,7 +1211,7 @@ new_segment:
 
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
-                               if (i == MAX_SKB_FRAGS || !sg) {
+                               if (i == sysctl_max_skb_frags || !sg) {
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                }
@@ -1465,8 +1464,10 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 
        while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
                offset = seq - TCP_SKB_CB(skb)->seq;
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+               if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+                       pr_err_once("%s: found a SYN, please report !\n", __func__);
                        offset--;
+               }
                if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
                        *off = offset;
                        return skb;
@@ -1656,8 +1657,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                                break;
 
                        offset = *seq - TCP_SKB_CB(skb)->seq;
-                       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+                       if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+                               pr_err_once("%s: found a SYN, please report !\n", __func__);
                                offset--;
+                       }
                        if (offset < skb->len)
                                goto found_ok_skb;
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -2325,6 +2328,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct net *net = sock_net(sk);
        int val;
        int err = 0;
 
@@ -2521,7 +2525,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        case TCP_LINGER2:
                if (val < 0)
                        tp->linger2 = -1;
-               else if (val > sysctl_tcp_fin_timeout / HZ)
+               else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
                        tp->linger2 = 0;
                else
                        tp->linger2 = val * HZ;
@@ -2638,6 +2642,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
        unsigned int start;
+       u64 rate64;
        u32 rate;
 
        memset(info, 0, sizeof(*info));
@@ -2703,15 +2708,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_total_retrans = tp->total_retrans;
 
        rate = READ_ONCE(sk->sk_pacing_rate);
-       info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_pacing_rate);
 
        rate = READ_ONCE(sk->sk_max_pacing_rate);
-       info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_max_pacing_rate);
 
        do {
                start = u64_stats_fetch_begin_irq(&tp->syncp);
-               info->tcpi_bytes_acked = tp->bytes_acked;
-               info->tcpi_bytes_received = tp->bytes_received;
+               put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
+               put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
        } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
        info->tcpi_segs_out = tp->segs_out;
        info->tcpi_segs_in = tp->segs_in;
@@ -2723,6 +2730,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        int val, len;
 
        if (get_user(len, optlen))
@@ -2757,12 +2765,12 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                val = keepalive_probes(tp);
                break;
        case TCP_SYNCNT:
-               val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+               val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                break;
        case TCP_LINGER2:
                val = tp->linger2;
                if (val >= 0)
-                       val = (val ? : sysctl_tcp_fin_timeout) / HZ;
+                       val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
                break;
        case TCP_DEFER_ACCEPT:
                val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
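The tcp_get_info() hunk above stores the 64-bit pacing-rate and byte counters with put_unaligned(), because the destination tcp_info is not guaranteed to be 8-byte aligned (for instance when it is packed into a netlink reply). In portable user-space code the equivalent is a memcpy-based store:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* user-space stand-in for put_unaligned(): byte-wise store, no alignment trap */
static void put_unaligned_u64(uint64_t val, void *p)
{
	memcpy(p, &val, sizeof(val));
}

int main(void)
{
	unsigned char buf[12];
	uint64_t rate = 123456789ULL, back;

	put_unaligned_u64(rate, buf + 4);       /* deliberately misaligned target */
	memcpy(&back, buf + 4, sizeof(back));
	printf("%llu\n", (unsigned long long)back);
	return 0;
}
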
index 4c65ca1a86d1033a8034a7d4881e19d5ad711cc4..279bac6357f4de9c2fa0377929badd4adc03aae8 100644 (file)
@@ -125,6 +125,41 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
        return false;
 }
 
+
+/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
+ * queue this additional data / FIN.
+ */
+void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
+               return;
+
+       skb = skb_clone(skb, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       skb_dst_drop(skb);
+       __skb_pull(skb, tcp_hdrlen(skb));
+       skb_set_owner_r(skb, sk);
+
+       TCP_SKB_CB(skb)->seq++;
+       TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
+
+       tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+       tp->syn_data_acked = 1;
+
+       /* u64_stats_update_begin(&tp->syncp) not needed here,
+        * as we certainly are not changing upper 32bit value (0)
+        */
+       tp->bytes_received = skb->len;
+
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               tcp_fin(sk);
+}
+
 static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct dst_entry *dst,
@@ -133,7 +168,6 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
-       u32 end_seq;
        bool own_req;
 
        req->num_retrans = 0;
@@ -179,35 +213,11 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        tcp_init_metrics(child);
        tcp_init_buffer_space(child);
 
-       /* Queue the data carried in the SYN packet.
-        * We used to play tricky games with skb_get().
-        * With lockless listener, it is a dead end.
-        * Do not think about it.
-        *
-        * XXX (TFO) - we honor a zero-payload TFO request for now,
-        * (any reason not to?) but no need to queue the skb since
-        * there is no data. How about SYN+FIN?
-        */
-       end_seq = TCP_SKB_CB(skb)->end_seq;
-       if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
-               struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
-
-               if (likely(skb2)) {
-                       skb_dst_drop(skb2);
-                       __skb_pull(skb2, tcp_hdrlen(skb));
-                       skb_set_owner_r(skb2, child);
-                       __skb_queue_tail(&child->sk_receive_queue, skb2);
-                       tp->syn_data_acked = 1;
-
-                       /* u64_stats_update_begin(&tp->syncp) not needed here,
-                        * as we certainly are not changing upper 32bit value (0)
-                        */
-                       tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
-               } else {
-                       end_seq = TCP_SKB_CB(skb)->seq + 1;
-               }
-       }
-       tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
+       tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+
+       tcp_fastopen_add_skb(child, skb);
+
+       tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
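
The tcp_fastopen.c hunks above factor the SYN-payload queueing into tcp_fastopen_add_skb(): the caller sets rcv_nxt to seq + 1, and the helper, if the segment carries data or a FIN, strips the TCP header, skips the SYN's sequence number, queues the skb and advances rcv_nxt to end_seq. A toy model of that sequence-number bookkeeping, with made-up values (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* A SYN carrying N payload bytes covers [seq, seq + 1 + N) in sequence
 * space, because the SYN flag itself consumes one sequence number.
 */
int main(void)
{
        uint32_t seq = 1000;            /* ISN of the incoming SYN */
        uint32_t payload_len = 5;       /* TFO payload bytes */
        uint32_t end_seq = seq + 1 + payload_len;

        uint32_t rcv_nxt = seq + 1;     /* set by tcp_fastopen_create_child() */

        if (end_seq != rcv_nxt) {       /* data (or a FIN) to queue */
                rcv_nxt = end_seq;      /* tcp_fastopen_add_skb() advances rcv_nxt */
                printf("queued %u payload bytes, rcv_nxt=%u\n",
                       (unsigned)(end_seq - seq - 1), (unsigned)rcv_nxt);
        }
        return 0;
}
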
index 0003d409fec5beec010050ef5ccafd58bf2f1501..5ee6fe0d152dbe8ded87fc7bb1f49d2053124413 100644 (file)
@@ -80,9 +80,7 @@ int sysctl_tcp_timestamps __read_mostly = 1;
 int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
-int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
 int sysctl_tcp_max_reordering __read_mostly = 300;
-EXPORT_SYMBOL(sysctl_tcp_reordering);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
@@ -126,6 +124,10 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 
+#define REXMIT_NONE    0 /* no loss recovery to do */
+#define REXMIT_LOST    1 /* retransmit packets marked lost */
+#define REXMIT_NEW     2 /* FRTO-style transmit of unsent/new packets */
+
 /* Adapt the MSS value used to make delayed ack decision to the
  * real world.
  */
@@ -1210,6 +1212,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
                sacked |= TCPCB_SACKED_ACKED;
                state->flag |= FLAG_DATA_SACKED;
                tp->sacked_out += pcount;
+               tp->delivered += pcount;  /* Out-of-order packets delivered */
 
                fack_count += pcount;
 
@@ -1821,8 +1824,12 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 static void tcp_add_reno_sack(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       u32 prior_sacked = tp->sacked_out;
+
        tp->sacked_out++;
        tcp_check_reno_reordering(sk, 0);
+       if (tp->sacked_out > prior_sacked)
+               tp->delivered++; /* Some out-of-order packet is delivered */
        tcp_verify_left_out(tp);
 }
 
@@ -1834,6 +1841,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 
        if (acked > 0) {
                /* One ACK acked hole. The rest eat duplicate ACKs. */
+               tp->delivered += max_t(int, acked - tp->sacked_out, 1);
                if (acked - 1 >= tp->sacked_out)
                        tp->sacked_out = 0;
                else
@@ -1873,6 +1881,7 @@ void tcp_enter_loss(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        struct sk_buff *skb;
        bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
        bool is_reneg;                  /* is receiver reneging on SACKs? */
@@ -1923,9 +1932,9 @@ void tcp_enter_loss(struct sock *sk)
         * suggests that the degree of reordering is over-estimated.
         */
        if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-           tp->sacked_out >= sysctl_tcp_reordering)
+           tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
                tp->reordering = min_t(unsigned int, tp->reordering,
-                                      sysctl_tcp_reordering);
+                                      net->ipv4.sysctl_tcp_reordering);
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
        tcp_ecn_queue_cwr(tp);
@@ -2109,6 +2118,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out;
+       int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 
        /* Trick#1: The loss is proven. */
        if (tp->lost_out)
@@ -2123,7 +2133,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
         */
        packets_out = tp->packets_out;
        if (packets_out <= tp->reordering &&
-           tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
+           tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) &&
            !tcp_may_send_now(sk)) {
                /* We have nothing to send. This connection is limited
                 * either by receiver window or by application.
@@ -2164,8 +2174,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int cnt, oldcnt;
-       int err;
+       int cnt, oldcnt, lost;
        unsigned int mss;
        /* Use SACK to deduce losses of new sequences sent during recovery */
        const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;
@@ -2205,9 +2214,10 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
                                break;
 
                        mss = tcp_skb_mss(skb);
-                       err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
-                                          mss, GFP_ATOMIC);
-                       if (err < 0)
+                       /* If needed, chop off the prefix to mark as lost. */
+                       lost = (packets - oldcnt) * mss;
+                       if (lost < skb->len &&
+                           tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0)
                                break;
                        cnt = packets;
                }
@@ -2366,8 +2376,6 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
                        tp->snd_ssthresh = tp->prior_ssthresh;
                        tcp_ecn_withdraw_cwr(tp);
                }
-       } else {
-               tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
        }
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->undo_marker = 0;
@@ -2469,14 +2477,12 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
        tcp_ecn_queue_cwr(tp);
 }
 
-static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-                              int fast_rexmit, int flag)
+static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
+                              int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int sndcnt = 0;
        int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
-       int newly_acked_sacked = prior_unsacked -
-                                (tp->packets_out - tp->sacked_out);
 
        if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
                return;
@@ -2494,7 +2500,8 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
        } else {
                sndcnt = min(delta, newly_acked_sacked);
        }
-       sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+       /* Force a fast retransmit upon entering fast recovery */
+       sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
        tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
 
@@ -2539,7 +2546,7 @@ static void tcp_try_keep_open(struct sock *sk)
        }
 }
 
-static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2553,8 +2560,6 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
-       } else {
-               tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
        }
 }
 
@@ -2664,7 +2669,8 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
  * recovered or spurious. Otherwise retransmits more on partial ACKs.
  */
-static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
+static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
+                            int *rexmit)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        bool recovered = !before(tp->snd_una, tp->high_seq);
@@ -2686,10 +2692,15 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
                                tp->frto = 0; /* Step 3.a. loss was real */
                } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
                        tp->high_seq = tp->snd_nxt;
-                       __tcp_push_pending_frames(sk, tcp_current_mss(sk),
-                                                 TCP_NAGLE_OFF);
-                       if (after(tp->snd_nxt, tp->high_seq))
-                               return; /* Step 2.b */
+                       /* Step 2.b. Try to send new data (but defer it until
+                        * cwnd is updated in tcp_ack()). Otherwise fall back
+                        * to conventional recovery.
+                        */
+                       if (tcp_send_head(sk) &&
+                           after(tcp_wnd_end(tp), tp->snd_nxt)) {
+                               *rexmit = REXMIT_NEW;
+                               return;
+                       }
                        tp->frto = 0;
                }
        }
@@ -2708,12 +2719,11 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
                else if (flag & FLAG_SND_UNA_ADVANCED)
                        tcp_reset_reno_sack(tp);
        }
-       tcp_xmit_retransmit_queue(sk);
+       *rexmit = REXMIT_LOST;
 }
 
 /* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-                                const int prior_unsacked, int flag)
+static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2728,10 +2738,8 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
                 * can undo. Otherwise we clock out new packets but do not
                 * mark more packets lost or retransmit more.
                 */
-               if (tp->retrans_out) {
-                       tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
+               if (tp->retrans_out)
                        return true;
-               }
 
                if (!tcp_any_retrans_done(sk))
                        tp->retrans_stamp = 0;
@@ -2750,21 +2758,21 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
  * taking into account both packets sitting in receiver's buffer and
  * packets lost by network.
  *
- * Besides that it does CWND reduction, when packet loss is detected
- * and changes state of machine.
+ * Besides that it updates the congestion state when packet loss or ECN
+ * is detected. It does not reduce the cwnd itself; that is done later
+ * by the congestion control.
  *
  * It does _not_ decide what to send; that is done in
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, const int acked,
-                                 const int prior_unsacked,
-                                 bool is_dupack, int flag)
+                                 bool is_dupack, int *ack_flag, int *rexmit)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       int fast_rexmit = 0, flag = *ack_flag;
        bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
-       int fast_rexmit = 0;
 
        if (WARN_ON(!tp->packets_out && tp->sacked_out))
                tp->sacked_out = 0;
@@ -2811,8 +2819,10 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
        /* Use RACK to detect loss */
        if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
-           tcp_rack_mark_lost(sk))
+           tcp_rack_mark_lost(sk)) {
                flag |= FLAG_LOST_RETRANS;
+               *ack_flag |= FLAG_LOST_RETRANS;
+       }
 
        /* E. Process state. */
        switch (icsk->icsk_ca_state) {
@@ -2821,7 +2831,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                        if (tcp_is_reno(tp) && is_dupack)
                                tcp_add_reno_sack(sk);
                } else {
-                       if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
+                       if (tcp_try_undo_partial(sk, acked))
                                return;
                        /* Partial ACK arrived. Force fast retransmit. */
                        do_lost = tcp_is_reno(tp) ||
@@ -2833,7 +2843,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                }
                break;
        case TCP_CA_Loss:
-               tcp_process_loss(sk, flag, is_dupack);
+               tcp_process_loss(sk, flag, is_dupack, rexmit);
                if (icsk->icsk_ca_state != TCP_CA_Open &&
                    !(flag & FLAG_LOST_RETRANS))
                        return;
@@ -2850,7 +2860,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                        tcp_try_undo_dsack(sk);
 
                if (!tcp_time_to_recover(sk, flag)) {
-                       tcp_try_to_open(sk, flag, prior_unsacked);
+                       tcp_try_to_open(sk, flag);
                        return;
                }
 
@@ -2872,8 +2882,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
        if (do_lost)
                tcp_update_scoreboard(sk, fast_rexmit);
-       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
-       tcp_xmit_retransmit_queue(sk);
+       *rexmit = REXMIT_LOST;
 }
 
 /* Kathleen Nichols' algorithm for tracking the minimum value of
@@ -3095,7 +3104,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  * arrived at the other end.
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-                              u32 prior_snd_una,
+                              u32 prior_snd_una, int *acked,
                               struct tcp_sacktag_state *sack)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -3153,10 +3162,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                flag |= FLAG_ORIG_SACK_ACKED;
                }
 
-               if (sacked & TCPCB_SACKED_ACKED)
+               if (sacked & TCPCB_SACKED_ACKED) {
                        tp->sacked_out -= acked_pcount;
-               else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb))
-                       tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+               } else if (tcp_is_sack(tp)) {
+                       tp->delivered += acked_pcount;
+                       if (!tcp_skb_spurious_retrans(tp, skb))
+                               tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+               }
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
 
@@ -3265,6 +3277,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                }
        }
 #endif
+       *acked = pkts_acked;
        return flag;
 }
 
@@ -3298,21 +3311,36 @@ static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
 /* Decide whether to run the increase function of congestion control. */
 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
-       if (tcp_in_cwnd_reduction(sk))
-               return false;
-
        /* If reordering is high then always grow cwnd whenever data is
         * delivered regardless of its ordering. Otherwise stay conservative
         * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
         * new SACK or ECE mark may first advance cwnd here and later reduce
         * cwnd in tcp_fastretrans_alert() based on more states.
         */
-       if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
+       if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
                return flag & FLAG_FORWARD_PROGRESS;
 
        return flag & FLAG_DATA_ACKED;
 }
 
+/* The "ultimate" congestion control function that aims to replace the rigid
+ * cwnd increase and decrease control (tcp_cong_avoid, tcp_*cwnd_reduction).
+ * It's called toward the end of processing an ACK with precise rate
+ * information. All transmissions and retransmissions are deferred afterwards.
+ */
+static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
+                            int flag)
+{
+       if (tcp_in_cwnd_reduction(sk)) {
+               /* Reduce cwnd if state mandates */
+               tcp_cwnd_reduction(sk, acked_sacked, flag);
+       } else if (tcp_may_raise_cwnd(sk, flag)) {
+               /* Advance cwnd if state allows */
+               tcp_cong_avoid(sk, ack, acked_sacked);
+       }
+       tcp_update_pacing_rate(sk);
+}
+
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
@@ -3508,6 +3536,27 @@ static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
                icsk->icsk_ca_ops->in_ack_event(sk, flags);
 }
 
+/* Congestion control has updated the cwnd already. So if we're in
+ * loss recovery, we now do any new sends (for FRTO) or retransmits
+ * (for CA_Loss or CA_Recovery) that make sense.
+ */
+static void tcp_xmit_recovery(struct sock *sk, int rexmit)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (rexmit == REXMIT_NONE)
+               return;
+
+       if (unlikely(rexmit == REXMIT_NEW)) {
+               __tcp_push_pending_frames(sk, tcp_current_mss(sk),
+                                         TCP_NAGLE_OFF);
+               if (after(tp->snd_nxt, tp->high_seq))
+                       return;
+               tp->frto = 0;
+       }
+       tcp_xmit_retransmit_queue(sk);
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3520,8 +3569,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        bool is_dupack = false;
        u32 prior_fackets;
        int prior_packets = tp->packets_out;
-       const int prior_unsacked = tp->packets_out - tp->sacked_out;
+       u32 prior_delivered = tp->delivered;
        int acked = 0; /* Number of packets newly acked */
+       int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
 
        sack_state.first_sackt.v64 = 0;
 
@@ -3610,23 +3660,16 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                goto no_queue;
 
        /* See if we can take anything off of the retransmit queue. */
-       acked = tp->packets_out;
-       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
+       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
                                    &sack_state);
-       acked -= tp->packets_out;
 
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-               tcp_fastretrans_alert(sk, acked, prior_unsacked,
-                                     is_dupack, flag);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
        }
        if (tp->tlp_high_seq)
                tcp_process_tlp_ack(sk, ack, flag);
 
-       /* Advance cwnd if state allows */
-       if (tcp_may_raise_cwnd(sk, flag))
-               tcp_cong_avoid(sk, ack, acked);
-
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
                struct dst_entry *dst = __sk_dst_get(sk);
                if (dst)
@@ -3635,14 +3678,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS)
                tcp_schedule_loss_probe(sk);
-       tcp_update_pacing_rate(sk);
+       tcp_cong_control(sk, ack, tp->delivered - prior_delivered, flag);
+       tcp_xmit_recovery(sk, rexmit);
        return 1;
 
 no_queue:
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
-               tcp_fastretrans_alert(sk, acked, prior_unsacked,
-                                     is_dupack, flag);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
@@ -3665,8 +3708,8 @@ old_ack:
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
                                                &sack_state);
-               tcp_fastretrans_alert(sk, acked, prior_unsacked,
-                                     is_dupack, flag);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+               tcp_xmit_recovery(sk, rexmit);
        }
 
        SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
@@ -3997,7 +4040,7 @@ void tcp_reset(struct sock *sk)
  *
  *     If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
  */
-static void tcp_fin(struct sock *sk)
+void tcp_fin(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -5511,6 +5554,9 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
        tp->syn_data_acked = tp->syn_data;
        if (tp->syn_data_acked)
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+
+       tcp_fastopen_add_skb(sk, synack);
+
        return false;
 }
 
@@ -6117,9 +6163,10 @@ static bool tcp_syn_flood_action(const struct sock *sk,
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        const char *msg = "Dropping request";
        bool want_cookie = false;
+       struct net *net = sock_net(sk);
 
 #ifdef CONFIG_SYN_COOKIES
-       if (sysctl_tcp_syncookies) {
+       if (net->ipv4.sysctl_tcp_syncookies) {
                msg = "Sending cookies";
                want_cookie = true;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
@@ -6128,7 +6175,7 @@ static bool tcp_syn_flood_action(const struct sock *sk,
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
        if (!queue->synflood_warned &&
-           sysctl_tcp_syncookies != 2 &&
+           net->ipv4.sysctl_tcp_syncookies != 2 &&
            xchg(&queue->synflood_warned, 1) == 0)
                pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
                        proto, ntohs(tcp_hdr(skb)->dest), msg);
@@ -6161,6 +6208,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
        struct tcp_options_received tmp_opt;
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        struct sock *fastopen_sk = NULL;
        struct dst_entry *dst = NULL;
        struct request_sock *req;
@@ -6171,7 +6219,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
         * limitations, they conserve resources and peer is
         * evidently real one.
         */
-       if ((sysctl_tcp_syncookies == 2 ||
+       if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
             inet_csk_reqsk_queue_is_full(sk)) && !isn) {
                want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
                if (!want_cookie)
@@ -6237,7 +6285,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                        }
                }
                /* Kill the following clause, if you dislike this way. */
-               else if (!sysctl_tcp_syncookies &&
+               else if (!net->ipv4.sysctl_tcp_syncookies &&
                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         !tcp_peer_is_proven(req, dst, false,
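
Taken together, the tcp_input.c changes above restructure ACK processing: tcp_fastretrans_alert()/tcp_process_loss() only record what recovery needs in a REXMIT_* flag, tcp_cong_control() then grows or shrinks cwnd from the count of newly delivered packets (the tp->delivered delta), and tcp_xmit_recovery() finally transmits. A standalone sketch of that ordering, with stub functions and made-up numbers (illustrative only, not kernel code):

#include <stdio.h>

enum { REXMIT_NONE, REXMIT_LOST, REXMIT_NEW };

static int fastretrans_alert(int dupack)
{
        /* pretend a dupack means packets were marked lost */
        return dupack ? REXMIT_LOST : REXMIT_NONE;
}

static void cong_control(int newly_delivered, int in_cwnd_reduction)
{
        if (in_cwnd_reduction)
                printf("PRR-style reduction driven by %d newly delivered pkts\n",
                       newly_delivered);
        else
                printf("cong_avoid: may grow cwnd by up to %d pkts\n",
                       newly_delivered);
}

static void xmit_recovery(int rexmit)
{
        if (rexmit == REXMIT_NONE)
                return;
        if (rexmit == REXMIT_NEW) {
                printf("FRTO: push new, previously unsent data\n");
                return; /* the real code only skips retransmits if new data went out */
        }
        printf("retransmit packets marked lost\n");
}

int main(void)
{
        int delivered_before = 10, delivered_after = 13;        /* tp->delivered */
        int rexmit = fastretrans_alert(1 /* dupack */);

        cong_control(delivered_after - delivered_before, 1);
        xmit_recovery(rexmit);  /* (re)transmission is deferred to this point */
        return 0;
}
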
index 85854db3e0949af8a1953518431e5648c5f8d58f..e37222e46e0ee6098cfaa86827d848ca2f0a6fb5 100644 (file)
@@ -311,7 +311,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
 
 
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
        struct request_sock *req = inet_reqsk(sk);
        struct net *net = sock_net(sk);
@@ -323,7 +323,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
 
        if (seq != tcp_rsk(req)->snt_isn) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-       } else {
+       } else if (abort) {
                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
@@ -383,7 +383,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        }
        seq = ntohl(th->seq);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
-               return tcp_req_err(sk, seq);
+               return tcp_req_err(sk, seq,
+                                 type == ICMP_PARAMETERPROB ||
+                                 type == ICMP_TIME_EXCEEDED ||
+                                 (type == ICMP_DEST_UNREACH &&
+                                  (code == ICMP_NET_UNREACH ||
+                                   code == ICMP_HOST_UNREACH)));
 
        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
@@ -707,7 +712,8 @@ release_sk1:
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct net *net,
+                           struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
@@ -722,7 +728,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                        ];
        } rep;
        struct ip_reply_arg arg;
-       struct net *net = dev_net(skb_dst(skb)->dev);
 
        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));
@@ -784,7 +789,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-       tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+       tcp_v4_send_ack(sock_net(sk), skb,
+                       tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
@@ -803,8 +809,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
-       tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
-                       tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+       u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+                                            tcp_sk(sk)->snd_nxt;
+
+       tcp_v4_send_ack(sock_net(sk), skb, seq,
                        tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
                        tcp_time_stamp,
                        req->ts_recent,
@@ -857,7 +865,6 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
        kfree(inet_rsk(req)->opt);
 }
 
-
 #ifdef CONFIG_TCP_MD5SIG
 /*
  * RFC2385 MD5 checksumming requires a mapping of
@@ -2388,6 +2395,16 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
        net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
 
+       net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
+       net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
+       net->ipv4.sysctl_tcp_syncookies = 1;
+       net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
+       net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
+       net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
+       net->ipv4.sysctl_tcp_orphan_retries = 0;
+       net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
+       net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
+
        return 0;
 fail:
        tcp_sk_exit(net);
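
In the tcp_ipv4.c hunk above, tcp_v4_err() now tells tcp_req_err() whether an ICMP error should abort a request socket that is still in TCP_NEW_SYN_RECV. A small standalone restatement of that condition, using the standard ICMP numeric assignments (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define ICMP_DEST_UNREACH       3
#define ICMP_TIME_EXCEEDED      11
#define ICMP_PARAMETERPROB      12
#define ICMP_NET_UNREACH        0
#define ICMP_HOST_UNREACH       1

static bool icmp_aborts_new_syn_recv(int type, int code)
{
        return type == ICMP_PARAMETERPROB ||
               type == ICMP_TIME_EXCEEDED ||
               (type == ICMP_DEST_UNREACH &&
                (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH));
}

int main(void)
{
        /* e.g. dest-unreach/port-unreach (code 3) does not abort the request */
        printf("port-unreach aborts? %d\n",
               icmp_aborts_new_syn_recv(ICMP_DEST_UNREACH, 3));
        printf("host-unreach aborts? %d\n",
               icmp_aborts_new_syn_recv(ICMP_DEST_UNREACH, ICMP_HOST_UNREACH));
        return 0;
}
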
index c8cbc2b4b7921fb4f70681e4ac6d945f5499654c..c26241f3057b18d8e1d2aaece7d1e2f7f8399462 100644 (file)
@@ -369,6 +369,7 @@ void tcp_update_metrics(struct sock *sk)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
@@ -473,7 +474,7 @@ void tcp_update_metrics(struct sock *sk)
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
-                           tp->reordering != sysctl_tcp_reordering)
+                           tp->reordering != net->ipv4.sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
index 75632a92582425db63f1078c223cbd54a91fa0c3..fadd8b978951817f75402af7a12f2b5681b00462 100644 (file)
@@ -27,9 +27,6 @@
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 
-int sysctl_tcp_syncookies __read_mostly = 1;
-EXPORT_SYMBOL(sysctl_tcp_syncookies);
-
 int sysctl_tcp_abort_on_overflow __read_mostly;
 
 struct inet_timewait_death_row tcp_death_row = {
index fda379cd600d4e033333a37301f3cba4eec0ba7b..7d2c7a400456bf036ec6b7a32eaf2657eed94378 100644 (file)
@@ -62,9 +62,6 @@ int sysctl_tcp_tso_win_divisor __read_mostly = 3;
 /* By default, RFC2861 behavior.  */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
-unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
-EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);
-
 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                           int push_one, gfp_t gfp);
 
@@ -3476,6 +3473,7 @@ void tcp_send_probe0(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        unsigned long probe_max;
        int err;
 
@@ -3489,7 +3487,7 @@ void tcp_send_probe0(struct sock *sk)
        }
 
        if (err <= 0) {
-               if (icsk->icsk_backoff < sysctl_tcp_retries2)
+               if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
                        icsk->icsk_backoff++;
                icsk->icsk_probes_out++;
                probe_max = TCP_RTO_MAX;
index a4730a28b220a4f8c9a59db57eb30eb3501dbf43..49bc474f8e35ee50407622ab02867df698bc5117 100644 (file)
 #include <linux/gfp.h>
 #include <net/tcp.h>
 
-int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
-int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
-int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
-int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
-int sysctl_tcp_orphan_retries __read_mostly;
 int sysctl_tcp_thin_linear_timeouts __read_mostly;
 
 static void tcp_write_err(struct sock *sk)
@@ -82,7 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 /* Calculate maximal number of retries on an orphaned socket. */
 static int tcp_orphan_retries(struct sock *sk, bool alive)
 {
-       int retries = sysctl_tcp_orphan_retries; /* May be zero. */
+       int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
 
        /* We know from an ICMP that something is wrong. */
        if (sk->sk_err_soft && !alive)
@@ -157,6 +152,7 @@ static int tcp_write_timeout(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        int retry_until;
        bool do_reset, syn_set = false;
 
@@ -169,10 +165,10 @@ static int tcp_write_timeout(struct sock *sk)
                                NET_INC_STATS_BH(sock_net(sk),
                                                 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                }
-               retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+               retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                syn_set = true;
        } else {
-               if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+               if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) {
                        /* Some middle-boxes may black-hole Fast Open _after_
                         * the handshake. Therefore we conservatively disable
                         * Fast Open on this path on recurring timeouts with
@@ -181,7 +177,7 @@ static int tcp_write_timeout(struct sock *sk)
                        if (tp->syn_data_acked &&
                            tp->bytes_acked <= tp->rx_opt.mss_clamp) {
                                tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-                               if (icsk->icsk_retransmits == sysctl_tcp_retries1)
+                               if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
                                        NET_INC_STATS_BH(sock_net(sk),
                                                         LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                        }
@@ -191,7 +187,7 @@ static int tcp_write_timeout(struct sock *sk)
                        dst_negative_advice(sk);
                }
 
-               retry_until = sysctl_tcp_retries2;
+               retry_until = net->ipv4.sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
 
@@ -305,7 +301,7 @@ static void tcp_probe_timer(struct sock *sk)
                 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
                goto abort;
 
-       max_probes = sysctl_tcp_retries2;
+       max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
@@ -332,7 +328,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int max_retries = icsk->icsk_syn_retries ? :
-           sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+           sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
        struct request_sock *req;
 
        req = tcp_sk(sk)->fastopen_rsk;
@@ -360,6 +356,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
 void tcp_retransmit_timer(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (tp->fastopen_rsk) {
@@ -490,7 +487,7 @@ out_reset_timer:
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-       if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
+       if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0))
                __sk_dst_reset(sk);
 
 out:;
index dc45b538e237b908f4d6c6337b4c533a4236735a..be0b21852b138ebc5eed9caf37740cbe1cb1abe0 100644 (file)
@@ -499,6 +499,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
        struct sock *sk, *result;
        struct hlist_nulls_node *node;
        int score, badness, matches = 0, reuseport = 0;
+       bool select_ok = true;
        u32 hash = 0;
 
 begin:
@@ -512,14 +513,18 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               struct sock *sk2;
                                hash = udp_ehashfn(net, daddr, hnum,
                                                   saddr, sport);
-                               sk2 = reuseport_select_sock(sk, hash, skb,
-                                                           sizeof(struct udphdr));
-                               if (sk2) {
-                                       result = sk2;
-                                       goto found;
+                               if (select_ok) {
+                                       struct sock *sk2;
+
+                                       sk2 = reuseport_select_sock(sk, hash, skb,
+                                                       sizeof(struct udphdr));
+                                       if (sk2) {
+                                               result = sk2;
+                                               select_ok = false;
+                                               goto found;
+                                       }
                                }
                                matches = 1;
                        }
@@ -563,6 +568,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
        int score, badness, matches = 0, reuseport = 0;
+       bool select_ok = true;
        u32 hash = 0;
 
        rcu_read_lock();
@@ -601,14 +607,18 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               struct sock *sk2;
                                hash = udp_ehashfn(net, daddr, hnum,
                                                   saddr, sport);
-                               sk2 = reuseport_select_sock(sk, hash, skb,
+                               if (select_ok) {
+                                       struct sock *sk2;
+
+                                       sk2 = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (sk2) {
-                                       result = sk2;
-                                       goto found;
+                                       if (sk2) {
+                                               result = sk2;
+                                               select_ok = false;
+                                               goto found;
+                                       }
                                }
                                matches = 1;
                        }
index bb7dabe2ebbf8aabc624af02c00dbc2b33ad544a..40c897515ddc44d57c434dcd761c2ddc5e7fad50 100644 (file)
@@ -69,6 +69,7 @@ config INET6_ESP
        select CRYPTO_CBC
        select CRYPTO_SHA1
        select CRYPTO_DES
+       select CRYPTO_ECHAINIV
        ---help---
          Support for IPsec ESP.
 
index 38eeddedfc21baa843b315d720cc94a28b6af605..9efd9ffdc34cf5f39ab8b2548050cfe1e6ed4141 100644 (file)
@@ -3538,6 +3538,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 {
        struct inet6_dev *idev = ifp->idev;
        struct net_device *dev = idev->dev;
+       bool notify = false;
 
        addrconf_join_solict(dev, &ifp->addr);
 
@@ -3583,7 +3584,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
                        /* Because optimistic nodes can use this address,
                         * notify listeners. If DAD fails, RTM_DELADDR is sent.
                         */
-                       ipv6_ifa_notify(RTM_NEWADDR, ifp);
+                       notify = true;
                }
        }
 
@@ -3591,6 +3592,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 out:
        spin_unlock(&ifp->lock);
        read_unlock_bh(&idev->lock);
+       if (notify)
+               ipv6_ifa_notify(RTM_NEWADDR, ifp);
 }
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
index 517c55b01ba84b55a0004ce9505d14c4a3951cfc..428162155280ca2af782ea9fd9fa26e0d1666d89 100644 (file)
@@ -162,6 +162,9 @@ ipv4_connected:
        fl6.fl6_dport = inet->inet_dport;
        fl6.fl6_sport = inet->inet_sport;
 
+       if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
        if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
                fl6.flowi6_oif = np->mcast_oif;
 
index 1f9ebe3cbb4ac042edd0b05754d1fc03cdfe73cd..dc2db4f7b182c4ebc1a8a51487a3d2e893955df9 100644 (file)
@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                }
                spin_lock_bh(&ip6_sk_fl_lock);
                for (sflp = &np->ipv6_fl_list;
-                    (sfl = rcu_dereference(*sflp)) != NULL;
+                    (sfl = rcu_dereference_protected(*sflp,
+                                                     lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
                     sflp = &sfl->next) {
                        if (sfl->fl->label == freq.flr_label) {
                                if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
                                        np->flow_label &= ~IPV6_FLOWLABEL_MASK;
-                               *sflp = rcu_dereference(sfl->next);
+                               *sflp = sfl->next;
                                spin_unlock_bh(&ip6_sk_fl_lock);
                                fl_release(sfl->fl);
                                kfree_rcu(sfl, rcu);
index 23de98f976d5b7c846e42e9260acbac3b1632c17..a163102f1803e5f9eb4a60c501bd4adcc27bd62b 100644 (file)
@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
        struct rt6_info *rt;
 #endif
        int err;
+       int flags = 0;
 
        /* The correct way to handle this would be to do
         * ip6_route_get_saddr, and then ip6_route_output; however,
@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
                        dst_release(*dst);
                        *dst = NULL;
                }
+
+               if (fl6->flowi6_oif)
+                       flags |= RT6_LOOKUP_F_IFACE;
        }
 
        if (!*dst)
-               *dst = ip6_route_output(net, sk, fl6);
+               *dst = ip6_route_output_flags(net, sk, fl6, flags);
 
        err = (*dst)->error;
        if (err)
index 3c8834bc822d2b64b8d7f4b33a9042dd279c584a..ed446639219c3ae8832a19a1f944e4dd6ddc6302 100644 (file)
@@ -1183,11 +1183,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
        return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
 }
 
-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
-                                   struct flowi6 *fl6)
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+                                        struct flowi6 *fl6, int flags)
 {
        struct dst_entry *dst;
-       int flags = 0;
        bool any_src;
 
        dst = l3mdev_rt6_dst_by_oif(net, fl6);
@@ -1208,7 +1207,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
 
        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
 }
-EXPORT_SYMBOL(ip6_route_output);
+EXPORT_SYMBOL_GPL(ip6_route_output_flags);
 
 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
index e794ef66a40135f33b5731a1059a6f45d0fc90b7..2066d1c25a11bb18bbe75caf4abe38eeccaa4b61 100644 (file)
@@ -201,14 +201,14 @@ static int ipip6_tunnel_create(struct net_device *dev)
        if ((__force u16)t->parms.i_flags & SIT_ISATAP)
                dev->priv_flags |= IFF_ISATAP;
 
+       dev->rtnl_link_ops = &sit_link_ops;
+
        err = register_netdevice(dev);
        if (err < 0)
                goto out;
 
        ipip6_tunnel_clone_6rd(dev, sitn);
 
-       dev->rtnl_link_ops = &sit_link_ops;
-
        dev_hold(dev);
 
        ipip6_tunnel_link(sitn, t);
index 2906ef20795e4ce2365011128c5eb9bfbcc9bdca..0e393ff7f5d07e7294df6cda18deddad568bddbb 100644 (file)
@@ -148,7 +148,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        struct dst_entry *dst;
        __u8 rcv_wscale;
 
-       if (!sysctl_tcp_syncookies || !th->ack || th->rst)
+       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
                goto out;
 
        if (tcp_synq_no_recent_overflow(sk))
index bd5597227b18dca536ff69b20346b357d21d2daf..ad422babc1f50fdfe12c22f7cb1bcb97ced3f2a7 100644 (file)
@@ -327,6 +327,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct tcp_sock *tp;
        __u32 seq, snd_una;
        struct sock *sk;
+       bool fatal;
        int err;
 
        sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -345,8 +346,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                return;
        }
        seq = ntohl(th->seq);
+       fatal = icmpv6_err_convert(type, code, &err);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
-               return tcp_req_err(sk, seq);
+               return tcp_req_err(sk, seq, fatal);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -400,7 +402,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                goto out;
        }
 
-       icmpv6_err_convert(type, code, &err);
 
        /* Might be for an request_sock */
        switch (sk->sk_state) {
index 5d2c2afffe7b2c0790bc00505c518d21eb4b589b..22e28a44e3c88c6e52ff59ea1c7dd8a3e964e701 100644 (file)
@@ -257,6 +257,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
        struct sock *sk, *result;
        struct hlist_nulls_node *node;
        int score, badness, matches = 0, reuseport = 0;
+       bool select_ok = true;
        u32 hash = 0;
 
 begin:
@@ -270,14 +271,18 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               struct sock *sk2;
                                hash = udp6_ehashfn(net, daddr, hnum,
                                                    saddr, sport);
-                               sk2 = reuseport_select_sock(sk, hash, skb,
-                                                           sizeof(struct udphdr));
-                               if (sk2) {
-                                       result = sk2;
-                                       goto found;
+                               if (select_ok) {
+                                       struct sock *sk2;
+
+                                       sk2 = reuseport_select_sock(sk, hash, skb,
+                                                       sizeof(struct udphdr));
+                                       if (sk2) {
+                                               result = sk2;
+                                               select_ok = false;
+                                               goto found;
+                                       }
                                }
                                matches = 1;
                        }
@@ -321,6 +326,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
        int score, badness, matches = 0, reuseport = 0;
+       bool select_ok = true;
        u32 hash = 0;
 
        rcu_read_lock();
@@ -358,14 +364,18 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               struct sock *sk2;
                                hash = udp6_ehashfn(net, daddr, hnum,
                                                    saddr, sport);
-                               sk2 = reuseport_select_sock(sk, hash, skb,
+                               if (select_ok) {
+                                       struct sock *sk2;
+
+                                       sk2 = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (sk2) {
-                                       result = sk2;
-                                       goto found;
+                                       if (sk2) {
+                                               result = sk2;
+                                               select_ok = false;
+                                               goto found;
+                                       }
                                }
                                matches = 1;
                        }
index 3c4caa60c9265dd95f62d7a3aba84d40cdba0492..5728e76ca6d51adabef2cde3e06b0b9184c2d07e 100644 (file)
@@ -134,11 +134,10 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
                return -1;
        }
        skb_put(skb, count);
+       pr_debug("%s(), skb->len=%d\n", __func__, skb->len);
 
        spin_unlock_irqrestore(&self->spinlock, flags);
 
-       pr_debug("%s(), skb->len=%d\n", __func__ , skb->len);
-
        if (flush) {
                /* ircomm_tty_do_softint will take care of the rest */
                schedule_work(&self->tqueue);
index ef50a94d3eb726f75d4202a7a15c7dd1f812a3aa..fc3598a922b071503514af87deb603a154e2b33b 100644 (file)
@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;
 
+       if (addr_len < sizeof(struct sockaddr_iucv))
+               return -EINVAL;
+
        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
index 10ad4ac1fa0ba8ebd04e793595fab47d326194ce..3a8f881b22f10c983ab354a4393157e090fd6d3a 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -61,16 +62,25 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 {
        struct ieee80211_local *local = sta->local;
        struct tid_ampdu_rx *tid_rx;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .action = IEEE80211_AMPDU_RX_STOP,
+               .tid = tid,
+               .amsdu = false,
+               .timeout = 0,
+               .ssn = 0,
+       };
 
        lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
        tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid],
                                        lockdep_is_held(&sta->ampdu_mlme.mtx));
 
-       if (!tid_rx)
+       if (!test_bit(tid, sta->ampdu_mlme.agg_session_valid))
                return;
 
        RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
+       __clear_bit(tid, sta->ampdu_mlme.agg_session_valid);
 
        ht_dbg(sta->sdata,
               "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
@@ -78,8 +88,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
               initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
               (int)reason);
 
-       if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
-                            &sta->sta, tid, NULL, 0, false))
+       if (drv_ampdu_action(local, sta->sdata, &params))
                sdata_info(sta->sdata,
                           "HW problem - can not stop rx aggregation for %pM tid %d\n",
                           sta->sta.addr, tid);
@@ -89,6 +98,13 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
                ieee80211_send_delba(sta->sdata, sta->sta.addr,
                                     tid, WLAN_BACK_RECIPIENT, reason);
 
+       /*
+        * Return here if tid_rx is not assigned, which happens when
+        * IEEE80211_HW_SUPPORTS_REORDERING_BUFFER is set.
+        */
+       if (!tid_rx)
+               return;
+
        del_timer_sync(&tid_rx->session_timer);
 
        /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
@@ -237,6 +253,15 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 {
        struct ieee80211_local *local = sta->sdata->local;
        struct tid_ampdu_rx *tid_agg_rx;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .action = IEEE80211_AMPDU_RX_START,
+               .tid = tid,
+               .amsdu = false,
+               .timeout = timeout,
+               .ssn = start_seq_num,
+       };
+
        int i, ret = -EOPNOTSUPP;
        u16 status = WLAN_STATUS_REQUEST_DECLINED;
 
@@ -275,11 +300,12 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        /* make sure the size doesn't exceed the maximum supported by the hw */
        if (buf_size > local->hw.max_rx_aggregation_subframes)
                buf_size = local->hw.max_rx_aggregation_subframes;
+       params.buf_size = buf_size;
 
        /* examine state machine */
        mutex_lock(&sta->ampdu_mlme.mtx);
 
-       if (sta->ampdu_mlme.tid_rx[tid]) {
+       if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
                ht_dbg_ratelimited(sta->sdata,
                                   "unexpected AddBA Req from %pM on tid %u\n",
                                   sta->sta.addr, tid);
@@ -290,8 +316,18 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
                                                false);
        }
 
+       if (ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) {
+               ret = drv_ampdu_action(local, sta->sdata, &params);
+               ht_dbg(sta->sdata,
+                      "Rx A-MPDU request on %pM tid %d result %d\n",
+                      sta->sta.addr, tid, ret);
+               if (!ret)
+                       status = WLAN_STATUS_SUCCESS;
+               goto end;
+       }
+
        /* prepare A-MPDU MLME for Rx aggregation */
-       tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
+       tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
        if (!tid_agg_rx)
                goto end;
 
@@ -322,8 +358,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        for (i = 0; i < buf_size; i++)
                __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
 
-       ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
-                              &sta->sta, tid, &start_seq_num, 0, false);
+       ret = drv_ampdu_action(local, sta->sdata, &params);
        ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
               sta->sta.addr, tid, ret);
        if (ret) {
@@ -341,6 +376,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        tid_agg_rx->timeout = timeout;
        tid_agg_rx->stored_mpdu_num = 0;
        tid_agg_rx->auto_seq = auto_seq;
+       tid_agg_rx->reorder_buf_filtered = 0;
        status = WLAN_STATUS_SUCCESS;
 
        /* activate it for RX */
@@ -352,6 +388,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        }
 
 end:
+       if (status == WLAN_STATUS_SUCCESS)
+               __set_bit(tid, sta->ampdu_mlme.agg_session_valid);
        mutex_unlock(&sta->ampdu_mlme.mtx);
 
 end_no_lock:
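
With the hunks above, drivers no longer receive the long ampdu_action argument list but a single struct ieee80211_ampdu_params. A minimal sketch (not part of this patch) of how a driver callback might consume it; the my_hw_* helpers are hypothetical placeholders for driver-specific code:

static int my_drv_ampdu_action(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_ampdu_params *params)
{
	struct ieee80211_sta *sta = params->sta;
	u16 tid = params->tid;

	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		/* ssn/buf_size now travel inside params */
		return my_hw_rx_agg_start(hw, sta, tid, params->ssn,
					  params->buf_size);
	case IEEE80211_AMPDU_RX_STOP:
		return my_hw_rx_agg_stop(hw, sta, tid);
	case IEEE80211_AMPDU_TX_START:
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		return my_hw_tx_agg_oper(hw, sta, tid, params->buf_size,
					 params->amsdu);
	case IEEE80211_AMPDU_TX_STOP_CONT:
		my_hw_tx_agg_stop(hw, sta, tid);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/* station is being removed: drop the session, no callback */
		return my_hw_tx_agg_stop(hw, sta, tid);
	default:
		return -EOPNOTSUPP;
	}
}
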
index ff757181b0a85c820e1acc53a088e95c78b87bff..4932e9f243a2cb9e156e7e24ac1c098fc9a4b19c 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -295,7 +296,14 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 {
        struct ieee80211_local *local = sta->local;
        struct tid_ampdu_tx *tid_tx;
-       enum ieee80211_ampdu_mlme_action action;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .tid = tid,
+               .buf_size = 0,
+               .amsdu = false,
+               .timeout = 0,
+               .ssn = 0,
+       };
        int ret;
 
        lockdep_assert_held(&sta->ampdu_mlme.mtx);
@@ -304,10 +312,10 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
        case AGG_STOP_DECLINED:
        case AGG_STOP_LOCAL_REQUEST:
        case AGG_STOP_PEER_REQUEST:
-               action = IEEE80211_AMPDU_TX_STOP_CONT;
+               params.action = IEEE80211_AMPDU_TX_STOP_CONT;
                break;
        case AGG_STOP_DESTROY_STA:
-               action = IEEE80211_AMPDU_TX_STOP_FLUSH;
+               params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
                break;
        default:
                WARN_ON_ONCE(1);
@@ -330,9 +338,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                spin_unlock_bh(&sta->lock);
                if (reason != AGG_STOP_DESTROY_STA)
                        return -EALREADY;
-               ret = drv_ampdu_action(local, sta->sdata,
-                                      IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
-                                      &sta->sta, tid, NULL, 0, false);
+               params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
+               ret = drv_ampdu_action(local, sta->sdata, &params);
                WARN_ON_ONCE(ret);
                return 0;
        }
@@ -381,8 +388,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                        WLAN_BACK_INITIATOR;
        tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
 
-       ret = drv_ampdu_action(local, sta->sdata, action,
-                              &sta->sta, tid, NULL, 0, false);
+       ret = drv_ampdu_action(local, sta->sdata, &params);
 
        /* HW shall not deny going back to legacy */
        if (WARN_ON(ret)) {
@@ -445,7 +451,14 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        struct tid_ampdu_tx *tid_tx;
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       u16 start_seq_num;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .action = IEEE80211_AMPDU_TX_START,
+               .tid = tid,
+               .buf_size = 0,
+               .amsdu = false,
+               .timeout = 0,
+       };
        int ret;
 
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -467,10 +480,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
         */
        synchronize_net();
 
-       start_seq_num = sta->tid_seq[tid] >> 4;
-
-       ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
-                              &sta->sta, tid, &start_seq_num, 0, false);
+       params.ssn = sta->tid_seq[tid] >> 4;
+       ret = drv_ampdu_action(local, sdata, &params);
        if (ret) {
                ht_dbg(sdata,
                       "BA request denied - HW unavailable for %pM tid %d\n",
@@ -499,7 +510,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
-                                    tid_tx->dialog_token, start_seq_num,
+                                    tid_tx->dialog_token, params.ssn,
                                     IEEE80211_MAX_AMPDU_BUF,
                                     tid_tx->timeout);
 }
@@ -684,18 +695,24 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
                                         struct sta_info *sta, u16 tid)
 {
        struct tid_ampdu_tx *tid_tx;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .action = IEEE80211_AMPDU_TX_OPERATIONAL,
+               .tid = tid,
+               .timeout = 0,
+               .ssn = 0,
+       };
 
        lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+       params.buf_size = tid_tx->buf_size;
+       params.amsdu = tid_tx->amsdu;
 
        ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
               sta->sta.addr, tid);
 
-       drv_ampdu_action(local, sta->sdata,
-                        IEEE80211_AMPDU_TX_OPERATIONAL,
-                        &sta->sta, tid, NULL, tid_tx->buf_size,
-                        tid_tx->amsdu);
+       drv_ampdu_action(local, sta->sdata, &params);
 
        /*
         * synchronize with TX path, while splicing the TX path
index 166a29fe6c35f63a5357a01ceeb140f19346f9bd..66d22de93c8df75e7d0b747bf71a45cb3b45f927 100644 (file)
@@ -1131,6 +1131,34 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                sta->sta.max_sp = params->max_sp;
        }
 
+       /* The sender might not have sent the last bit, consider it to be 0 */
+       if (params->ext_capab_len >= 8) {
+               u8 val = (params->ext_capab[7] &
+                         WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB) >> 7;
+
+               /* we did get all the bits, take the MSB as well */
+               if (params->ext_capab_len >= 9) {
+                       u8 val_msb = params->ext_capab[8] &
+                               WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB;
+                       val_msb <<= 1;
+                       val |= val_msb;
+               }
+
+               switch (val) {
+               case 1:
+                       sta->sta.max_amsdu_subframes = 32;
+                       break;
+               case 2:
+                       sta->sta.max_amsdu_subframes = 16;
+                       break;
+               case 3:
+                       sta->sta.max_amsdu_subframes = 8;
+                       break;
+               default:
+                       sta->sta.max_amsdu_subframes = 0;
+               }
+       }
+
        /*
         * cfg80211 validates this (1-2007) and allows setting the AID
         * only when creating a new station entry
@@ -1160,6 +1188,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
                                                  params->ht_capa, sta);
 
+       /* VHT can override some HT caps such as the A-MSDU max length */
        if (params->vht_capa)
                ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
                                                    params->vht_capa, sta);
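
The two-bit value decoded in the sta_apply_parameters() hunk above spans the last bit of extended-capabilities octet 8 (the LSB) and the first bit of octet 9 (the MSB), as implied by the shifts used there. A hypothetical stand-alone helper, not in the patch, restating the same decoding with the same WLAN_EXT_CAPA* masks:

static u8 ext_capa_max_amsdu_subframes(const u8 *ext_capab, u8 len)
{
	u8 val;

	if (len < 8)
		return 0;	/* field absent: no limit known */

	/* LSB lives in bit 7 of octet 8 ... */
	val = (ext_capab[7] & WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB) >> 7;
	/* ... MSB in bit 0 of octet 9, if the peer sent it at all */
	if (len >= 9)
		val |= (ext_capab[8] & WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB) << 1;

	switch (val) {
	case 1: return 32;
	case 2: return 16;
	case 3: return 8;
	default: return 0;	/* 0: treated as "no limit advertised" */
	}
}
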
index 1d1b9b7bdefe74ac851ca6d01554d663fae39541..283981108ca80cb5bb7182b28e626a1a14c09fb1 100644 (file)
@@ -231,7 +231,7 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
                    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
                        continue;
 
-               if (!sta->uploaded)
+               if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
                        continue;
 
                max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
index 3e24d0ddb51bfaca18d39669e41dba2bb6a91bb5..4ab5c522ceeeee5def93f869fd96618999a2daf5 100644 (file)
@@ -126,6 +126,7 @@ static const char *hw_flag_names[] = {
        FLAG(SUPPORTS_AMSDU_IN_AMPDU),
        FLAG(BEACON_TX_STATUS),
        FLAG(NEEDS_UNIQUE_STA_ADDR),
+       FLAG(SUPPORTS_REORDERING_BUFFER),
 #undef FLAG
 };
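
The new FLAG(SUPPORTS_REORDERING_BUFFER) debugfs entry mirrors a hardware capability bit. A driver whose firmware performs A-MPDU reordering itself would advertise it at registration time, roughly as in this sketch (not from the patch):

	/* in the driver's probe/registration path */
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

With this flag set, the __ieee80211_start_rx_ba_session() hunk above only forwards the ADDBA parameters to the driver and never allocates a mac80211 reorder buffer, which is why the stop path now tolerates a NULL tid_rx.
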
 
index ca1fe5576103767c3a98b52e037c0034b949887f..c258f1041d330885d08869e302a5ec255b470b4b 100644 (file)
@@ -284,9 +284,7 @@ int drv_switch_vif_chanctx(struct ieee80211_local *local,
 
 int drv_ampdu_action(struct ieee80211_local *local,
                     struct ieee80211_sub_if_data *sdata,
-                    enum ieee80211_ampdu_mlme_action action,
-                    struct ieee80211_sta *sta, u16 tid,
-                    u16 *ssn, u8 buf_size, bool amsdu)
+                    struct ieee80211_ampdu_params *params)
 {
        int ret = -EOPNOTSUPP;
 
@@ -296,12 +294,10 @@ int drv_ampdu_action(struct ieee80211_local *local,
        if (!check_sdata_in_driver(sdata))
                return -EIO;
 
-       trace_drv_ampdu_action(local, sdata, action, sta, tid,
-                              ssn, buf_size, amsdu);
+       trace_drv_ampdu_action(local, sdata, params);
 
        if (local->ops->ampdu_action)
-               ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
-                                              sta, tid, ssn, buf_size, amsdu);
+               ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params);
 
        trace_drv_return_int(local, ret);
 
index 154ce4b13406d5a31bac8797f8069184ca9cd6f5..18b0d65baff000156c8015f76b9ce527a938e95a 100644 (file)
@@ -585,9 +585,7 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
 
 int drv_ampdu_action(struct ieee80211_local *local,
                     struct ieee80211_sub_if_data *sdata,
-                    enum ieee80211_ampdu_mlme_action action,
-                    struct ieee80211_sta *sta, u16 tid,
-                    u16 *ssn, u8 buf_size, bool amsdu);
+                    struct ieee80211_ampdu_params *params);
 
 static inline int drv_get_survey(struct ieee80211_local *local, int idx,
                                struct survey_info *survey)
index 7a76ce639d58d6071681551e916f0125cf376212..f4a52877356349abed96d0a77d6ae75f301181e4 100644 (file)
@@ -230,6 +230,11 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
        /* set Rx highest rate */
        ht_cap.mcs.rx_highest = ht_cap_ie->mcs.rx_highest;
 
+       if (ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU)
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_7935;
+       else
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839;
+
  apply:
        changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
 
index f7fc0e00497fd73f858833e2cb54ec3058b177fe..fc3238376b39546d025f81ad92240356a77aa48b 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1050,9 +1051,8 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
                struct cfg80211_chan_def chandef;
                enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
 
-               ieee80211_ht_oper_to_chandef(channel,
-                                            elems->ht_operation,
-                                            &chandef);
+               cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+               ieee80211_chandef_ht_oper(elems->ht_operation, &chandef);
 
                memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
                rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
@@ -1066,9 +1066,8 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
                        struct ieee80211_vht_cap cap_ie;
                        struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap;
 
-                       ieee80211_vht_oper_to_chandef(channel,
-                                                     elems->vht_operation,
-                                                     &chandef);
+                       ieee80211_chandef_vht_oper(elems->vht_operation,
+                                                  &chandef);
                        memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
                        ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
                                                            &cap_ie, sta);
@@ -1485,14 +1484,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 
                sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
 
-               num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
-                                                        &ifibss->chandef,
-                                                        channels,
-                                                        ARRAY_SIZE(channels));
                scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
-               ieee80211_request_ibss_scan(sdata, ifibss->ssid,
-                                           ifibss->ssid_len, channels, num,
-                                           scan_width);
+
+               if (ifibss->fixed_channel) {
+                       num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+                                                                &ifibss->chandef,
+                                                                channels,
+                                                                ARRAY_SIZE(channels));
+                       ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+                                                   ifibss->ssid_len, channels,
+                                                   num, scan_width);
+               } else {
+                       ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+                                                   ifibss->ssid_len, NULL,
+                                                   0, scan_width);
+               }
        } else {
                int interval = IEEE80211_SCAN_INTERVAL;
 
@@ -1733,7 +1739,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
                if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
                        continue;
                sdata->u.ibss.last_scan_completed = jiffies;
-               ieee80211_queue_work(&local->hw, &sdata->work);
        }
        mutex_unlock(&local->iflist_mtx);
 }
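
The ibss.c hunk above switches to the channel-definition pattern used throughout this series: start from a plain 20 MHz no-HT chandef and let the HT/VHT operation elements widen it, with the renamed helpers now reporting validity. A compact restatement of that internal pattern as a hypothetical helper:

static bool my_build_chandef(struct ieee80211_channel *chan,
			     const struct ieee80211_ht_operation *ht_oper,
			     const struct ieee80211_vht_operation *vht_oper,
			     struct cfg80211_chan_def *chandef)
{
	/* baseline: 20 MHz, no HT */
	cfg80211_chandef_create(chandef, chan, NL80211_CHAN_NO_HT);

	if (ht_oper)
		ieee80211_chandef_ht_oper(ht_oper, chandef);

	/* returns false if the VHT operation element is invalid */
	if (vht_oper && !ieee80211_chandef_vht_oper(vht_oper, chandef))
		return false;

	return true;
}
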
index b84f6aa32c0831ce200f286af2ecd947ef0fefde..a49c10361f1c2f58faa922eb5f9866cf43cbf842 100644 (file)
@@ -804,6 +804,7 @@ enum txq_info_flags {
 struct txq_info {
        struct sk_buff_head queue;
        unsigned long flags;
+       unsigned long byte_cnt;
 
        /* keep last! */
        struct ieee80211_txq txq;
@@ -1466,7 +1467,13 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
 {
        WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START &&
                     status->flag & RX_FLAG_MACTIME_END);
-       return status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END);
+       if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END))
+               return true;
+       /* can't handle HT/VHT preamble yet */
+       if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
+           !(status->flag & (RX_FLAG_HT | RX_FLAG_VHT)))
+               return true;
+       return false;
 }
 
 u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
@@ -1714,6 +1721,8 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
 void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
+                                struct ieee80211_mgmt *mgmt);
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                   struct sta_info *sta, u8 opmode,
                                  enum ieee80211_band band);
@@ -1829,20 +1838,6 @@ static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
        ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
 }
 
-static inline bool ieee80211_rx_reorder_ready(struct sk_buff_head *frames)
-{
-       struct sk_buff *tail = skb_peek_tail(frames);
-       struct ieee80211_rx_status *status;
-
-       if (!tail)
-               return false;
-
-       status = IEEE80211_SKB_RXCB(tail);
-       if (status->flag & RX_FLAG_AMSDU_MORE)
-               return false;
-
-       return true;
-}
 
 extern const int ieee802_1d_to_ac[8];
 
@@ -1986,12 +1981,10 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
 
 /* channel management */
-void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                 const struct ieee80211_ht_operation *ht_oper,
-                                 struct cfg80211_chan_def *chandef);
-void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                  const struct ieee80211_vht_operation *oper,
-                                  struct cfg80211_chan_def *chandef);
+bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
+                              struct cfg80211_chan_def *chandef);
+bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+                               struct cfg80211_chan_def *chandef);
 u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
 
 int __must_check
index c9e325d2e120c0f9c230dbace71c3c405b9216a3..453b4e7417804105cb136e2ec7f4c57a39392db3 100644 (file)
@@ -977,7 +977,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        if (sdata->vif.txq) {
                struct txq_info *txqi = to_txq_info(sdata->vif.txq);
 
+               spin_lock_bh(&txqi->queue.lock);
                ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
+               txqi->byte_cnt = 0;
+               spin_unlock_bh(&txqi->queue.lock);
+
                atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
        }
 
@@ -1271,6 +1275,16 @@ static void ieee80211_iface_work(struct work_struct *work)
                                }
                        }
                        mutex_unlock(&local->sta_mtx);
+               } else if (ieee80211_is_action(mgmt->frame_control) &&
+                          mgmt->u.action.category == WLAN_CATEGORY_VHT) {
+                       switch (mgmt->u.action.u.vht_group_notif.action_code) {
+                       case WLAN_VHT_ACTION_GROUPID_MGMT:
+                               ieee80211_process_mu_groups(sdata, mgmt);
+                               break;
+                       default:
+                               WARN_ON(1);
+                               break;
+                       }
                } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
                        struct ieee80211_hdr *hdr = (void *)mgmt;
                        /*
index 6bcf0faa4a89dbf38ee125099ad8c328f378331f..8190bf27ebfff8b0a594b7c029130dc9b1b99957 100644 (file)
@@ -248,6 +248,7 @@ static void ieee80211_restart_work(struct work_struct *work)
 
        /* wait for scan work complete */
        flush_workqueue(local->workqueue);
+       flush_work(&local->sched_scan_stopped_work);
 
        WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
             "%s called with hardware scan in progress\n", __func__);
@@ -256,6 +257,11 @@ static void ieee80211_restart_work(struct work_struct *work)
        list_for_each_entry(sdata, &local->interfaces, list)
                flush_delayed_work(&sdata->dec_tailroom_needed_wk);
        ieee80211_scan_cancel(local);
+
+       /* make sure any new ROC will consider local->in_reconfig */
+       flush_delayed_work(&local->roc_work);
+       flush_work(&local->hw_roc_done);
+
        ieee80211_reconfig(local);
        rtnl_unlock();
 }
index fa28500f28fd9fd8d452602b0407741589f12e36..d32cefcb63b02ecd4f1af220c559a2a82c75430d 100644 (file)
@@ -91,11 +91,10 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
        if (sdata->vif.bss_conf.basic_rates != basic_rates)
                return false;
 
-       ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
-                                    ie->ht_operation, &sta_chan_def);
-
-       ieee80211_vht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
-                                     ie->vht_operation, &sta_chan_def);
+       cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
+                               NL80211_CHAN_NO_HT);
+       ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
+       ieee80211_chandef_vht_oper(ie->vht_operation, &sta_chan_def);
 
        if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
                                         &sta_chan_def))
@@ -1370,17 +1369,6 @@ out:
        sdata_unlock(sdata);
 }
 
-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
-{
-       struct ieee80211_sub_if_data *sdata;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               if (ieee80211_vif_is_mesh(&sdata->vif) &&
-                   ieee80211_sdata_running(sdata))
-                       ieee80211_queue_work(&local->hw, &sdata->work);
-       rcu_read_unlock();
-}
 
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
 {
index a1596344c3ba1c5fe590767f3c0483310d63121a..87c017a3b1ce8db928a999bec05fb1cf32169b73 100644 (file)
@@ -137,8 +137,6 @@ struct mesh_path {
  * @copy_node: function to copy nodes of the table
  * @size_order: determines size of the table, there will be 2^size_order hash
  *     buckets
- * @mean_chain_len: maximum average length for the hash buckets' list, if it is
- *     reached, the table will grow
  * @known_gates: list of known mesh gates and their mpaths by the station. The
  * gate's mpath may or may not be resolved and active.
  *
@@ -154,7 +152,6 @@ struct mesh_table {
        void (*free_node) (struct hlist_node *p, bool free_leafs);
        int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
        int size_order;
-       int mean_chain_len;
        struct hlist_head *known_gates;
        spinlock_t gates_lock;
 
@@ -362,14 +359,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
        return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
 }
 
-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
-
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
 void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_stop(void);
 #else
-static inline void
-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
 static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 { return false; }
 static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
index dadf8dc6f1cfdceb762cabcf057e2e485ad853e8..2ba7aa56b11c0ec121d7851665c989e1a2b3b2d1 100644 (file)
@@ -55,16 +55,21 @@ int mpp_paths_generation;
 static DEFINE_RWLOCK(pathtbl_resize_lock);
 
 
+static inline struct mesh_table *resize_dereference_paths(
+       struct mesh_table __rcu *table)
+{
+       return rcu_dereference_protected(table,
+                                       lockdep_is_held(&pathtbl_resize_lock));
+}
+
 static inline struct mesh_table *resize_dereference_mesh_paths(void)
 {
-       return rcu_dereference_protected(mesh_paths,
-               lockdep_is_held(&pathtbl_resize_lock));
+       return resize_dereference_paths(mesh_paths);
 }
 
 static inline struct mesh_table *resize_dereference_mpp_paths(void)
 {
-       return rcu_dereference_protected(mpp_paths,
-               lockdep_is_held(&pathtbl_resize_lock));
+       return resize_dereference_paths(mpp_paths);
 }
 
 /*
@@ -160,11 +165,10 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
        int i;
 
        if (atomic_read(&oldtbl->entries)
-                       < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
+                       < MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
                return -EAGAIN;
 
        newtbl->free_node = oldtbl->free_node;
-       newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
@@ -585,7 +589,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 
        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
-           tbl->mean_chain_len * (tbl->hash_mask + 1))
+           MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
                grow = 1;
 
        mesh_paths_generation++;
@@ -714,7 +718,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 
        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
-           tbl->mean_chain_len * (tbl->hash_mask + 1))
+           MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
                grow = 1;
 
        spin_unlock(&tbl->hashwlock[hash_idx]);
@@ -835,6 +839,29 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
        rcu_read_unlock();
 }
 
+static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
+                              const u8 *proxy)
+{
+       struct mesh_table *tbl;
+       struct mesh_path *mpp;
+       struct mpath_node *node;
+       int i;
+
+       rcu_read_lock();
+       read_lock_bh(&pathtbl_resize_lock);
+       tbl = resize_dereference_mpp_paths();
+       for_each_mesh_entry(tbl, node, i) {
+               mpp = node->mpath;
+               if (ether_addr_equal(mpp->mpp, proxy)) {
+                       spin_lock(&tbl->hashwlock[i]);
+                       __mesh_path_del(tbl, node);
+                       spin_unlock(&tbl->hashwlock[i]);
+               }
+       }
+       read_unlock_bh(&pathtbl_resize_lock);
+       rcu_read_unlock();
+}
+
 static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
 {
@@ -876,14 +903,17 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 }
 
 /**
- * mesh_path_del - delete a mesh path from the table
+ * table_path_del - delete a path from the mesh or mpp table
  *
- * @addr: dst address (ETH_ALEN length)
+ * @tbl: mesh or mpp path table
  * @sdata: local subif
+ * @addr: dst address (ETH_ALEN length)
  *
  * Returns: 0 if successful
  */
-int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+static int table_path_del(struct mesh_table __rcu *rcu_tbl,
+                         struct ieee80211_sub_if_data *sdata,
+                         const u8 *addr)
 {
        struct mesh_table *tbl;
        struct mesh_path *mpath;
@@ -892,8 +922,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
        int hash_idx;
        int err = 0;
 
-       read_lock_bh(&pathtbl_resize_lock);
-       tbl = resize_dereference_mesh_paths();
+       tbl = resize_dereference_paths(rcu_tbl);
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];
 
@@ -909,9 +938,50 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 
        err = -ENXIO;
 enddel:
-       mesh_paths_generation++;
        spin_unlock(&tbl->hashwlock[hash_idx]);
+       return err;
+}
+
+/**
+ * mesh_path_del - delete a mesh path from the table
+ *
+ * @addr: dst address (ETH_ALEN length)
+ * @sdata: local subif
+ *
+ * Returns: 0 if successful
+ */
+int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+{
+       int err = 0;
+
+       /* flush relevant mpp entries first */
+       mpp_flush_by_proxy(sdata, addr);
+
+       read_lock_bh(&pathtbl_resize_lock);
+       err = table_path_del(mesh_paths, sdata, addr);
+       mesh_paths_generation++;
        read_unlock_bh(&pathtbl_resize_lock);
+
+       return err;
+}
+
+/**
+ * mpp_path_del - delete a mesh proxy path from the table
+ *
+ * @addr: addr address (ETH_ALEN length)
+ * @sdata: local subif
+ *
+ * Returns: 0 if successful
+ */
+static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+{
+       int err = 0;
+
+       read_lock_bh(&pathtbl_resize_lock);
+       err = table_path_del(mpp_paths, sdata, addr);
+       mpp_paths_generation++;
+       read_unlock_bh(&pathtbl_resize_lock);
+
        return err;
 }
 
@@ -1076,7 +1146,6 @@ int mesh_pathtbl_init(void)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
-       tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
@@ -1092,7 +1161,6 @@ int mesh_pathtbl_init(void)
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
-       tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
@@ -1131,6 +1199,17 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->sdata, mpath->dst);
        }
+
+       tbl = rcu_dereference(mpp_paths);
+       for_each_mesh_entry(tbl, node, i) {
+               if (node->mpath->sdata != sdata)
+                       continue;
+               mpath = node->mpath;
+               if ((!(mpath->flags & MESH_PATH_FIXED)) &&
+                   time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
+                       mpp_path_del(mpath->sdata, mpath->dst);
+       }
+
        rcu_read_unlock();
 }
 
index bd3d55eb21d4f8fb5d4cf7a0da75a506ecdf00eb..a07e93c21c9ede90bd94328f1e4aaa51476d58bd 100644 (file)
@@ -976,6 +976,10 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                        mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
                        goto out;
                }
+
+               /* new matching peer */
+               event = OPN_ACPT;
+               goto out;
        } else {
                if (!test_sta_flag(sta, WLAN_STA_AUTH)) {
                        mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
@@ -985,12 +989,6 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                        goto out;
        }
 
-       /* new matching peer */
-       if (!sta) {
-               event = OPN_ACPT;
-               goto out;
-       }
-
        switch (ftype) {
        case WLAN_SP_MESH_PEERING_OPEN:
                if (!matches_local)
index 1c342e2592c4ab8af62871a4e44da0f3029140b0..cbb752ab21720f585a3f2412a3490a552b2786e6 100644 (file)
@@ -196,16 +196,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 
        /* check 40 MHz support, if we have it */
        if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
-               switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
-               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-                       chandef->width = NL80211_CHAN_WIDTH_40;
-                       chandef->center_freq1 += 10;
-                       break;
-               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-                       chandef->width = NL80211_CHAN_WIDTH_40;
-                       chandef->center_freq1 -= 10;
-                       break;
-               }
+               ieee80211_chandef_ht_oper(ht_oper, chandef);
        } else {
                /* 40 MHz (and 80 MHz) must be supported for VHT */
                ret = IEEE80211_STA_DISABLE_VHT;
@@ -219,35 +210,11 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       vht_chandef.chan = channel;
-       vht_chandef.center_freq1 =
-               ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
-                                              channel->band);
-       vht_chandef.center_freq2 = 0;
-
-       switch (vht_oper->chan_width) {
-       case IEEE80211_VHT_CHANWIDTH_USE_HT:
-               vht_chandef.width = chandef->width;
-               vht_chandef.center_freq1 = chandef->center_freq1;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_80MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_80;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_160MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_160;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
-               vht_chandef.center_freq2 =
-                       ieee80211_channel_to_frequency(
-                               vht_oper->center_freq_seg2_idx,
-                               channel->band);
-               break;
-       default:
+       vht_chandef = *chandef;
+       if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
                if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
-                                  "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
-                                  vht_oper->chan_width);
+                                  "AP VHT information is invalid, disable VHT\n");
                ret = IEEE80211_STA_DISABLE_VHT;
                goto out;
        }
@@ -1638,8 +1605,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
 
 void ieee80211_dfs_cac_timer_work(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work = to_delayed_work(work);
        struct ieee80211_sub_if_data *sdata =
                container_of(delayed_work, struct ieee80211_sub_if_data,
                             dfs_cac_timer_work);
@@ -2079,6 +2045,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
        memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
        memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
+
+       /* reset MU-MIMO ownership and group data */
+       memset(sdata->vif.bss_conf.mu_group.membership, 0,
+              sizeof(sdata->vif.bss_conf.mu_group.membership));
+       memset(sdata->vif.bss_conf.mu_group.position, 0,
+              sizeof(sdata->vif.bss_conf.mu_group.position));
+       changed |= BSS_CHANGED_MU_GROUPS;
        sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
 
        sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -3571,6 +3544,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                                elems.ht_cap_elem, elems.ht_operation,
                                elems.vht_operation, bssid, &changed)) {
                mutex_unlock(&local->sta_mtx);
+               sdata_info(sdata,
+                          "failed to follow AP %pM bandwidth change, disconnect\n",
+                          bssid);
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_DEAUTH_LEAVING,
                                       true, deauth_buf);
@@ -3946,11 +3922,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                         * We actually lost the connection ... or did we?
                         * Let's make sure!
                         */
-                       wiphy_debug(local->hw.wiphy,
-                                   "%s: No probe response from AP %pM"
-                                   " after %dms, disconnecting.\n",
-                                   sdata->name,
-                                   bssid, probe_wait_ms);
+                       mlme_dbg(sdata,
+                                "No probe response from AP %pM after %dms, disconnecting.\n",
+                                bssid, probe_wait_ms);
 
                        ieee80211_sta_connection_lost(sdata, bssid,
                                WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
@@ -4005,8 +3979,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
                if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
                        ieee80211_queue_work(&sdata->local->hw,
                                             &sdata->u.mgd.monitor_work);
-               /* and do all the other regular work too */
-               ieee80211_queue_work(&sdata->local->hw, &sdata->work);
        }
 }
 
@@ -4538,6 +4510,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->associated) {
                u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
+               sdata_info(sdata,
+                          "disconnect from AP %pM for new auth to %pM\n",
+                          ifmgd->associated->bssid, req->bss->bssid);
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_UNSPECIFIED,
                                       false, frame_buf);
@@ -4606,6 +4581,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->associated) {
                u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
+               sdata_info(sdata,
+                          "disconnect from AP %pM for new assoc to %pM\n",
+                          ifmgd->associated->bssid, req->bss->bssid);
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_UNSPECIFIED,
                                       false, frame_buf);
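
The MU-MIMO group reset earlier in this file is paired with a BSS_CHANGED_MU_GROUPS notification. A driver would typically pick up the new membership/position data in its bss_info_changed callback, roughly as below; my_hw_set_mu_groups() is a hypothetical hardware hook:

static void my_drv_bss_info_changed(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_bss_conf *info,
				    u32 changed)
{
	if (changed & BSS_CHANGED_MU_GROUPS)
		my_hw_set_mu_groups(hw, vif,
				    info->mu_group.membership,
				    info->mu_group.position);
}
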
index 8b2f4eaac2ba65f8e25ae47dc316bf288ef8e0e9..55a9c5b94ce128d06ffb3154173fa2ea62a53f9b 100644 (file)
@@ -252,14 +252,11 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
 static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
                                         unsigned long start_time)
 {
-       struct ieee80211_local *local = roc->sdata->local;
-
        if (WARN_ON(roc->notified))
                return;
 
        roc->start_time = start_time;
        roc->started = true;
-       roc->hw_begun = true;
 
        if (roc->mgmt_tx_cookie) {
                if (!WARN_ON(!roc->frame)) {
@@ -274,9 +271,6 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
        }
 
        roc->notified = true;
-
-       if (!local->ops->remain_on_channel)
-               ieee80211_recalc_sw_work(local, start_time);
 }
 
 static void ieee80211_hw_roc_start(struct work_struct *work)
@@ -291,6 +285,7 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
                if (!roc->started)
                        break;
 
+               roc->hw_begun = true;
                ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
        }
 
@@ -413,6 +408,10 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
                return;
        }
 
+       /* defer roc if driver is not started (i.e. during reconfig) */
+       if (local->in_reconfig)
+               return;
+
        roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
                               list);
 
@@ -534,8 +533,10 @@ ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local,
         * begin, otherwise they'll both be marked properly by the work
         * struct that runs once the driver notifies us of the beginning
         */
-       if (cur_roc->hw_begun)
+       if (cur_roc->hw_begun) {
+               new_roc->hw_begun = true;
                ieee80211_handle_roc_started(new_roc, now);
+       }
 
        return true;
 }
@@ -658,6 +659,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
                        queued = true;
                        roc->on_channel = tmp->on_channel;
                        ieee80211_handle_roc_started(roc, now);
+                       ieee80211_recalc_sw_work(local, now);
                        break;
                }
 
index 3ece7d1034c81ae8749cada074fbebecbe06d57f..b54f398cda5d0e2d561f2383f1d049a5410fcf68 100644 (file)
@@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
         * computing cur_tp
         */
        tmp_mrs = &mi->r[idx].stats;
-       tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
+       tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
        tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
 
        return tmp_cur_tp;
index 3928dbd24e257e68627aa977cc54a19aaa996339..702328bcabdc2f49b6b2252f304e039ebe37b9af 100644 (file)
@@ -414,15 +414,16 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
            (max_tp_group != MINSTREL_CCK_GROUP))
                return;
 
+       max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
+       max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+       max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
+
        if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
                cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
                                                    mrs->prob_ewma);
                if (cur_tp_avg > tmp_tp_avg)
                        mi->max_prob_rate = index;
 
-               max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
-               max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
-               max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
                max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
                                                        max_gpr_idx,
                                                        max_gpr_prob);
@@ -431,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
        } else {
                if (mrs->prob_ewma > tmp_prob)
                        mi->max_prob_rate = index;
-               if (mrs->prob_ewma > mg->rates[mg->max_group_prob_rate].prob_ewma)
+               if (mrs->prob_ewma > max_gpr_prob)
                        mg->max_group_prob_rate = index;
        }
 }
@@ -1334,7 +1335,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
        prob = mi->groups[i].rates[j].prob_ewma;
 
        /* convert tp_avg from pkt per second in kbps */
-       tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
+       tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
+       tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
 
        return tp_avg;
 }
index bc081850ac0e5538241bd1cab37b65aeefd2c244..91279576f4a716d00cfd810e5d6db18306520c1c 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,7 @@
 #include <linux/etherdevice.h>
 #include <linux/rcupdate.h>
 #include <linux/export.h>
+#include <linux/bitops.h>
 #include <net/mac80211.h>
 #include <net/ieee80211_radiotap.h>
 #include <asm/unaligned.h>
@@ -122,7 +124,8 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
        hdr = (void *)(skb->data + rtap_vendor_space);
 
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
-                           RX_FLAG_FAILED_PLCP_CRC))
+                           RX_FLAG_FAILED_PLCP_CRC |
+                           RX_FLAG_ONLY_MONITOR))
                return true;
 
        if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -507,7 +510,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                return NULL;
        }
 
-       if (!local->monitors) {
+       if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
                if (should_drop_frame(origskb, present_fcs_len,
                                      rtap_vendor_space)) {
                        dev_kfree_skb(origskb);
@@ -797,6 +800,26 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
        return RX_CONTINUE;
 }
 
+static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
+                                             int index)
+{
+       struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
+       struct sk_buff *tail = skb_peek_tail(frames);
+       struct ieee80211_rx_status *status;
+
+       if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
+               return true;
+
+       if (!tail)
+               return false;
+
+       status = IEEE80211_SKB_RXCB(tail);
+       if (status->flag & RX_FLAG_AMSDU_MORE)
+               return false;
+
+       return true;
+}
+
 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
                                            struct tid_ampdu_rx *tid_agg_rx,
                                            int index,
@@ -811,7 +834,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
        if (skb_queue_empty(skb_list))
                goto no_frame;
 
-       if (!ieee80211_rx_reorder_ready(skb_list)) {
+       if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                __skb_queue_purge(skb_list);
                goto no_frame;
        }
@@ -825,6 +848,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
        }
 
 no_frame:
+       tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
        tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
 }
 
@@ -865,7 +889,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
        /* release the buffer until next missing frame */
        index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
-       if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) &&
+       if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
            tid_agg_rx->stored_mpdu_num) {
                /*
                 * No buffers ready to be released, but check whether any
@@ -874,8 +898,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
                int skipped = 1;
                for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
                     j = (j + 1) % tid_agg_rx->buf_size) {
-                       if (!ieee80211_rx_reorder_ready(
-                                       &tid_agg_rx->reorder_buf[j])) {
+                       if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
                                skipped++;
                                continue;
                        }
@@ -902,8 +925,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
                                 skipped) & IEEE80211_SN_MASK;
                        skipped = 0;
                }
-       } else while (ieee80211_rx_reorder_ready(
-                               &tid_agg_rx->reorder_buf[index])) {
+       } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
                                                frames);
                index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
@@ -914,8 +936,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
                for (; j != (index - 1) % tid_agg_rx->buf_size;
                     j = (j + 1) % tid_agg_rx->buf_size) {
-                       if (ieee80211_rx_reorder_ready(
-                                       &tid_agg_rx->reorder_buf[j]))
+                       if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
                                break;
                }
 
@@ -986,7 +1007,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
        index = mpdu_seq_num % tid_agg_rx->buf_size;
 
        /* check if we already stored this frame */
-       if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) {
+       if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                dev_kfree_skb(skb);
                goto out;
        }
@@ -1099,6 +1120,9 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 
+       if (status->flag & RX_FLAG_DUP_VALIDATED)
+               return RX_CONTINUE;
+
        /*
         * Drop duplicate 802.11 retransmissions
         * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
@@ -2199,9 +2223,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
        skb->dev = dev;
        __skb_queue_head_init(&frame_list);
 
-       if (skb_linearize(skb))
-               return RX_DROP_UNUSABLE;
-
        ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
                                 rx->sdata->vif.type,
                                 rx->local->hw.extra_tx_headroom, true);
@@ -2231,7 +2252,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-       u16 q, hdrlen;
+       u16 ac, q, hdrlen;
 
        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2290,6 +2311,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
                        spin_lock_bh(&mppath->state_lock);
                        if (!ether_addr_equal(mppath->mpp, mpp_addr))
                                memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
+                       mppath->exp_time = jiffies;
                        spin_unlock_bh(&mppath->state_lock);
                }
                rcu_read_unlock();
@@ -2300,7 +2322,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
            ether_addr_equal(sdata->vif.addr, hdr->addr3))
                return RX_CONTINUE;
 
-       q = ieee80211_select_queue_80211(sdata, skb, hdr);
+       ac = ieee80211_select_queue_80211(sdata, skb, hdr);
+       q = sdata->vif.hw_queue[ac];
        if (ieee80211_queue_stopped(&local->hw, q)) {
                IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
                return RX_DROP_MONITOR;
@@ -2738,6 +2761,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                                                    opmode, status->band);
                        goto handled;
                }
+               case WLAN_VHT_ACTION_GROUPID_MGMT: {
+                       if (len < IEEE80211_MIN_ACTION_SIZE + 25)
+                               goto invalid;
+                       goto queue;
+               }
                default:
                        break;
                }
@@ -3275,6 +3303,85 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
        ieee80211_rx_handlers(&rx, &frames);
 }
 
+void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+                                         u16 ssn, u64 filtered,
+                                         u16 received_mpdus)
+{
+       struct sta_info *sta;
+       struct tid_ampdu_rx *tid_agg_rx;
+       struct sk_buff_head frames;
+       struct ieee80211_rx_data rx = {
+               /* This is OK -- must be QoS data frame */
+               .security_idx = tid,
+               .seqno_idx = tid,
+       };
+       int i, diff;
+
+       if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
+               return;
+
+       __skb_queue_head_init(&frames);
+
+       sta = container_of(pubsta, struct sta_info, sta);
+
+       rx.sta = sta;
+       rx.sdata = sta->sdata;
+       rx.local = sta->local;
+
+       rcu_read_lock();
+       tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+       if (!tid_agg_rx)
+               goto out;
+
+       spin_lock_bh(&tid_agg_rx->reorder_lock);
+
+       if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
+               int release;
+
+               /* release all frames in the reorder buffer */
+               release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
+                          IEEE80211_SN_MODULO;
+               ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
+                                                release, &frames);
+               /* update ssn to match received ssn */
+               tid_agg_rx->head_seq_num = ssn;
+       } else {
+               ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
+                                                &frames);
+       }
+
+       /* handle the case that received ssn is behind the mac ssn.
+        * it can be tid_agg_rx->buf_size behind and still be valid */
+       diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
+       if (diff >= tid_agg_rx->buf_size) {
+               tid_agg_rx->reorder_buf_filtered = 0;
+               goto release;
+       }
+       filtered = filtered >> diff;
+       ssn += diff;
+
+       /* update bitmap */
+       for (i = 0; i < tid_agg_rx->buf_size; i++) {
+               int index = (ssn + i) % tid_agg_rx->buf_size;
+
+               tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
+               if (filtered & BIT_ULL(i))
+                       tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
+       }
+
+       /* now process also frames that the filter marking released */
+       ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
+
+release:
+       spin_unlock_bh(&tid_agg_rx->reorder_lock);
+
+       ieee80211_rx_handlers(&rx, &frames);
+
+ out:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
+
 /* main receive path */
 
 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
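
ieee80211_mark_rx_ba_filtered_frames(), exported above, is meant for drivers whose firmware holds back ("filters") frames of an RX BA session. A hedged sketch of a caller, with my_fw_ba_report standing in for whatever report format the firmware actually delivers:

/* hypothetical firmware report, not a real structure */
struct my_fw_ba_report {
	u8 tid;
	u16 ssn;		/* firmware's head sequence number */
	u64 filtered_bitmap;	/* bit i set: frame ssn + i was filtered */
	u16 received_mpdus;	/* MPDUs received since the last report */
};

static void my_drv_handle_ba_report(struct ieee80211_sta *sta,
				    const struct my_fw_ba_report *rep)
{
	/* hand the bitmap back so the reorder buffer does not stall */
	ieee80211_mark_rx_ba_filtered_frames(sta, rep->tid, rep->ssn,
					     rep->filtered_bitmap,
					     rep->received_mpdus);
}
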
index a413e52f7691418116d928f684a0fc27b308c2d6..ae980ce8daff6023bd8e5f113dee6a57b13408cc 100644 (file)
@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
        bool was_scanning = local->scanning;
        struct cfg80211_scan_request *scan_req;
        struct ieee80211_sub_if_data *scan_sdata;
+       struct ieee80211_sub_if_data *sdata;
 
        lockdep_assert_held(&local->mtx);
 
@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
 
        ieee80211_mlme_notify_scan_completed(local);
        ieee80211_ibss_notify_scan_completed(local);
-       ieee80211_mesh_notify_scan_completed(local);
+
+       /* Requeue all the work that might have been ignored while
+        * the scan was in progress; if there was none this will
+        * just be a no-op for the particular interface.
+        */
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+       }
+
        if (was_scanning)
                ieee80211_start_next_roc(local);
 }
@@ -1213,6 +1223,14 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
 
        trace_api_sched_scan_stopped(local);
 
+       /*
+        * this shouldn't really happen, so for simplicity
+        * simply ignore it, and let mac80211 reconfigure
+        * the sched scan later on.
+        */
+       if (local->in_reconfig)
+               return;
+
        schedule_work(&local->sched_scan_stopped_work);
 }
 EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
index 4402ad5b27d1530bf4cc9a0d674c6538f73cb568..d20bab5c146c9d435ee67d6a11eec746db1b1bfa 100644 (file)
@@ -116,6 +116,7 @@ static void __cleanup_single_sta(struct sta_info *sta)
 
                        ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
                        atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]);
+                       txqi->byte_cnt = 0;
                }
        }
 
@@ -498,11 +499,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 {
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct station_info sinfo;
+       struct station_info *sinfo;
        int err = 0;
 
        lockdep_assert_held(&local->sta_mtx);
 
+       sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
+       if (!sinfo) {
+               err = -ENOMEM;
+               goto out_err;
+       }
+
        /* check if STA exists already */
        if (sta_info_get_bss(sdata, sta->sta.addr)) {
                err = -EEXIST;
@@ -530,14 +537,12 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        /* accept BA sessions now */
        clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
-       ieee80211_recalc_min_chandef(sdata);
        ieee80211_sta_debugfs_add(sta);
        rate_control_add_sta_debugfs(sta);
 
-       memset(&sinfo, 0, sizeof(sinfo));
-       sinfo.filled = 0;
-       sinfo.generation = local->sta_generation;
-       cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+       sinfo->generation = local->sta_generation;
+       cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
+       kfree(sinfo);
 
        sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
 
@@ -557,6 +562,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        __cleanup_single_sta(sta);
  out_err:
        mutex_unlock(&local->sta_mtx);
+       kfree(sinfo);
        rcu_read_lock();
        return err;
 }
@@ -898,7 +904,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
 {
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct station_info sinfo = {};
+       struct station_info *sinfo;
        int ret;
 
        /*
@@ -936,12 +942,14 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
 
        sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
 
-       sta_set_sinfo(sta, &sinfo);
-       cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+       sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+       if (sinfo)
+               sta_set_sinfo(sta, sinfo);
+       cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
+       kfree(sinfo);
 
        rate_control_remove_sta_debugfs(sta);
        ieee80211_sta_debugfs_remove(sta);
-       ieee80211_recalc_min_chandef(sdata);
 
        cleanup_single_sta(sta);
 }
@@ -1453,7 +1461,7 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
 
        more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids);
 
-       if (reason == IEEE80211_FRAME_RELEASE_PSPOLL)
+       if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
                driver_release_tids =
                        BIT(find_highest_prio_tid(driver_release_tids));
 
@@ -1808,14 +1816,17 @@ int sta_info_move_state(struct sta_info *sta,
                        clear_bit(WLAN_STA_AUTH, &sta->_flags);
                break;
        case IEEE80211_STA_AUTH:
-               if (sta->sta_state == IEEE80211_STA_NONE)
+               if (sta->sta_state == IEEE80211_STA_NONE) {
                        set_bit(WLAN_STA_AUTH, &sta->_flags);
-               else if (sta->sta_state == IEEE80211_STA_ASSOC)
+               } else if (sta->sta_state == IEEE80211_STA_ASSOC) {
                        clear_bit(WLAN_STA_ASSOC, &sta->_flags);
+                       ieee80211_recalc_min_chandef(sta->sdata);
+               }
                break;
        case IEEE80211_STA_ASSOC:
                if (sta->sta_state == IEEE80211_STA_AUTH) {
                        set_bit(WLAN_STA_ASSOC, &sta->_flags);
+                       ieee80211_recalc_min_chandef(sta->sdata);
                } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
                        if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
                            (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
index d6051629ed155859819a591960183c0062c230d9..053f5c4fa495ba02cd99c5ddb201ce0ad0f7aeff 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright 2002-2005, Devicescape Software, Inc.
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -167,6 +168,8 @@ struct tid_ampdu_tx {
  *
  * @reorder_buf: buffer to reorder incoming aggregated MPDUs. An MPDU may be an
  *     A-MSDU with individually reported subframes.
+ * @reorder_buf_filtered: bitmap indicating where there are filtered frames in
+ *     the reorder buffer that should be ignored when releasing frames
  * @reorder_time: jiffies when skb was added
  * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
  * @reorder_timer: releases expired frames from the reorder buffer.
@@ -194,6 +197,7 @@ struct tid_ampdu_tx {
 struct tid_ampdu_rx {
        struct rcu_head rcu_head;
        spinlock_t reorder_lock;
+       u64 reorder_buf_filtered;
        struct sk_buff_head *reorder_buf;
        unsigned long *reorder_time;
        struct timer_list session_timer;
@@ -212,20 +216,21 @@ struct tid_ampdu_rx {
 /**
  * struct sta_ampdu_mlme - STA aggregation information.
  *
+ * @mtx: mutex to protect all TX data (except non-NULL assignments
+ *     to tid_tx[idx], which are protected by the sta spinlock)
+ *     tid_start_tx is also protected by sta->lock.
  * @tid_rx: aggregation info for Rx per TID -- RCU protected
- * @tid_tx: aggregation info for Tx per TID
- * @tid_start_tx: sessions where start was requested
- * @addba_req_num: number of times addBA request has been sent.
- * @last_addba_req_time: timestamp of the last addBA request.
- * @dialog_token_allocator: dialog token enumerator for each new session;
- * @work: work struct for starting/stopping aggregation
  * @tid_rx_timer_expired: bitmap indicating on which TIDs the
  *     RX timer expired until the work for it runs
  * @tid_rx_stop_requested:  bitmap indicating which BA sessions per TID the
  *     driver requested to close until the work for it runs
- * @mtx: mutex to protect all TX data (except non-NULL assignments
- *     to tid_tx[idx], which are protected by the sta spinlock)
- *     tid_start_tx is also protected by sta->lock.
+ * @agg_session_valid: bitmap indicating which TID has a rx BA session open on
+ * @work: work struct for starting/stopping aggregation
+ * @tid_tx: aggregation info for Tx per TID
+ * @tid_start_tx: sessions where start was requested
+ * @last_addba_req_time: timestamp of the last addBA request.
+ * @addba_req_num: number of times addBA request has been sent.
+ * @dialog_token_allocator: dialog token enumerator for each new session;
  */
 struct sta_ampdu_mlme {
        struct mutex mtx;
@@ -233,6 +238,7 @@ struct sta_ampdu_mlme {
        struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS];
        unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
        unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
+       unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
        /* tx */
        struct work_struct work;
        struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
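The new agg_session_valid member follows the same per-TID bitmap pattern as the neighbouring fields: one bit per TID, stored in an array of longs sized with BITS_TO_LONGS. A compact user-space sketch of that pattern, with illustrative DEMO_* names:

/* User-space sketch of the per-TID bitmap pattern, illustrative only. */
#include <limits.h>
#include <stdio.h>

#define DEMO_NUM_TIDS   16
#define DEMO_LONG_BITS  (CHAR_BIT * sizeof(long))
#define DEMO_BITS_TO_LONGS(n) (((n) + DEMO_LONG_BITS - 1) / DEMO_LONG_BITS)

static unsigned long demo_valid[DEMO_BITS_TO_LONGS(DEMO_NUM_TIDS)];

int main(void)
{
        int tid = 5;

        /* mark a BA session on TID 5, then test it */
        demo_valid[tid / DEMO_LONG_BITS] |= 1UL << (tid % DEMO_LONG_BITS);
        printf("tid %d valid: %d\n", tid,
               !!(demo_valid[tid / DEMO_LONG_BITS] &
                  (1UL << (tid % DEMO_LONG_BITS))));
        return 0;
}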
index 5bad05e9af90fc76201f4bd9a54b931ffd5c61bc..6101deb805a830a80acccbe05499ca4f4fc33988 100644 (file)
@@ -51,6 +51,11 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
        struct ieee80211_hdr *hdr = (void *)skb->data;
        int ac;
 
+       if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+               ieee80211_free_txskb(&local->hw, skb);
+               return;
+       }
+
        /*
         * This skb 'survived' a round-trip through the driver, and
         * hopefully the driver didn't mangle it too badly. However,
index a6b4442776a0519251b1a7650007343a3541181e..2b0a17ee907abff27d2fef4c93e8d2a03eda4c17 100644 (file)
 #define KEY_PR_FMT     " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d"
 #define KEY_PR_ARG     __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx
 
-
+#define AMPDU_ACTION_ENTRY     __field(enum ieee80211_ampdu_mlme_action,               \
+                                       ieee80211_ampdu_mlme_action)                    \
+                               STA_ENTRY                                               \
+                               __field(u16, tid)                                       \
+                               __field(u16, ssn)                                       \
+                               __field(u8, buf_size)                                   \
+                               __field(bool, amsdu)                                    \
+                               __field(u16, timeout)
+#define AMPDU_ACTION_ASSIGN    STA_NAMED_ASSIGN(params->sta);                          \
+                               __entry->tid = params->tid;                             \
+                               __entry->ssn = params->ssn;                             \
+                               __entry->buf_size = params->buf_size;                   \
+                               __entry->amsdu = params->amsdu;                         \
+                               __entry->timeout = params->timeout;
+#define AMPDU_ACTION_PR_FMT    STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d"
+#define AMPDU_ACTION_PR_ARG    STA_PR_ARG, __entry->tid, __entry->ssn,                 \
+                               __entry->buf_size, __entry->amsdu, __entry->timeout
 
 /*
  * Tracing for driver callbacks.
@@ -970,38 +986,25 @@ DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
 TRACE_EVENT(drv_ampdu_action,
        TP_PROTO(struct ieee80211_local *local,
                 struct ieee80211_sub_if_data *sdata,
-                enum ieee80211_ampdu_mlme_action action,
-                struct ieee80211_sta *sta, u16 tid,
-                u16 *ssn, u8 buf_size, bool amsdu),
+                struct ieee80211_ampdu_params *params),
 
-       TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu),
+       TP_ARGS(local, sdata, params),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
-               STA_ENTRY
-               __field(u32, action)
-               __field(u16, tid)
-               __field(u16, ssn)
-               __field(u8, buf_size)
-               __field(bool, amsdu)
                VIF_ENTRY
+               AMPDU_ACTION_ENTRY
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
-               STA_ASSIGN;
-               __entry->action = action;
-               __entry->tid = tid;
-               __entry->ssn = ssn ? *ssn : 0;
-               __entry->buf_size = buf_size;
-               __entry->amsdu = amsdu;
+               AMPDU_ACTION_ASSIGN;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d",
-               LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
-               __entry->tid, __entry->buf_size, __entry->amsdu
+               LOCAL_PR_FMT VIF_PR_FMT AMPDU_ACTION_PR_FMT,
+               LOCAL_PR_ARG, VIF_PR_ARG, AMPDU_ACTION_PR_ARG
        )
 );
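The tracepoint now receives one struct ieee80211_ampdu_params instead of a long scalar argument list; the entry macros above show which fields it carries (the action, the station, tid, ssn, buf_size, amsdu and timeout). A hedged sketch of that consolidation; the demo_* names are illustrative and not the kernel's definitions:

/*
 * Hedged sketch of bundling A-MPDU session parameters into one struct,
 * mirroring only the fields the tracepoint records above.
 */
struct demo_ampdu_params {
        int action;             /* start/stop RX or TX aggregation */
        void *sta;              /* peer the session belongs to */
        unsigned short tid;     /* traffic identifier, 0..15 */
        unsigned short ssn;     /* starting sequence number */
        unsigned char buf_size; /* reorder/aggregation window size */
        _Bool amsdu;            /* peer accepts A-MSDU inside A-MPDU */
        unsigned short timeout; /* session timeout, 0 = none */
};

static int demo_drv_ampdu_action(const struct demo_ampdu_params *params)
{
        /* one pointer instead of six scalar arguments per callback */
        return params->buf_size ? 0 : -1;
}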
 
index 3311ce0f3d6c2f9c93c075458159935c631d144d..7bb67fa9f4d26ee5acf28b411b8558f4420f40c0 100644 (file)
@@ -710,6 +710,10 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        info->control.short_preamble = txrc.short_preamble;
 
+       /* don't ask rate control when rate already injected via radiotap */
+       if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
+               return TX_CONTINUE;
+
        if (tx->sta)
                assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
 
@@ -1266,7 +1270,11 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
        if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
                netif_stop_subqueue(sdata->dev, ac);
 
-       skb_queue_tail(&txqi->queue, skb);
+       spin_lock_bh(&txqi->queue.lock);
+       txqi->byte_cnt += skb->len;
+       __skb_queue_tail(&txqi->queue, skb);
+       spin_unlock_bh(&txqi->queue.lock);
+
        drv_wake_tx_queue(local, txqi);
 
        return;
@@ -1294,6 +1302,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
        if (!skb)
                goto out;
 
+       txqi->byte_cnt -= skb->len;
+
        atomic_dec(&sdata->txqs_len[ac]);
        if (__netif_subqueue_stopped(sdata->dev, ac))
                ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
@@ -1665,15 +1675,24 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx(sdata, sta, skb, false);
 }
 
-static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
+static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
+                                       struct sk_buff *skb)
 {
        struct ieee80211_radiotap_iterator iterator;
        struct ieee80211_radiotap_header *rthdr =
                (struct ieee80211_radiotap_header *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_supported_band *sband =
+               local->hw.wiphy->bands[info->band];
        int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
                                                   NULL);
        u16 txflags;
+       u16 rate = 0;
+       bool rate_found = false;
+       u8 rate_retries = 0;
+       u16 rate_flags = 0;
+       u8 mcs_known, mcs_flags;
+       int i;
 
        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
                       IEEE80211_TX_CTL_DONTFRAG;
@@ -1724,6 +1743,35 @@ static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
                                info->flags |= IEEE80211_TX_CTL_NO_ACK;
                        break;
 
+               case IEEE80211_RADIOTAP_RATE:
+                       rate = *iterator.this_arg;
+                       rate_flags = 0;
+                       rate_found = true;
+                       break;
+
+               case IEEE80211_RADIOTAP_DATA_RETRIES:
+                       rate_retries = *iterator.this_arg;
+                       break;
+
+               case IEEE80211_RADIOTAP_MCS:
+                       mcs_known = iterator.this_arg[0];
+                       mcs_flags = iterator.this_arg[1];
+                       if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
+                               break;
+
+                       rate_found = true;
+                       rate = iterator.this_arg[2];
+                       rate_flags = IEEE80211_TX_RC_MCS;
+
+                       if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
+                           mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
+                               rate_flags |= IEEE80211_TX_RC_SHORT_GI;
+
+                       if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
+                           mcs_flags & IEEE80211_RADIOTAP_MCS_BW_40)
+                               rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+                       break;
+
                /*
                 * Please update the file
                 * Documentation/networking/mac80211-injection.txt
@@ -1738,6 +1786,32 @@ static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
        if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
                return false;
 
+       if (rate_found) {
+               info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
+
+               for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+                       info->control.rates[i].idx = -1;
+                       info->control.rates[i].flags = 0;
+                       info->control.rates[i].count = 0;
+               }
+
+               if (rate_flags & IEEE80211_TX_RC_MCS) {
+                       info->control.rates[0].idx = rate;
+               } else {
+                       for (i = 0; i < sband->n_bitrates; i++) {
+                               if (rate * 5 != sband->bitrates[i].bitrate)
+                                       continue;
+
+                               info->control.rates[0].idx = i;
+                               break;
+                       }
+               }
+
+               info->control.rates[0].flags = rate_flags;
+               info->control.rates[0].count = min_t(u8, rate_retries + 1,
+                                                    local->hw.max_rate_tries);
+       }
+
        /*
         * remove the radiotap header
         * iterator->_max_length was sanity-checked against
@@ -1819,7 +1893,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
                      IEEE80211_TX_CTL_INJECTED;
 
        /* process and remove the injection radiotap header */
-       if (!ieee80211_parse_tx_radiotap(skb))
+       if (!ieee80211_parse_tx_radiotap(local, skb))
                goto fail;
 
        rcu_read_lock();
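When a legacy rate is injected via radiotap it is given in 500 kbps units, while mac80211's per-band bitrate table stores rates in 100 kbps units, hence the rate * 5 comparison in the parser above. A standalone sketch of that lookup, with an illustrative bitrate table:

/* Sketch of the legacy-rate lookup above: radiotap rate is in 500 kbps
 * units, the table below is in 100 kbps units, so a match is
 * rate * 5 == bitrate.  Table values are illustrative.
 */
#include <stdio.h>

static const int demo_bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180,
                                     240, 360, 480, 540 };

static int demo_rate_to_idx(unsigned char radiotap_rate)
{
        unsigned int i;

        for (i = 0; i < sizeof(demo_bitrates) / sizeof(demo_bitrates[0]); i++)
                if (radiotap_rate * 5 == demo_bitrates[i])
                        return i;
        return -1;      /* no match: leave rate control alone */
}

int main(void)
{
        printf("54 Mbit/s -> idx %d\n", demo_rate_to_idx(108));
        return 0;
}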
@@ -2099,8 +2173,11 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
                                        mpp_lookup = true;
                        }
 
-                       if (mpp_lookup)
+                       if (mpp_lookup) {
                                mppath = mpp_path_lookup(sdata, skb->data);
+                               if (mppath)
+                                       mppath->exp_time = jiffies;
+                       }
 
                        if (mppath && mpath)
                                mesh_path_del(mpath->sdata, mpath->dst);
index 3943d4bf289c29b436765d82dd0f241c8b7f5530..74dd4d72b2332281f0ee3e73138b8fec3a53e380 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2015  Intel Deutschland GmbH
+ * Copyright (C) 2015-2016     Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1928,6 +1928,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                          BSS_CHANGED_IDLE |
                          BSS_CHANGED_TXPOWER;
 
+               if (sdata->flags & IEEE80211_SDATA_MU_MIMO_OWNER)
+                       changed |= BSS_CHANGED_MU_GROUPS;
+
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
                        changed |= BSS_CHANGED_ASSOC |
@@ -2043,16 +2046,26 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                 */
                if (sched_scan_req->n_scan_plans > 1 ||
                    __ieee80211_request_sched_scan_start(sched_scan_sdata,
-                                                        sched_scan_req))
+                                                        sched_scan_req)) {
+                       RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
+                       RCU_INIT_POINTER(local->sched_scan_req, NULL);
                        sched_scan_stopped = true;
+               }
        mutex_unlock(&local->mtx);
 
        if (sched_scan_stopped)
                cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
  wake_up:
-       local->in_reconfig = false;
-       barrier();
+       if (local->in_reconfig) {
+               local->in_reconfig = false;
+               barrier();
+
+               /* Restart deferred ROCs */
+               mutex_lock(&local->mtx);
+               ieee80211_start_next_roc(local);
+               mutex_unlock(&local->mtx);
+       }
 
        if (local->monitors == local->open_count && local->monitors > 0)
                ieee80211_add_virtual_monitor(local);
@@ -2361,10 +2374,23 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
 
        switch (chandef->width) {
        case NL80211_CHAN_WIDTH_160:
-               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+               /*
+                * Convert 160 MHz channel width to new style as interop
+                * workaround.
+                */
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+               vht_oper->center_freq_seg2_idx = vht_oper->center_freq_seg1_idx;
+               if (chandef->chan->center_freq < chandef->center_freq1)
+                       vht_oper->center_freq_seg1_idx -= 8;
+               else
+                       vht_oper->center_freq_seg1_idx += 8;
                break;
        case NL80211_CHAN_WIDTH_80P80:
-               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+               /*
+                * Convert 80+80 MHz channel width to new style as interop
+                * workaround.
+                */
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
                break;
        case NL80211_CHAN_WIDTH_80:
                vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
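For the interop workaround above, a 160 MHz channel is advertised in the new style as 80 MHz: seg2 keeps the 160 MHz center and seg1 is shifted by 8 channel numbers (40 MHz) toward the side that contains the control channel. A small illustration, with assumed channel numbers:

/* Illustration of the shift above, channel numbers assumed: a 160 MHz
 * block centered on channel 50 (5250 MHz) with control channel 36
 * (5180 MHz) is advertised as 80 MHz, seg2 = 50, seg1 = 50 - 8 = 42.
 */
static unsigned char demo_seg1_for_160(unsigned char seg1_160mhz,
                                       int control_freq, int center_freq)
{
        return control_freq < center_freq ? seg1_160mhz - 8
                                          : seg1_160mhz + 8;
}
/* demo_seg1_for_160(50, 5180, 5250) == 42 */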
@@ -2380,17 +2406,13 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
        return pos + sizeof(struct ieee80211_vht_operation);
 }
 
-void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                 const struct ieee80211_ht_operation *ht_oper,
-                                 struct cfg80211_chan_def *chandef)
+bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
+                              struct cfg80211_chan_def *chandef)
 {
        enum nl80211_channel_type channel_type;
 
-       if (!ht_oper) {
-               cfg80211_chandef_create(chandef, control_chan,
-                                       NL80211_CHAN_NO_HT);
-               return;
-       }
+       if (!ht_oper)
+               return false;
 
        switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
        case IEEE80211_HT_PARAM_CHA_SEC_NONE:
@@ -2404,42 +2426,66 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
                break;
        default:
                channel_type = NL80211_CHAN_NO_HT;
+               return false;
        }
 
-       cfg80211_chandef_create(chandef, control_chan, channel_type);
+       cfg80211_chandef_create(chandef, chandef->chan, channel_type);
+       return true;
 }
 
-void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                  const struct ieee80211_vht_operation *oper,
-                                  struct cfg80211_chan_def *chandef)
+bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+                               struct cfg80211_chan_def *chandef)
 {
+       struct cfg80211_chan_def new = *chandef;
+       int cf1, cf2;
+
        if (!oper)
-               return;
+               return false;
 
-       chandef->chan = control_chan;
+       cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
+                                            chandef->chan->band);
+       cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
+                                            chandef->chan->band);
 
        switch (oper->chan_width) {
        case IEEE80211_VHT_CHANWIDTH_USE_HT:
                break;
        case IEEE80211_VHT_CHANWIDTH_80MHZ:
-               chandef->width = NL80211_CHAN_WIDTH_80;
+               new.width = NL80211_CHAN_WIDTH_80;
+               new.center_freq1 = cf1;
+               /* If needed, adjust based on the newer interop workaround. */
+               if (oper->center_freq_seg2_idx) {
+                       unsigned int diff;
+
+                       diff = abs(oper->center_freq_seg2_idx -
+                                  oper->center_freq_seg1_idx);
+                       if (diff == 8) {
+                               new.width = NL80211_CHAN_WIDTH_160;
+                               new.center_freq1 = cf2;
+                       } else if (diff > 8) {
+                               new.width = NL80211_CHAN_WIDTH_80P80;
+                               new.center_freq2 = cf2;
+                       }
+               }
                break;
        case IEEE80211_VHT_CHANWIDTH_160MHZ:
-               chandef->width = NL80211_CHAN_WIDTH_160;
+               new.width = NL80211_CHAN_WIDTH_160;
+               new.center_freq1 = cf1;
                break;
        case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
-               chandef->width = NL80211_CHAN_WIDTH_80P80;
+               new.width = NL80211_CHAN_WIDTH_80P80;
+               new.center_freq1 = cf1;
+               new.center_freq2 = cf2;
                break;
        default:
-               break;
+               return false;
        }
 
-       chandef->center_freq1 =
-               ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
-                                              control_chan->band);
-       chandef->center_freq2 =
-               ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
-                                              control_chan->band);
+       if (!cfg80211_chandef_valid(&new))
+               return false;
+
+       *chandef = new;
+       return true;
 }
 
 int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
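On the parsing side, ieee80211_chandef_vht_oper now also recognises that new-style encoding: when the operation element says 80 MHz but seg2 is non-zero, a seg1/seg2 distance of exactly 8 denotes a 160 MHz channel centered on seg2, and a larger distance denotes 80+80 MHz with seg2 as the second segment. A compact sketch of that decision, returning widths as plain integers for illustration:

/* Sketch of the new-style width detection above; returns 80, 160 or
 * 8080 (for 80+80).  Pure arithmetic on the two segment channel
 * numbers, illustrative only.
 */
static int demo_vht_width(unsigned char seg1, unsigned char seg2)
{
        unsigned int diff;

        if (!seg2)
                return 80;

        diff = seg1 > seg2 ? seg1 - seg2 : seg2 - seg1;
        if (diff == 8)
                return 160;
        if (diff > 8)
                return 8080;
        return 80;      /* overlapping segments: treat as plain 80 MHz */
}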
@@ -2662,6 +2708,18 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
                sband = local->hw.wiphy->bands[status->band];
                bitrate = sband->bitrates[status->rate_idx].bitrate;
                ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
+
+               if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
+                       /* TODO: handle HT/VHT preambles */
+                       if (status->band == IEEE80211_BAND_5GHZ) {
+                               ts += 20 << shift;
+                               mpdu_offset += 2;
+                       } else if (status->flag & RX_FLAG_SHORTPRE) {
+                               ts += 96;
+                       } else {
+                               ts += 192;
+                       }
+               }
        }
 
        rate = cfg80211_calculate_bitrate(&ri);
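The RX_FLAG_MACTIME_PLCP_START handling above converts a timestamp taken at PLCP start into one at the start of the MPDU by adding the preamble duration: 20 microseconds (scaled by the bandwidth shift) for OFDM on 5 GHz, 96 microseconds for a 2.4 GHz short preamble and 192 microseconds for a long preamble. A small sketch of that adjustment using the same constants:

/* Sketch of the preamble-duration adjustment above; durations are in
 * microseconds, and the 5 GHz value scales with the bandwidth shift.
 */
static unsigned long demo_plcp_to_mpdu_ts(unsigned long ts_plcp,
                                          int is_5ghz, int short_preamble,
                                          int shift)
{
        if (is_5ghz)
                return ts_plcp + (20 << shift); /* OFDM preamble */
        if (short_preamble)
                return ts_plcp + 96;            /* DSSS short preamble */
        return ts_plcp + 192;                   /* DSSS long preamble */
}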
@@ -3347,3 +3405,17 @@ void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
                txqi->txq.ac = IEEE80211_AC_BE;
        }
 }
+
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+                            unsigned long *frame_cnt,
+                            unsigned long *byte_cnt)
+{
+       struct txq_info *txqi = to_txq_info(txq);
+
+       if (frame_cnt)
+               *frame_cnt = txqi->queue.qlen;
+
+       if (byte_cnt)
+               *byte_cnt = txqi->byte_cnt;
+}
+EXPORT_SYMBOL(ieee80211_txq_get_depth);
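ieee80211_txq_get_depth exposes the frame and byte backlog that the new byte_cnt accounting in tx.c maintains. A hedged, kernel-context sketch of how a driver might consult it before pulling more frames; demo_should_pull() and its threshold are assumptions, only the exported function and its signature come from the patch:

/*
 * Hedged driver-side sketch (net/mac80211.h assumed): read the txq
 * depth before deciding whether to pull more frames.
 */
static bool demo_should_pull(struct ieee80211_txq *txq)
{
        unsigned long frames, bytes;
        const unsigned long demo_pull_threshold = 32 * 1024; /* assumption */

        ieee80211_txq_get_depth(txq, &frames, &bytes);

        return frames > 0 && bytes < demo_pull_threshold;
}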
index c38b2f07a919e20dc22363fe80911f5f5a0b004f..341d192cea522c8f25c695083b84ea9bd5013987 100644 (file)
@@ -1,6 +1,9 @@
 /*
  * VHT handling
  *
+ * Portions of this file
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -278,6 +281,23 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        }
 
        sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+
+       /* If HT IE reported 3839 bytes only, stay with that size. */
+       if (sta->sta.max_amsdu_len == IEEE80211_MAX_MPDU_LEN_HT_3839)
+               return;
+
+       switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
+               break;
+       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991;
+               break;
+       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
+       default:
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895;
+               break;
+       }
 }
 
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
@@ -425,6 +445,30 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
        return changed;
 }
 
+void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
+                                struct ieee80211_mgmt *mgmt)
+{
+       struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+
+       if (!(sdata->flags & IEEE80211_SDATA_MU_MIMO_OWNER))
+               return;
+
+       if (!memcmp(mgmt->u.action.u.vht_group_notif.position,
+                   bss_conf->mu_group.position, WLAN_USER_POSITION_LEN) &&
+           !memcmp(mgmt->u.action.u.vht_group_notif.membership,
+                   bss_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN))
+               return;
+
+       memcpy(bss_conf->mu_group.membership,
+              mgmt->u.action.u.vht_group_notif.membership,
+              WLAN_MEMBERSHIP_LEN);
+       memcpy(bss_conf->mu_group.position,
+              mgmt->u.action.u.vht_group_notif.position,
+              WLAN_USER_POSITION_LEN);
+
+       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS);
+}
+
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                 struct sta_info *sta, u8 opmode,
                                 enum ieee80211_band band)
index 43d8c9896fa3e66ea7f76ee1b7934c3129a46fd3..f0f688db6213930ad568b645080eb171bb30f345 100644 (file)
@@ -164,8 +164,6 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
        };
        struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-       if (e.cidr == 0)
-               return -EINVAL;
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK;
 
@@ -377,8 +375,6 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
        };
        struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-       if (e.cidr == 0)
-               return -EINVAL;
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK;
 
index 0328f725069332d0c84bbb3108192a2a28beeb2a..299edc6add5a6ac2c729e4caa6f9550b1cebe3df 100644 (file)
@@ -605,17 +605,13 @@ static const struct file_operations ip_vs_app_fops = {
 
 int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
-
        INIT_LIST_HEAD(&ipvs->app_list);
-       proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops);
+       proc_create("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_fops);
        return 0;
 }
 
 void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
-
        unregister_ip_vs_app(ipvs, NULL /* all */);
-       remove_proc_entry("ip_vs_app", net->proc_net);
+       remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
 }
index e7c1b052c2a3ac9cccd54ed1e4558d36bde080c4..404b2a4f4b5be90f630a20ff592e030f1ed4d671 100644 (file)
@@ -1376,8 +1376,6 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
        struct ip_vs_pe *old_pe;
        struct netns_ipvs *ipvs = svc->ipvs;
 
-       pr_info("%s: enter\n", __func__);
-
        /* Count only IPv4 services for old get/setsockopt interface */
        if (svc->af == AF_INET)
                ipvs->num_services--;
@@ -3947,7 +3945,6 @@ static struct notifier_block ip_vs_dst_notifier = {
 
 int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
        int i, idx;
 
        /* Initialize rs_table */
@@ -3974,9 +3971,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 
        spin_lock_init(&ipvs->tot_stats.lock);
 
-       proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
-       proc_create("ip_vs_stats", 0, net->proc_net, &ip_vs_stats_fops);
-       proc_create("ip_vs_stats_percpu", 0, net->proc_net,
+       proc_create("ip_vs", 0, ipvs->net->proc_net, &ip_vs_info_fops);
+       proc_create("ip_vs_stats", 0, ipvs->net->proc_net, &ip_vs_stats_fops);
+       proc_create("ip_vs_stats_percpu", 0, ipvs->net->proc_net,
                    &ip_vs_stats_percpu_fops);
 
        if (ip_vs_control_net_init_sysctl(ipvs))
@@ -3991,13 +3988,11 @@ err:
 
 void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
-
        ip_vs_trash_cleanup(ipvs);
        ip_vs_control_net_cleanup_sysctl(ipvs);
-       remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
-       remove_proc_entry("ip_vs_stats", net->proc_net);
-       remove_proc_entry("ip_vs", net->proc_net);
+       remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
+       remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
+       remove_proc_entry("ip_vs", ipvs->net->proc_net);
        free_percpu(ipvs->tot_stats.cpustats);
 }
 
index 1b8d594e493a32f0ea461ade2d2a85387054a78d..c4e9ca016a88ac32e7677253a3b7e8ef30d91f5a 100644 (file)
@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
        const char *dptr;
        int retc;
 
-       ip_vs_fill_iph_skb(p->af, skb, false, &iph);
+       retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
 
        /* Only useful with UDP */
-       if (iph.protocol != IPPROTO_UDP)
+       if (!retc || iph.protocol != IPPROTO_UDP)
                return -EINVAL;
        /* todo: IPv6 fragments:
         *       I think this only should be done for the first fragment. /HS
index 3cb3cb831591ef79515b4bde66d5db825a732afb..58882de06bd74f254033e0f06521f264c18c8d44 100644 (file)
@@ -66,6 +66,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
 
+static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
+static __read_mostly bool nf_conntrack_locks_all;
+
+void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+{
+       spin_lock(lock);
+       while (unlikely(nf_conntrack_locks_all)) {
+               spin_unlock(lock);
+               spin_lock(&nf_conntrack_locks_all_lock);
+               spin_unlock(&nf_conntrack_locks_all_lock);
+               spin_lock(lock);
+       }
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
+
 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
 {
        h1 %= CONNTRACK_LOCKS;
@@ -82,12 +97,12 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
-               spin_lock(&nf_conntrack_locks[h1]);
+               nf_conntrack_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
-               spin_lock(&nf_conntrack_locks[h2]);
+               nf_conntrack_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
@@ -102,16 +117,19 @@ static void nf_conntrack_all_lock(void)
 {
        int i;
 
-       for (i = 0; i < CONNTRACK_LOCKS; i++)
-               spin_lock_nested(&nf_conntrack_locks[i], i);
+       spin_lock(&nf_conntrack_locks_all_lock);
+       nf_conntrack_locks_all = true;
+
+       for (i = 0; i < CONNTRACK_LOCKS; i++) {
+               spin_lock(&nf_conntrack_locks[i]);
+               spin_unlock(&nf_conntrack_locks[i]);
+       }
 }
 
 static void nf_conntrack_all_unlock(void)
 {
-       int i;
-
-       for (i = 0; i < CONNTRACK_LOCKS; i++)
-               spin_unlock(&nf_conntrack_locks[i]);
+       nf_conntrack_locks_all = false;
+       spin_unlock(&nf_conntrack_locks_all_lock);
 }
 
 unsigned int nf_conntrack_htable_size __read_mostly;
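The scheme above replaces "take every bucket lock" with a per-bucket lock plus a global nf_conntrack_locks_all flag: normal paths take only their bucket lock and, if the flag is set, back off onto the global lock until the exclusive holder is done; nf_conntrack_all_lock() sets the flag under the global lock and then briefly cycles each bucket lock to drain existing holders. A reduced pthread sketch of the protocol; it is illustrative only and glosses over the memory-ordering details the kernel version depends on:

/* Reduced pthread sketch of the locking scheme above, illustrative only. */
#include <pthread.h>
#include <stdbool.h>

#define DEMO_BUCKETS 16

static pthread_mutex_t demo_bucket[DEMO_BUCKETS];
static pthread_mutex_t demo_all_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile bool demo_locks_all;

static void demo_init(void)
{
        int i;

        for (i = 0; i < DEMO_BUCKETS; i++)
                pthread_mutex_init(&demo_bucket[i], NULL);
}

static void demo_lock(int b)
{
        pthread_mutex_lock(&demo_bucket[b]);
        while (demo_locks_all) {
                /* an exclusive locker is active: queue up behind it */
                pthread_mutex_unlock(&demo_bucket[b]);
                pthread_mutex_lock(&demo_all_lock);
                pthread_mutex_unlock(&demo_all_lock);
                pthread_mutex_lock(&demo_bucket[b]);
        }
}

static void demo_lock_all(void)
{
        int i;

        pthread_mutex_lock(&demo_all_lock);     /* held until unlock_all */
        demo_locks_all = true;
        for (i = 0; i < DEMO_BUCKETS; i++) {
                /* wait for current bucket holders to drain */
                pthread_mutex_lock(&demo_bucket[i]);
                pthread_mutex_unlock(&demo_bucket[i]);
        }
}

static void demo_unlock_all(void)
{
        demo_locks_all = false;
        pthread_mutex_unlock(&demo_all_lock);
}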
@@ -757,7 +775,7 @@ restart:
        hash = hash_bucket(_hash, net);
        for (; i < net->ct.htable_size; i++) {
                lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
-               spin_lock(lockp);
+               nf_conntrack_lock(lockp);
                if (read_seqcount_retry(&net->ct.generation, sequence)) {
                        spin_unlock(lockp);
                        goto restart;
@@ -1382,7 +1400,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        for (; *bucket < net->ct.htable_size; (*bucket)++) {
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
-               spin_lock(lockp);
+               nf_conntrack_lock(lockp);
                if (*bucket < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                                if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
index bd9d31537905e4e372da427ac29ce5948f2af34c..3b40ec575cd56a7b73c0b25d9c8decdda979d842 100644 (file)
@@ -425,7 +425,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
        }
        local_bh_disable();
        for (i = 0; i < net->ct.htable_size; i++) {
-               spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+               nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
                if (i < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
                                unhelp(h, me);
index dbb1bb3edb456594184bb211332d1ee799af70ef..355e8552fd5b77682295493a22dc0a03d9338255 100644 (file)
@@ -840,7 +840,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
                lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
-               spin_lock(lockp);
+               nf_conntrack_lock(lockp);
                if (cb->args[0] >= net->ct.htable_size) {
                        spin_unlock(lockp);
                        goto out;
index b6605e0008013972cd94500db5bd626cb9bf3064..5eefe4a355c640cf85c8328740a877c6909880ba 100644 (file)
@@ -224,12 +224,12 @@ static int __init nf_tables_netdev_init(void)
 
        nft_register_chain_type(&nft_filter_chain_netdev);
        ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
-       if (ret < 0)
+       if (ret < 0) {
                nft_unregister_chain_type(&nft_filter_chain_netdev);
-
+               return ret;
+       }
        register_netdevice_notifier(&nf_tables_netdev_notifier);
-
-       return ret;
+       return 0;
 }
 
 static void __exit nf_tables_netdev_exit(void)
index 5d010f27ac018d079058000c11acc66d7086b57a..94837d236ab0e9a2fc3baa5d8fe7688a1b399afc 100644 (file)
@@ -307,12 +307,12 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
 
        local_bh_disable();
        for (i = 0; i < net->ct.htable_size; i++) {
-               spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+               nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
                if (i < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
                                untimeout(h, timeout);
                }
                spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
        }
        local_bh_enable();
 }
index 383c17138399aec2a5681580ddd3b6f200f29d67..b78c28ba465ff9c1eed6b6006b6c5e9d9da58072 100644 (file)
@@ -46,16 +46,14 @@ static void nft_byteorder_eval(const struct nft_expr *expr,
                switch (priv->op) {
                case NFT_BYTEORDER_NTOH:
                        for (i = 0; i < priv->len / 8; i++) {
-                               src64 = get_unaligned_be64(&src[i]);
-                               src64 = be64_to_cpu((__force __be64)src64);
+                               src64 = get_unaligned((u64 *)&src[i]);
                                put_unaligned_be64(src64, &dst[i]);
                        }
                        break;
                case NFT_BYTEORDER_HTON:
                        for (i = 0; i < priv->len / 8; i++) {
                                src64 = get_unaligned_be64(&src[i]);
-                               src64 = (__force u64)cpu_to_be64(src64);
-                               put_unaligned_be64(src64, &dst[i]);
+                               put_unaligned(src64, (u64 *)&dst[i]);
                        }
                        break;
                }
index a0eb2161e3efdb3476f692ce242cafaff30e7f18..d4a4619fcebc927acd1bf730e044d29813d096d3 100644 (file)
@@ -127,6 +127,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                               NF_CT_LABELS_MAX_SIZE - size);
                return;
        }
+#endif
        case NFT_CT_BYTES: /* fallthrough */
        case NFT_CT_PKTS: {
                const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
@@ -138,7 +139,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                memcpy(dest, &count, sizeof(count));
                return;
        }
-#endif
        default:
                break;
        }
index b7c43def0dc69e1cd03db238e0174283d895003a..e118397254af026fd23ed3097ddc78c8ee9fc0c4 100644 (file)
@@ -228,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        u8 nexthdr;
-       __be16 frag_off;
+       __be16 frag_off, oldlen, newlen;
        int tcphoff;
        int ret;
 
@@ -244,7 +244,12 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                return NF_DROP;
        if (ret > 0) {
                ipv6h = ipv6_hdr(skb);
-               ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret);
+               oldlen = ipv6h->payload_len;
+               newlen = htons(ntohs(oldlen) + ret);
+               if (skb->ip_summed == CHECKSUM_COMPLETE)
+                       skb->csum = csum_add(csum_sub(skb->csum, oldlen),
+                                            newlen);
+               ipv6h->payload_len = newlen;
        }
        return XT_CONTINUE;
 }
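The TCPMSS change keeps skb->csum consistent on CHECKSUM_COMPLETE by folding the old payload_len out of the sum and the new one in, rather than recomputing the checksum over the whole packet. A user-space sketch of that incremental update, mirroring the shape of csum_add()/csum_sub(); the byte-order casts and the final 16-bit fold the kernel needs are omitted:

/*
 * User-space sketch of the incremental checksum update above: when one
 * 16-bit field in checksummed data changes, fold the old value out and
 * the new value in instead of summing the whole packet again.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_csum_add(uint32_t csum, uint32_t addend)
{
        csum += addend;
        return csum + (csum < addend);  /* end-around carry */
}

static uint32_t demo_csum_sub(uint32_t csum, uint32_t addend)
{
        return demo_csum_add(csum, ~addend);
}

int main(void)
{
        uint32_t csum = 0x1234;         /* running checksum, illustrative */
        uint16_t oldlen = 100, newlen = 112;

        csum = demo_csum_add(demo_csum_sub(csum, oldlen), newlen);
        printf("updated csum: 0x%x\n", csum);
        return 0;
}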
index 81dc1bb6e0168ed17cd2d60e6dc3a394f0f34e76..f1ffb34e253f4906275d4da99ce26306b94241eb 100644 (file)
@@ -2831,7 +2831,8 @@ static int netlink_dump(struct sock *sk)
         * reasonable static buffer based on the expected largest dump of a
         * single netdev. The outcome is MSG_TRUNC error.
         */
-       skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+       if (!netlink_rx_is_mmaped(sk))
+               skb_reserve(skb, skb_tailroom(skb) - alloc_size);
        netlink_skb_set_owner_r(skb, sk);
 
        len = cb->dump(skb, cb);
index 1605691d94144aee0fc50ffb17be05eca2b59675..de9cb19efb6a3e2774624c29b2fe5c977ef92fe4 100644 (file)
@@ -91,6 +91,8 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
        struct vxlan_config conf = {
                .no_share = true,
                .flags = VXLAN_F_COLLECT_METADATA,
+               /* Don't restrict the packets that can be sent by MTU */
+               .mtu = IP_MAX_MTU,
        };
 
        if (!options) {
index 992396aa635ce1174f6e62f19ae3f19a550668c6..b7e7851ddc5d079f246e96ffe2372d659376258e 100644 (file)
@@ -1960,6 +1960,64 @@ static unsigned int run_filter(struct sk_buff *skb,
        return res;
 }
 
+static int __packet_rcv_vnet(const struct sk_buff *skb,
+                            struct virtio_net_hdr *vnet_hdr)
+{
+       *vnet_hdr = (const struct virtio_net_hdr) { 0 };
+
+       if (skb_is_gso(skb)) {
+               struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+               /* This is a hint as to how much should be linear. */
+               vnet_hdr->hdr_len =
+                       __cpu_to_virtio16(vio_le(), skb_headlen(skb));
+               vnet_hdr->gso_size =
+                       __cpu_to_virtio16(vio_le(), sinfo->gso_size);
+
+               if (sinfo->gso_type & SKB_GSO_TCPV4)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+               else if (sinfo->gso_type & SKB_GSO_TCPV6)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+               else if (sinfo->gso_type & SKB_GSO_UDP)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+               else if (sinfo->gso_type & SKB_GSO_FCOE)
+                       return -EINVAL;
+               else
+                       BUG();
+
+               if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+                       vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+       } else
+               vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               vnet_hdr->csum_start = __cpu_to_virtio16(vio_le(),
+                                 skb_checksum_start_offset(skb));
+               vnet_hdr->csum_offset = __cpu_to_virtio16(vio_le(),
+                                                skb->csum_offset);
+       } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+       } /* else everything is zero */
+
+       return 0;
+}
+
+static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
+                          size_t *len)
+{
+       struct virtio_net_hdr vnet_hdr;
+
+       if (*len < sizeof(vnet_hdr))
+               return -EINVAL;
+       *len -= sizeof(vnet_hdr);
+
+       if (__packet_rcv_vnet(skb, &vnet_hdr))
+               return -EINVAL;
+
+       return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
+}
+
 /*
  * This function makes lazy skb cloning in hope that most of packets
  * are discarded by BPF.
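packet_rcv_vnet() above prepends a struct virtio_net_hdr to what recvmsg() returns once PACKET_VNET_HDR is enabled on the socket, so GSO packets can be delivered together with their offload metadata. A hedged user-space sketch of consuming it; error handling is trimmed, the socket is not bound to a specific interface, and CAP_NET_RAW is required:

/*
 * Hedged user-space sketch: with PACKET_VNET_HDR enabled, each packet
 * read from the socket is preceded by a struct virtio_net_hdr.
 */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/virtio_net.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void demo_read_one(int fd)
{
        char buf[65536 + sizeof(struct virtio_net_hdr)];
        struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
        ssize_t len = recv(fd, buf, sizeof(buf), 0);

        if (len < (ssize_t)sizeof(*vh))
                return;

        printf("gso_type %u, gso_size %u, payload %zd bytes\n",
               (unsigned)vh->gso_type, (unsigned)vh->gso_size,
               len - sizeof(*vh));
}

int main(void)
{
        int one = 1;
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

        setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &one, sizeof(one));
        demo_read_one(fd);
        close(fd);
        return 0;
}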
@@ -2148,7 +2206,9 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                unsigned int maclen = skb_network_offset(skb);
                netoff = TPACKET_ALIGN(po->tp_hdrlen +
                                       (maclen < 16 ? 16 : maclen)) +
-                       po->tp_reserve;
+                                      po->tp_reserve;
+               if (po->has_vnet_hdr)
+                       netoff += sizeof(struct virtio_net_hdr);
                macoff = netoff - maclen;
        }
        if (po->tp_version <= TPACKET_V2) {
@@ -2185,7 +2245,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        h.raw = packet_current_rx_frame(po, skb,
                                        TP_STATUS_KERNEL, (macoff+snaplen));
        if (!h.raw)
-               goto ring_is_full;
+               goto drop_n_account;
        if (po->tp_version <= TPACKET_V2) {
                packet_increment_rx_head(po, &po->rx_ring);
        /*
@@ -2204,6 +2264,14 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        }
        spin_unlock(&sk->sk_receive_queue.lock);
 
+       if (po->has_vnet_hdr) {
+               if (__packet_rcv_vnet(skb, h.raw + macoff -
+                                          sizeof(struct virtio_net_hdr))) {
+                       spin_lock(&sk->sk_receive_queue.lock);
+                       goto drop_n_account;
+               }
+       }
+
        skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
@@ -2299,7 +2367,7 @@ drop:
        kfree_skb(skb);
        return 0;
 
-ring_is_full:
+drop_n_account:
        po->stats.stats1.tp_drops++;
        spin_unlock(&sk->sk_receive_queue.lock);
 
@@ -2347,15 +2415,92 @@ static void tpacket_set_protocol(const struct net_device *dev,
        }
 }
 
+static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
+{
+       unsigned short gso_type = 0;
+
+       if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+           (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
+            __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
+             __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
+               vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
+                        __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
+                       __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
+
+       if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
+               return -EINVAL;
+
+       if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+               switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+               case VIRTIO_NET_HDR_GSO_TCPV4:
+                       gso_type = SKB_GSO_TCPV4;
+                       break;
+               case VIRTIO_NET_HDR_GSO_TCPV6:
+                       gso_type = SKB_GSO_TCPV6;
+                       break;
+               case VIRTIO_NET_HDR_GSO_UDP:
+                       gso_type = SKB_GSO_UDP;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+                       gso_type |= SKB_GSO_TCP_ECN;
+
+               if (vnet_hdr->gso_size == 0)
+                       return -EINVAL;
+       }
+
+       vnet_hdr->gso_type = gso_type;  /* changes type, temporary storage */
+       return 0;
+}
+
+static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
+                                struct virtio_net_hdr *vnet_hdr)
+{
+       int n;
+
+       if (*len < sizeof(*vnet_hdr))
+               return -EINVAL;
+       *len -= sizeof(*vnet_hdr);
+
+       n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
+       if (n != sizeof(*vnet_hdr))
+               return -EFAULT;
+
+       return __packet_snd_vnet_parse(vnet_hdr, *len);
+}
+
+static int packet_snd_vnet_gso(struct sk_buff *skb,
+                              struct virtio_net_hdr *vnet_hdr)
+{
+       if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+               u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start);
+               u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset);
+
+               if (!skb_partial_csum_set(skb, s, o))
+                       return -EINVAL;
+       }
+
+       skb_shinfo(skb)->gso_size =
+               __virtio16_to_cpu(vio_le(), vnet_hdr->gso_size);
+       skb_shinfo(skb)->gso_type = vnet_hdr->gso_type;
+
+       /* Header must be checked, and gso_segs computed. */
+       skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+       skb_shinfo(skb)->gso_segs = 0;
+       return 0;
+}
+
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
-               void *frame, struct net_device *dev, int size_max,
-               __be16 proto, unsigned char *addr, int hlen)
+               void *frame, struct net_device *dev, void *data, int tp_len,
+               __be16 proto, unsigned char *addr, int hlen, int copylen)
 {
        union tpacket_uhdr ph;
-       int to_write, offset, len, tp_len, nr_frags, len_max;
+       int to_write, offset, len, nr_frags, len_max;
        struct socket *sock = po->sk.sk_socket;
        struct page *page;
-       void *data;
        int err;
 
        ph.raw = frame;
@@ -2367,51 +2512,9 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
        skb_shinfo(skb)->destructor_arg = ph.raw;
 
-       switch (po->tp_version) {
-       case TPACKET_V2:
-               tp_len = ph.h2->tp_len;
-               break;
-       default:
-               tp_len = ph.h1->tp_len;
-               break;
-       }
-       if (unlikely(tp_len > size_max)) {
-               pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
-               return -EMSGSIZE;
-       }
-
        skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
 
-       if (unlikely(po->tp_tx_has_off)) {
-               int off_min, off_max, off;
-               off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
-               off_max = po->tx_ring.frame_size - tp_len;
-               if (sock->type == SOCK_DGRAM) {
-                       switch (po->tp_version) {
-                       case TPACKET_V2:
-                               off = ph.h2->tp_net;
-                               break;
-                       default:
-                               off = ph.h1->tp_net;
-                               break;
-                       }
-               } else {
-                       switch (po->tp_version) {
-                       case TPACKET_V2:
-                               off = ph.h2->tp_mac;
-                               break;
-                       default:
-                               off = ph.h1->tp_mac;
-                               break;
-                       }
-               }
-               if (unlikely((off < off_min) || (off_max < off)))
-                       return -EINVAL;
-               data = ph.raw + off;
-       } else {
-               data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
-       }
        to_write = tp_len;
 
        if (sock->type == SOCK_DGRAM) {
@@ -2419,20 +2522,17 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                                NULL, tp_len);
                if (unlikely(err < 0))
                        return -EINVAL;
-       } else if (dev->hard_header_len) {
-               if (ll_header_truncated(dev, tp_len))
-                       return -EINVAL;
-
+       } else if (copylen) {
                skb_push(skb, dev->hard_header_len);
-               err = skb_store_bits(skb, 0, data,
-                               dev->hard_header_len);
+               skb_put(skb, copylen - dev->hard_header_len);
+               err = skb_store_bits(skb, 0, data, copylen);
                if (unlikely(err))
                        return err;
                if (!skb->protocol)
                        tpacket_set_protocol(dev, skb);
 
-               data += dev->hard_header_len;
-               to_write -= dev->hard_header_len;
+               data += copylen;
+               to_write -= copylen;
        }
 
        offset = offset_in_page(data);
@@ -2469,10 +2569,66 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        return tp_len;
 }
 
+static int tpacket_parse_header(struct packet_sock *po, void *frame,
+                               int size_max, void **data)
+{
+       union tpacket_uhdr ph;
+       int tp_len, off;
+
+       ph.raw = frame;
+
+       switch (po->tp_version) {
+       case TPACKET_V2:
+               tp_len = ph.h2->tp_len;
+               break;
+       default:
+               tp_len = ph.h1->tp_len;
+               break;
+       }
+       if (unlikely(tp_len > size_max)) {
+               pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
+               return -EMSGSIZE;
+       }
+
+       if (unlikely(po->tp_tx_has_off)) {
+               int off_min, off_max;
+
+               off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
+               off_max = po->tx_ring.frame_size - tp_len;
+               if (po->sk.sk_type == SOCK_DGRAM) {
+                       switch (po->tp_version) {
+                       case TPACKET_V2:
+                               off = ph.h2->tp_net;
+                               break;
+                       default:
+                               off = ph.h1->tp_net;
+                               break;
+                       }
+               } else {
+                       switch (po->tp_version) {
+                       case TPACKET_V2:
+                               off = ph.h2->tp_mac;
+                               break;
+                       default:
+                               off = ph.h1->tp_mac;
+                               break;
+                       }
+               }
+               if (unlikely((off < off_min) || (off_max < off)))
+                       return -EINVAL;
+       } else {
+               off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
+       }
+
+       *data = frame + off;
+       return tp_len;
+}
+
 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 {
        struct sk_buff *skb;
        struct net_device *dev;
+       struct virtio_net_hdr *vnet_hdr = NULL;
        __be16 proto;
        int err, reserve = 0;
        void *ph;
@@ -2480,9 +2636,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
        int tp_len, size_max;
        unsigned char *addr;
+       void *data;
        int len_sum = 0;
        int status = TP_STATUS_AVAILABLE;
-       int hlen, tlen;
+       int hlen, tlen, copylen = 0;
 
        mutex_lock(&po->pg_vec_lock);
 
@@ -2515,7 +2672,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        size_max = po->tx_ring.frame_size
                - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
 
-       if (size_max > dev->mtu + reserve + VLAN_HLEN)
+       if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
                size_max = dev->mtu + reserve + VLAN_HLEN;
 
        do {
@@ -2527,11 +2684,36 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                        continue;
                }
 
+               skb = NULL;
+               tp_len = tpacket_parse_header(po, ph, size_max, &data);
+               if (tp_len < 0)
+                       goto tpacket_error;
+
                status = TP_STATUS_SEND_REQUEST;
                hlen = LL_RESERVED_SPACE(dev);
                tlen = dev->needed_tailroom;
+               if (po->has_vnet_hdr) {
+                       vnet_hdr = data;
+                       data += sizeof(*vnet_hdr);
+                       tp_len -= sizeof(*vnet_hdr);
+                       if (tp_len < 0 ||
+                           __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
+                               tp_len = -EINVAL;
+                               goto tpacket_error;
+                       }
+                       copylen = __virtio16_to_cpu(vio_le(),
+                                                   vnet_hdr->hdr_len);
+               }
+               if (dev->hard_header_len) {
+                       if (ll_header_truncated(dev, tp_len)) {
+                               tp_len = -EINVAL;
+                               goto tpacket_error;
+                       }
+                       copylen = max_t(int, copylen, dev->hard_header_len);
+               }
                skb = sock_alloc_send_skb(&po->sk,
-                               hlen + tlen + sizeof(struct sockaddr_ll),
+                               hlen + tlen + sizeof(struct sockaddr_ll) +
+                               (copylen - dev->hard_header_len),
                                !need_wait, &err);
 
                if (unlikely(skb == NULL)) {
@@ -2540,14 +2722,16 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                                err = len_sum;
                        goto out_status;
                }
-               tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
-                                         addr, hlen);
+               tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
+                                         addr, hlen, copylen);
                if (likely(tp_len >= 0) &&
                    tp_len > dev->mtu + reserve &&
+                   !po->has_vnet_hdr &&
                    !packet_extra_vlan_len_allowed(dev, skb))
                        tp_len = -EMSGSIZE;
 
                if (unlikely(tp_len < 0)) {
+tpacket_error:
                        if (po->tp_loss) {
                                __packet_set_status(po, ph,
                                                TP_STATUS_AVAILABLE);
@@ -2561,6 +2745,11 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                        }
                }
 
+               if (po->has_vnet_hdr && packet_snd_vnet_gso(skb, vnet_hdr)) {
+                       tp_len = -EINVAL;
+                       goto tpacket_error;
+               }
+
                packet_pick_tx_queue(dev, skb);
 
                skb->destructor = tpacket_destruct_skb;
@@ -2643,12 +2832,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        struct sockcm_cookie sockc;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int offset = 0;
-       int vnet_hdr_len;
        struct packet_sock *po = pkt_sk(sk);
-       unsigned short gso_type = 0;
        int hlen, tlen;
        int extra_len = 0;
-       ssize_t n;
 
        /*
         *      Get and verify the address.
@@ -2686,53 +2872,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        if (sock->type == SOCK_RAW)
                reserve = dev->hard_header_len;
        if (po->has_vnet_hdr) {
-               vnet_hdr_len = sizeof(vnet_hdr);
-
-               err = -EINVAL;
-               if (len < vnet_hdr_len)
-                       goto out_unlock;
-
-               len -= vnet_hdr_len;
-
-               err = -EFAULT;
-               n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
-               if (n != vnet_hdr_len)
-                       goto out_unlock;
-
-               if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
-                   (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
-                    __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
-                     __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
-                       vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
-                                __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
-                               __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);
-
-               err = -EINVAL;
-               if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
+               err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
+               if (err)
                        goto out_unlock;
-
-               if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
-                       switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
-                       case VIRTIO_NET_HDR_GSO_TCPV4:
-                               gso_type = SKB_GSO_TCPV4;
-                               break;
-                       case VIRTIO_NET_HDR_GSO_TCPV6:
-                               gso_type = SKB_GSO_TCPV6;
-                               break;
-                       case VIRTIO_NET_HDR_GSO_UDP:
-                               gso_type = SKB_GSO_UDP;
-                               break;
-                       default:
-                               goto out_unlock;
-                       }
-
-                       if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
-                               gso_type |= SKB_GSO_TCP_ECN;
-
-                       if (vnet_hdr.gso_size == 0)
-                               goto out_unlock;
-
-               }
        }
 
        if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
@@ -2744,7 +2886,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        }
 
        err = -EMSGSIZE;
-       if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
+       if (!vnet_hdr.gso_type &&
+           (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
                goto out_unlock;
 
        err = -ENOBUFS;
@@ -2775,7 +2918,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 
        sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
 
-       if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
+       if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
            !packet_extra_vlan_len_allowed(dev, skb)) {
                err = -EMSGSIZE;
                goto out_free;
@@ -2789,24 +2932,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        packet_pick_tx_queue(dev, skb);
 
        if (po->has_vnet_hdr) {
-               if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-                       u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
-                       u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
-                       if (!skb_partial_csum_set(skb, s, o)) {
-                               err = -EINVAL;
-                               goto out_free;
-                       }
-               }
-
-               skb_shinfo(skb)->gso_size =
-                       __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
-               skb_shinfo(skb)->gso_type = gso_type;
-
-               /* Header must be checked, and gso_segs computed. */
-               skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-               skb_shinfo(skb)->gso_segs = 0;
-
-               len += vnet_hdr_len;
+               err = packet_snd_vnet_gso(skb, &vnet_hdr);
+               if (err)
+                       goto out_free;
+               len += sizeof(vnet_hdr);
        }
 
        skb_probe_transport_header(skb, reserve);
@@ -3177,51 +3306,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                packet_rcv_has_room(pkt_sk(sk), NULL);
 
        if (pkt_sk(sk)->has_vnet_hdr) {
-               struct virtio_net_hdr vnet_hdr = { 0 };
-
-               err = -EINVAL;
-               vnet_hdr_len = sizeof(vnet_hdr);
-               if (len < vnet_hdr_len)
-                       goto out_free;
-
-               len -= vnet_hdr_len;
-
-               if (skb_is_gso(skb)) {
-                       struct skb_shared_info *sinfo = skb_shinfo(skb);
-
-                       /* This is a hint as to how much should be linear. */
-                       vnet_hdr.hdr_len =
-                               __cpu_to_virtio16(vio_le(), skb_headlen(skb));
-                       vnet_hdr.gso_size =
-                               __cpu_to_virtio16(vio_le(), sinfo->gso_size);
-                       if (sinfo->gso_type & SKB_GSO_TCPV4)
-                               vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
-                       else if (sinfo->gso_type & SKB_GSO_TCPV6)
-                               vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
-                       else if (sinfo->gso_type & SKB_GSO_UDP)
-                               vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
-                       else if (sinfo->gso_type & SKB_GSO_FCOE)
-                               goto out_free;
-                       else
-                               BUG();
-                       if (sinfo->gso_type & SKB_GSO_TCP_ECN)
-                               vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
-               } else
-                       vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
-
-               if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
-                                         skb_checksum_start_offset(skb));
-                       vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
-                                                        skb->csum_offset);
-               } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-                       vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
-               } /* else everything is zero */
-
-               err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
-               if (err < 0)
+               err = packet_rcv_vnet(msg, skb, &len);
+               if (err)
                        goto out_free;
+               vnet_hdr_len = sizeof(struct virtio_net_hdr);
        }
 
        /* You lose any data beyond the buffer you gave. If it worries
@@ -3552,8 +3640,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                }
                if (optlen < len)
                        return -EINVAL;
-               if (pkt_sk(sk)->has_vnet_hdr)
-                       return -EINVAL;
                if (copy_from_user(&req_u.req, optval, len))
                        return -EFAULT;
                return packet_set_ring(sk, &req_u, 0,
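The af_packet.c changes above factor the virtio_net_hdr handling into shared helpers (__packet_snd_vnet_parse(), packet_snd_vnet_gso(), packet_rcv_vnet()) and lift the restriction that kept PACKET_VNET_HDR and the transmit ring mutually exclusive. For orientation only, a minimal userspace sketch (not part of the patch; interface name, privileges and frame contents are placeholders) of what a sender with PACKET_VNET_HDR enabled hands to the non-ring packet_snd() path:

/*
 * Hypothetical userspace sketch (not from the patch): send one frame on an
 * AF_PACKET socket with PACKET_VNET_HDR enabled.  The leading virtio_net_hdr
 * requests no offloads at all (GSO_NONE, no checksum help); real users would
 * fill in gso_size/csum_start for offloaded traffic.
 */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/virtio_net.h>
#include <net/if.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct sockaddr_ll sll = { .sll_family = AF_PACKET };
	struct {
		struct virtio_net_hdr hdr;	/* consumed by the kernel */
		unsigned char frame[64];	/* link-layer frame follows */
	} pkt = { 0 };

	if (fd < 0 || setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR,
				 &one, sizeof(one)) < 0) {
		perror("PACKET_VNET_HDR");
		return 1;
	}

	pkt.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;	/* no segmentation */
	pkt.hdr.flags = 0;				/* checksum already complete */

	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex = if_nametoindex("lo");		/* placeholder device */
	sll.sll_halen = ETH_ALEN;

	if (sendto(fd, &pkt, sizeof(pkt), 0,
		   (struct sockaddr *)&sll, sizeof(sll)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}

Running this requires CAP_NET_RAW; the point is only the layout, a virtio_net_hdr followed by the frame, which is the same layout the TX-ring path now parses in tpacket_parse_header()/tpacket_fill_skb().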
index f53bf3b6558b094b6e1b379568620157b4ffae17..48ebccf0fe727f70e9f09d77b053cf4e2d2324ca 100644 (file)
@@ -57,6 +57,8 @@ struct rfkill {
 
        bool                    registered;
        bool                    persistent;
+       bool                    polling_paused;
+       bool                    suspended;
 
        const struct rfkill_ops *ops;
        void                    *data;
@@ -233,29 +235,6 @@ static void rfkill_event(struct rfkill *rfkill)
        rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
 }
 
-static bool __rfkill_set_hw_state(struct rfkill *rfkill,
-                                 bool blocked, bool *change)
-{
-       unsigned long flags;
-       bool prev, any;
-
-       BUG_ON(!rfkill);
-
-       spin_lock_irqsave(&rfkill->lock, flags);
-       prev = !!(rfkill->state & RFKILL_BLOCK_HW);
-       if (blocked)
-               rfkill->state |= RFKILL_BLOCK_HW;
-       else
-               rfkill->state &= ~RFKILL_BLOCK_HW;
-       *change = prev != blocked;
-       any = !!(rfkill->state & RFKILL_BLOCK_ANY);
-       spin_unlock_irqrestore(&rfkill->lock, flags);
-
-       rfkill_led_trigger_event(rfkill);
-
-       return any;
-}
-
 /**
  * rfkill_set_block - wrapper for set_block method
  *
@@ -285,7 +264,7 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
        spin_lock_irqsave(&rfkill->lock, flags);
        prev = rfkill->state & RFKILL_BLOCK_SW;
 
-       if (rfkill->state & RFKILL_BLOCK_SW)
+       if (prev)
                rfkill->state |= RFKILL_BLOCK_SW_PREV;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
@@ -332,8 +311,7 @@ static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
  * @blocked: the new state
  *
  * This function sets the state of all switches of given type,
- * unless a specific switch is claimed by userspace (in which case,
- * that switch is left alone) or suspended.
+ * unless a specific switch is suspended.
  *
  * Caller must have acquired rfkill_global_mutex.
  */
@@ -480,14 +458,26 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
 
 bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
 {
-       bool ret, change;
+       unsigned long flags;
+       bool ret, prev;
 
-       ret = __rfkill_set_hw_state(rfkill, blocked, &change);
+       BUG_ON(!rfkill);
+
+       spin_lock_irqsave(&rfkill->lock, flags);
+       prev = !!(rfkill->state & RFKILL_BLOCK_HW);
+       if (blocked)
+               rfkill->state |= RFKILL_BLOCK_HW;
+       else
+               rfkill->state &= ~RFKILL_BLOCK_HW;
+       ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
+       spin_unlock_irqrestore(&rfkill->lock, flags);
+
+       rfkill_led_trigger_event(rfkill);
 
        if (!rfkill->registered)
                return ret;
 
-       if (change)
+       if (prev != blocked)
                schedule_work(&rfkill->uevent_work);
 
        return ret;
@@ -730,20 +720,12 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(state);
 
-static ssize_t claim_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       return sprintf(buf, "%d\n", 0);
-}
-static DEVICE_ATTR_RO(claim);
-
 static struct attribute *rfkill_dev_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_type.attr,
        &dev_attr_index.attr,
        &dev_attr_persistent.attr,
        &dev_attr_state.attr,
-       &dev_attr_claim.attr,
        &dev_attr_soft.attr,
        &dev_attr_hard.attr,
        NULL,
@@ -786,6 +768,7 @@ void rfkill_pause_polling(struct rfkill *rfkill)
        if (!rfkill->ops->poll)
                return;
 
+       rfkill->polling_paused = true;
        cancel_delayed_work_sync(&rfkill->poll_work);
 }
 EXPORT_SYMBOL(rfkill_pause_polling);
@@ -797,6 +780,11 @@ void rfkill_resume_polling(struct rfkill *rfkill)
        if (!rfkill->ops->poll)
                return;
 
+       rfkill->polling_paused = false;
+
+       if (rfkill->suspended)
+               return;
+
        queue_delayed_work(system_power_efficient_wq,
                           &rfkill->poll_work, 0);
 }
@@ -807,7 +795,8 @@ static int rfkill_suspend(struct device *dev)
 {
        struct rfkill *rfkill = to_rfkill(dev);
 
-       rfkill_pause_polling(rfkill);
+       rfkill->suspended = true;
+       cancel_delayed_work_sync(&rfkill->poll_work);
 
        return 0;
 }
@@ -817,12 +806,16 @@ static int rfkill_resume(struct device *dev)
        struct rfkill *rfkill = to_rfkill(dev);
        bool cur;
 
+       rfkill->suspended = false;
+
        if (!rfkill->persistent) {
                cur = !!(rfkill->state & RFKILL_BLOCK_SW);
                rfkill_set_block(rfkill, cur);
        }
 
-       rfkill_resume_polling(rfkill);
+       if (rfkill->ops->poll && !rfkill->polling_paused)
+               queue_delayed_work(system_power_efficient_wq,
+                                  &rfkill->poll_work, 0);
 
        return 0;
 }
@@ -1095,17 +1088,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
        return res;
 }
 
-static bool rfkill_readable(struct rfkill_data *data)
-{
-       bool r;
-
-       mutex_lock(&data->mtx);
-       r = !list_empty(&data->events);
-       mutex_unlock(&data->mtx);
-
-       return r;
-}
-
 static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                               size_t count, loff_t *pos)
 {
@@ -1122,8 +1104,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                        goto out;
                }
                mutex_unlock(&data->mtx);
+               /* since we re-check and it just compares pointers,
+                * using !list_empty() without locking isn't a problem
+                */
                ret = wait_event_interruptible(data->read_wait,
-                                              rfkill_readable(data));
+                                              !list_empty(&data->events));
                mutex_lock(&data->mtx);
 
                if (ret)
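The rfkill changes fold __rfkill_set_hw_state() into rfkill_set_hw_state(), track polling_paused and suspended explicitly, drop the always-zero claim attribute, and re-check list_empty() without the mutex in the read path. A reduced sketch of the hardware-block bookkeeping that now lives directly in rfkill_set_hw_state() (spinlock and LED trigger omitted, names simplified):

/*
 * Reduced sketch of the hw-block bookkeeping: update the HW bit, report
 * whether any block remains, and note whether the HW bit actually changed,
 * which is what decides if a uevent needs to be scheduled.
 */
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_HW  (1u << 0)
#define BLOCK_SW  (1u << 1)
#define BLOCK_ANY (BLOCK_HW | BLOCK_SW)

struct rfkill_state { unsigned int state; };

static bool set_hw_state(struct rfkill_state *rf, bool blocked, bool *changed)
{
	bool prev = rf->state & BLOCK_HW;

	if (blocked)
		rf->state |= BLOCK_HW;
	else
		rf->state &= ~BLOCK_HW;

	*changed = prev != blocked;
	return rf->state & BLOCK_ANY;	/* still blocked by anything? */
}

int main(void)
{
	struct rfkill_state rf = { .state = BLOCK_SW };
	bool changed;

	bool any = set_hw_state(&rf, true, &changed);
	printf("any=%d changed=%d\n", any, changed);	/* any=1 changed=1 */
	any = set_hw_state(&rf, false, &changed);
	printf("any=%d changed=%d\n", any, changed);	/* any=1 changed=1, SW block remains */
	return 0;
}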
index f26bdea875c1a8413774e767bb897012d969d2e6..a1cd778240cd7f2b14336c26ba6eec282c1c60b6 100644 (file)
@@ -403,6 +403,8 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
                if (len <= cl->deficit) {
                        cl->deficit -= len;
                        skb = qdisc_dequeue_peeked(cl->qdisc);
+                       if (unlikely(skb == NULL))
+                               goto out;
                        if (cl->qdisc->q.qlen == 0)
                                list_del(&cl->alist);
 
index bf61dfb8e09eea1034c7b9ddd40583752033230c..49d2cc751386f3208c67eed17ec9c3f15b801f50 100644 (file)
@@ -935,15 +935,22 @@ static struct sctp_association *__sctp_lookup_association(
                                        struct sctp_transport **pt)
 {
        struct sctp_transport *t;
+       struct sctp_association *asoc = NULL;
 
+       rcu_read_lock();
        t = sctp_addrs_lookup_transport(net, local, peer);
-       if (!t || t->dead)
-               return NULL;
+       if (!t || !sctp_transport_hold(t))
+               goto out;
 
-       sctp_association_hold(t->asoc);
+       asoc = t->asoc;
+       sctp_association_hold(asoc);
        *pt = t;
 
-       return t->asoc;
+       sctp_transport_put(t);
+
+out:
+       rcu_read_unlock();
+       return asoc;
 }
 
 /* Look up an association. protected by RCU read lock */
@@ -955,9 +962,7 @@ struct sctp_association *sctp_lookup_association(struct net *net,
 {
        struct sctp_association *asoc;
 
-       rcu_read_lock();
        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
-       rcu_read_unlock();
 
        return asoc;
 }
index 684c5b31563badc1f097434211422f495d356aad..ded7d931a6a5b218c7bbd4481acdea1d2bc29325 100644 (file)
@@ -165,8 +165,6 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
        list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,
                        transports) {
                addr = &transport->ipaddr;
-               if (transport->dead)
-                       continue;
 
                af = sctp_get_af_specific(addr->sa.sa_family);
                if (af->cmp_addr(addr, primary)) {
@@ -380,6 +378,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
+       if (!sctp_transport_hold(transport))
+               return 0;
        assoc = transport->asoc;
        epb = &assoc->base;
        sk = epb->sk;
@@ -412,6 +412,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
                sk->sk_rcvbuf);
        seq_printf(seq, "\n");
 
+       sctp_transport_put(transport);
+
        return 0;
 }
 
@@ -489,12 +491,12 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        }
 
        tsp = (struct sctp_transport *)v;
+       if (!sctp_transport_hold(tsp))
+               return 0;
        assoc = tsp->asoc;
 
        list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
                                transports) {
-               if (tsp->dead)
-                       continue;
                /*
                 * The remote address (ADDR)
                 */
@@ -544,6 +546,8 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
                seq_printf(seq, "\n");
        }
 
+       sctp_transport_put(tsp);
+
        return 0;
 }
 
index 2e21384697c2a6a5fd045142bcd9c39992d3867f..b5327bb77458f3ae68e0098c45d00307a56ed2c5 100644 (file)
@@ -259,12 +259,6 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
                goto out_unlock;
        }
 
-       /* Is this transport really dead and just waiting around for
-        * the timer to let go of the reference?
-        */
-       if (transport->dead)
-               goto out_unlock;
-
        /* Run through the state machine.  */
        error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
@@ -380,12 +374,6 @@ void sctp_generate_heartbeat_event(unsigned long data)
                goto out_unlock;
        }
 
-       /* Is this structure just waiting around for us to actually
-        * get destroyed?
-        */
-       if (transport->dead)
-               goto out_unlock;
-
        error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
                           asoc->state, asoc->ep, asoc,
index 4101c5b653d040140961ce975fbabfe7f0beac89..de8eabf03eed9b904afd78e9af6f2ab0b172cd2b 100644 (file)
@@ -5538,6 +5538,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
        struct sctp_hmac_algo_param *hmacs;
        __u16 data_len = 0;
        u32 num_idents;
+       int i;
 
        if (!ep->auth_enable)
                return -EACCES;
@@ -5555,8 +5556,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
                return -EFAULT;
        if (put_user(num_idents, &p->shmac_num_idents))
                return -EFAULT;
-       if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
-               return -EFAULT;
+       for (i = 0; i < num_idents; i++) {
+               __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+               if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+                       return -EFAULT;
+       }
        return 0;
 }
 
@@ -6636,6 +6641,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
 
                        if (cmsgs->srinfo->sinfo_flags &
                            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+                             SCTP_SACK_IMMEDIATELY |
                              SCTP_ABORT | SCTP_EOF))
                                return -EINVAL;
                        break;
@@ -6659,6 +6665,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
 
                        if (cmsgs->sinfo->snd_flags &
                            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+                             SCTP_SACK_IMMEDIATELY |
                              SCTP_ABORT | SCTP_EOF))
                                return -EINVAL;
                        break;
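The SCTP_HMAC_IDENT fix above stops copying the raw big-endian identifier array to userspace and converts each id individually. A standalone sketch of that per-element conversion (identifier values are the RFC 4895 ones, shown purely for illustration):

/*
 * Sketch of the per-identifier byte-order fix: the stored HMAC ids are
 * big-endian 16-bit values, so each one goes through ntohs() before it is
 * handed to the caller, instead of copying the raw array.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static void export_hmac_ids(const uint16_t *wire_ids, uint16_t *out, int n)
{
	for (int i = 0; i < n; i++)
		out[i] = ntohs(wire_ids[i]);	/* one id at a time, host order */
}

int main(void)
{
	uint16_t wire[2] = { htons(1) /* HMAC-SHA-1 */, htons(3) /* HMAC-SHA-256 */ };
	uint16_t host[2];

	export_hmac_ids(wire, host, 2);
	printf("%u %u\n", host[0], host[1]);	/* prints: 1 3 */
	return 0;
}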
index aab9e3f29755a58eb7584aa763b4b1a46d2e6608..a431c14044a46f98e9b73f66e28f229690ef3c01 100644 (file)
@@ -132,8 +132,6 @@ fail:
  */
 void sctp_transport_free(struct sctp_transport *transport)
 {
-       transport->dead = 1;
-
        /* Try to delete the heartbeat timer.  */
        if (del_timer(&transport->hb_timer))
                sctp_transport_put(transport);
@@ -169,7 +167,7 @@ static void sctp_transport_destroy_rcu(struct rcu_head *head)
  */
 static void sctp_transport_destroy(struct sctp_transport *transport)
 {
-       if (unlikely(!transport->dead)) {
+       if (unlikely(atomic_read(&transport->refcnt))) {
                WARN(1, "Attempt to destroy undead transport %p!\n", transport);
                return;
        }
@@ -296,9 +294,9 @@ void sctp_transport_route(struct sctp_transport *transport,
 }
 
 /* Hold a reference to a transport.  */
-void sctp_transport_hold(struct sctp_transport *transport)
+int sctp_transport_hold(struct sctp_transport *transport)
 {
-       atomic_inc(&transport->refcnt);
+       return atomic_add_unless(&transport->refcnt, 1, 0);
 }
 
 /* Release a reference to a transport and clean up
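With the transport->dead flag gone, sctp_transport_hold() now reports whether a reference was actually obtained, using atomic_add_unless(&refcnt, 1, 0). A userspace analogue of that "take a reference only while the count is still non-zero" pattern, sketched with C11 atomics:

/*
 * C11-atomics analogue of atomic_add_unless(&refcnt, 1, 0): a lookup may
 * take a new reference only while the count is non-zero, so an object that
 * is already on its way to destruction cannot be resurrected.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int refcnt; };

static bool obj_hold(struct obj *o)
{
	int c = atomic_load(&o->refcnt);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &c, c + 1))
			return true;	/* got a reference */
		/* c was reloaded by the failed CAS; retry */
	}
	return false;			/* already zero: refuse */
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		printf("last reference dropped, free object\n");
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	printf("hold: %d\n", obj_hold(&o));	/* 1 */
	obj_put(&o);
	obj_put(&o);				/* count reaches zero */
	printf("hold: %d\n", obj_hold(&o));	/* 0: late lookup must fail */
	return 0;
}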
index ebc661d3b6e37edb55d7c0a719ed9937a9f27a0d..47f7da58a7f0e93e3ffd632396a5a8db65c7f7c3 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/workqueue.h>
 #include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
@@ -567,7 +568,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
 }
 EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
 
-static DEFINE_MUTEX(switchdev_mutex);
 static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
 
 /**
@@ -582,9 +582,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
 {
        int err;
 
-       mutex_lock(&switchdev_mutex);
+       rtnl_lock();
        err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
-       mutex_unlock(&switchdev_mutex);
+       rtnl_unlock();
        return err;
 }
 EXPORT_SYMBOL_GPL(register_switchdev_notifier);
@@ -600,9 +600,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
 {
        int err;
 
-       mutex_lock(&switchdev_mutex);
+       rtnl_lock();
        err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
-       mutex_unlock(&switchdev_mutex);
+       rtnl_unlock();
        return err;
 }
 EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
@@ -616,16 +616,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
  *     Call all network notifier blocks. This should be called by driver
  *     when it needs to propagate hardware event.
  *     Return values are same as for atomic_notifier_call_chain().
+ *     rtnl_lock must be held.
  */
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info)
 {
        int err;
 
+       ASSERT_RTNL();
+
        info->dev = dev;
-       mutex_lock(&switchdev_mutex);
        err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
-       mutex_unlock(&switchdev_mutex);
        return err;
 }
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
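The switchdev notifier chain now relies on rtnl_lock held by its callers instead of a private mutex, with ASSERT_RTNL() documenting the rule in call_switchdev_notifiers(). A loose userspace analogue of "the callee asserts the caller's lock" (pthread-based, hypothetical names, single-threaded demo):

/*
 * Loose analogue of the locking change: the notifier call no longer takes
 * its own mutex, it simply asserts that the caller already holds the
 * subsystem-wide lock (ASSERT_RTNL() in the kernel).
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t big_lock_owner;
static int big_lock_held;

static void big_lock_acquire(void)
{
	pthread_mutex_lock(&big_lock);
	big_lock_owner = pthread_self();
	big_lock_held = 1;
}

static void big_lock_release(void)
{
	big_lock_held = 0;
	pthread_mutex_unlock(&big_lock);
}

static void call_notifiers(const char *event)
{
	/* callers are responsible for serialization now */
	assert(big_lock_held && pthread_equal(big_lock_owner, pthread_self()));
	printf("notify: %s\n", event);
}

int main(void)
{
	big_lock_acquire();
	call_notifiers("port attribute changed");
	big_lock_release();
	return 0;
}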
index 0c2944fb9ae0d34ca8e153d9412c5896b47ffacf..6f4a6d9b014989f29ff378d3e84cb60fd809aabe 100644 (file)
@@ -123,7 +123,6 @@ struct tipc_stats {
 struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
-       struct tipc_media_addr *media_addr;
        struct net *net;
 
        /* Management and link supervision data */
@@ -1261,26 +1260,6 @@ drop:
        return rc;
 }
 
-/*
- * Send protocol message to the other endpoint.
- */
-static void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ,
-                                int probe_msg, u32 gap, u32 tolerance,
-                                u32 priority)
-{
-       struct sk_buff *skb = NULL;
-       struct sk_buff_head xmitq;
-
-       __skb_queue_head_init(&xmitq);
-       tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
-                                 tolerance, priority, &xmitq);
-       skb = __skb_dequeue(&xmitq);
-       if (!skb)
-               return;
-       tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
-       l->rcv_unacked = 0;
-}
-
 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      u16 rcvgap, int tolerance, int priority,
                                      struct sk_buff_head *xmitq)
@@ -1479,6 +1458,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
                        l->tolerance = peers_tol;
 
+               if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
+                                          TIPC_MAX_LINK_PRI)) {
+                       l->priority = peers_prio;
+                       rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+               }
+
                l->silent_intv_cnt = 0;
                l->stats.recv_states++;
                if (msg_probe(hdr))
@@ -2021,16 +2006,18 @@ msg_full:
        return -EMSGSIZE;
 }
 
-void tipc_link_set_tolerance(struct tipc_link *l, u32 tol)
+void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
+                            struct sk_buff_head *xmitq)
 {
        l->tolerance = tol;
-       tipc_link_proto_xmit(l, STATE_MSG, 0, 0, tol, 0);
+       tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
 }
 
-void tipc_link_set_prio(struct tipc_link *l, u32 prio)
+void tipc_link_set_prio(struct tipc_link *l, u32 prio,
+                       struct sk_buff_head *xmitq)
 {
        l->priority = prio;
-       tipc_link_proto_xmit(l, STATE_MSG, 0, 0, 0, prio);
+       tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
 }
 
 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
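tipc_link_set_tolerance() and tipc_link_set_prio() no longer transmit a protocol message themselves; they append it to a caller-supplied xmitq, and tipc_nl_node_set_link() flushes the queue after releasing the node lock. A minimal sketch of that defer-and-flush shape (simplified types, fixed-size queue, illustrative values only):

/*
 * Sketch of the "build into a caller-owned xmitq, transmit later" pattern:
 * setters only queue a state message, the caller flushes the queue once,
 * after it has dropped its locks.
 */
#include <stdio.h>

struct msg { const char *what; unsigned int val; };
struct xmitq { struct msg q[8]; int n; };

static void queue_state_msg(struct xmitq *xq, const char *what, unsigned int val)
{
	if (xq->n < 8)
		xq->q[xq->n++] = (struct msg){ what, val };
}

static void set_tolerance(unsigned int *tol, unsigned int v, struct xmitq *xq)
{
	*tol = v;
	queue_state_msg(xq, "tolerance", v);	/* no transmit here */
}

static void set_prio(unsigned int *prio, unsigned int v, struct xmitq *xq)
{
	*prio = v;
	queue_state_msg(xq, "priority", v);
}

static void bearer_xmit(struct xmitq *xq)
{
	for (int i = 0; i < xq->n; i++)		/* one flush, outside the lock */
		printf("STATE_MSG %s=%u\n", xq->q[i].what, xq->q[i].val);
	xq->n = 0;
}

int main(void)
{
	unsigned int tol = 1500, prio = 10;
	struct xmitq xq = { .n = 0 };

	set_tolerance(&tol, 3000, &xq);
	set_prio(&prio, 20, &xq);
	bearer_xmit(&xq);
	return 0;
}

Keeping the actual transmission out of the setters is what lets the netlink handler send with the node lock already released.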
index b2ae0f4276afd72c58b0f08101deeef1a39dc772..b4ee9d6e181d2c2b2a17eefa865dae9c7071f543 100644 (file)
@@ -112,8 +112,10 @@ char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
 unsigned long tipc_link_tolerance(struct tipc_link *l);
-void tipc_link_set_tolerance(struct tipc_link *l, u32 tol);
-void tipc_link_set_prio(struct tipc_link *l, u32 prio);
+void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
+                            struct sk_buff_head *xmitq);
+void tipc_link_set_prio(struct tipc_link *l, u32 prio,
+                       struct sk_buff_head *xmitq);
 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit);
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 window);
 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
index 91fce70291a898cdbfd439d3eedceeb249bb0622..777b979b84634fbd98aa3ed49388c33e0a9472c7 100644 (file)
@@ -418,6 +418,9 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
                                   struct tipc_subscription *s)
 {
        struct sub_seq *sseq = nseq->sseqs;
+       struct tipc_name_seq ns;
+
+       tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
 
        list_add(&s->nameseq_list, &nseq->subscriptions);
 
@@ -425,7 +428,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
                return;
 
        while (sseq != &nseq->sseqs[nseq->first_free]) {
-               if (tipc_subscrp_check_overlap(s, sseq->lower, sseq->upper)) {
+               if (tipc_subscrp_check_overlap(&ns, sseq->lower, sseq->upper)) {
                        struct publication *crs;
                        struct name_info *info = sseq->info;
                        int must_report = 1;
@@ -722,9 +725,10 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
 void tipc_nametbl_subscribe(struct tipc_subscription *s)
 {
        struct tipc_net *tn = net_generic(s->net, tipc_net_id);
-       u32 type = s->seq.type;
+       u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
        int index = hash(type);
        struct name_seq *seq;
+       struct tipc_name_seq ns;
 
        spin_lock_bh(&tn->nametbl_lock);
        seq = nametbl_find_seq(s->net, type);
@@ -735,8 +739,9 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
                tipc_nameseq_subscribe(seq, s);
                spin_unlock_bh(&seq->lock);
        } else {
+               tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
                pr_warn("Failed to create subscription for {%u,%u,%u}\n",
-                       s->seq.type, s->seq.lower, s->seq.upper);
+                       ns.type, ns.lower, ns.upper);
        }
        spin_unlock_bh(&tn->nametbl_lock);
 }
@@ -748,9 +753,10 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
 {
        struct tipc_net *tn = net_generic(s->net, tipc_net_id);
        struct name_seq *seq;
+       u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
 
        spin_lock_bh(&tn->nametbl_lock);
-       seq = nametbl_find_seq(s->net, s->seq.type);
+       seq = nametbl_find_seq(s->net, type);
        if (seq != NULL) {
                spin_lock_bh(&seq->lock);
                list_del_init(&s->nameseq_list);
index fa97d9649a2851f1e6d5c8a1f39f2e4f7603e61b..f8a8255a7182905ffa3af8e83bf5fd716db38400 100644 (file)
@@ -1637,9 +1637,12 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
        char *name;
        struct tipc_link *link;
        struct tipc_node *node;
+       struct sk_buff_head xmitq;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct net *net = sock_net(skb->sk);
 
+       __skb_queue_head_init(&xmitq);
+
        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;
 
@@ -1683,13 +1686,13 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
                        u32 tol;
 
                        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
-                       tipc_link_set_tolerance(link, tol);
+                       tipc_link_set_tolerance(link, tol, &xmitq);
                }
                if (props[TIPC_NLA_PROP_PRIO]) {
                        u32 prio;
 
                        prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
-                       tipc_link_set_prio(link, prio);
+                       tipc_link_set_prio(link, prio, &xmitq);
                }
                if (props[TIPC_NLA_PROP_WIN]) {
                        u32 win;
@@ -1701,7 +1704,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
 
 out:
        tipc_node_read_unlock(node);
-
+       tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
        return res;
 }
 
index 922e04a43396db1f19fa6f1721a29840fb9b45d8..2446bfbaa309284e9d23dd590650e1576ec6b072 100644 (file)
@@ -571,13 +571,13 @@ static void tipc_work_stop(struct tipc_server *s)
 
 static int tipc_work_start(struct tipc_server *s)
 {
-       s->rcv_wq = alloc_workqueue("tipc_rcv", WQ_UNBOUND, 1);
+       s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
        if (!s->rcv_wq) {
                pr_err("can't start tipc receive workqueue\n");
                return -ENOMEM;
        }
 
-       s->send_wq = alloc_workqueue("tipc_send", WQ_UNBOUND, 1);
+       s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
        if (!s->send_wq) {
                pr_err("can't start tipc send workqueue\n");
                destroy_workqueue(s->rcv_wq);
index 350cca33ee0a64b08ce6135aa130011263eba136..22963cafd5ede27d59ecb772d1cd5c4257cacb98 100644 (file)
@@ -92,25 +92,42 @@ static void tipc_subscrp_send_event(struct tipc_subscription *sub,
  *
  * Returns 1 if there is overlap, otherwise 0.
  */
-int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
                               u32 found_upper)
 {
-       if (found_lower < sub->seq.lower)
-               found_lower = sub->seq.lower;
-       if (found_upper > sub->seq.upper)
-               found_upper = sub->seq.upper;
+       if (found_lower < seq->lower)
+               found_lower = seq->lower;
+       if (found_upper > seq->upper)
+               found_upper = seq->upper;
        if (found_lower > found_upper)
                return 0;
        return 1;
 }
 
+u32 tipc_subscrp_convert_seq_type(u32 type, int swap)
+{
+       return htohl(type, swap);
+}
+
+void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
+                             struct tipc_name_seq *out)
+{
+       out->type = htohl(in->type, swap);
+       out->lower = htohl(in->lower, swap);
+       out->upper = htohl(in->upper, swap);
+}
+
 void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
                                 u32 found_upper, u32 event, u32 port_ref,
                                 u32 node, int must)
 {
-       if (!tipc_subscrp_check_overlap(sub, found_lower, found_upper))
+       struct tipc_name_seq seq;
+
+       tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
+       if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
                return;
-       if (!must && !(sub->filter & TIPC_SUB_PORTS))
+       if (!must &&
+           !(htohl(sub->evt.s.filter, sub->swap) & TIPC_SUB_PORTS))
                return;
 
        tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
@@ -171,12 +188,14 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
        struct tipc_subscription *sub, *temp;
+       u32 timeout;
 
        spin_lock_bh(&subscriber->lock);
        /* Destroy any existing subscriptions for subscriber */
        list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
                                 subscrp_list) {
-               if (del_timer(&sub->timer)) {
+               timeout = htohl(sub->evt.s.timeout, sub->swap);
+               if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
                        tipc_subscrp_delete(sub);
                        tipc_subscrb_put(subscriber);
                }
@@ -200,13 +219,16 @@ static void tipc_subscrp_cancel(struct tipc_subscr *s,
                                struct tipc_subscriber *subscriber)
 {
        struct tipc_subscription *sub, *temp;
+       u32 timeout;
 
        spin_lock_bh(&subscriber->lock);
        /* Find first matching subscription, exit if not found */
        list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
                                 subscrp_list) {
                if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-                       if (del_timer(&sub->timer)) {
+                       timeout = htohl(sub->evt.s.timeout, sub->swap);
+                       if ((timeout == TIPC_WAIT_FOREVER) ||
+                           del_timer(&sub->timer)) {
                                tipc_subscrp_delete(sub);
                                tipc_subscrb_put(subscriber);
                        }
@@ -216,66 +238,67 @@ static void tipc_subscrp_cancel(struct tipc_subscr *s,
        spin_unlock_bh(&subscriber->lock);
 }
 
-static int tipc_subscrp_create(struct net *net, struct tipc_subscr *s,
-                              struct tipc_subscriber *subscriber,
-                              struct tipc_subscription **sub_p)
+static struct tipc_subscription *tipc_subscrp_create(struct net *net,
+                                                    struct tipc_subscr *s,
+                                                    int swap)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_subscription *sub;
-       int swap;
-
-       /* Determine subscriber's endianness */
-       swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
-
-       /* Detect & process a subscription cancellation request */
-       if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
-               s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
-               tipc_subscrp_cancel(s, subscriber);
-               return 0;
-       }
+       u32 filter = htohl(s->filter, swap);
 
        /* Refuse subscription if global limit exceeded */
        if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
                        TIPC_MAX_SUBSCRIPTIONS);
-               return -EINVAL;
+               return NULL;
        }
 
        /* Allocate subscription object */
        sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
        if (!sub) {
                pr_warn("Subscription rejected, no memory\n");
-               return -ENOMEM;
+               return NULL;
        }
 
        /* Initialize subscription object */
        sub->net = net;
-       sub->seq.type = htohl(s->seq.type, swap);
-       sub->seq.lower = htohl(s->seq.lower, swap);
-       sub->seq.upper = htohl(s->seq.upper, swap);
-       sub->timeout = msecs_to_jiffies(htohl(s->timeout, swap));
-       sub->filter = htohl(s->filter, swap);
-       if ((!(sub->filter & TIPC_SUB_PORTS) ==
-            !(sub->filter & TIPC_SUB_SERVICE)) ||
-           (sub->seq.lower > sub->seq.upper)) {
+       if (((filter & TIPC_SUB_PORTS) && (filter & TIPC_SUB_SERVICE)) ||
+           (htohl(s->seq.lower, swap) > htohl(s->seq.upper, swap))) {
                pr_warn("Subscription rejected, illegal request\n");
                kfree(sub);
-               return -EINVAL;
+               return NULL;
        }
-       spin_lock_bh(&subscriber->lock);
-       list_add(&sub->subscrp_list, &subscriber->subscrp_list);
-       spin_unlock_bh(&subscriber->lock);
-       sub->subscriber = subscriber;
+
        sub->swap = swap;
        memcpy(&sub->evt.s, s, sizeof(*s));
        atomic_inc(&tn->subscription_count);
+       return sub;
+}
+
+static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
+                                  struct tipc_subscriber *subscriber, int swap)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_subscription *sub = NULL;
+       u32 timeout;
+
+       sub = tipc_subscrp_create(net, s, swap);
+       if (!sub)
+               return tipc_conn_terminate(tn->topsrv, subscriber->conid);
+
+       spin_lock_bh(&subscriber->lock);
+       list_add(&sub->subscrp_list, &subscriber->subscrp_list);
+       tipc_subscrb_get(subscriber);
+       sub->subscriber = subscriber;
+       tipc_nametbl_subscribe(sub);
+       spin_unlock_bh(&subscriber->lock);
+
+       timeout = htohl(sub->evt.s.timeout, swap);
+       if (timeout == TIPC_WAIT_FOREVER)
+               return;
+
        setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
-       if (sub->timeout != TIPC_WAIT_FOREVER)
-               sub->timeout += jiffies;
-       if (!mod_timer(&sub->timer, sub->timeout))
-               tipc_subscrb_get(subscriber);
-       *sub_p = sub;
-       return 0;
+       mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
 }
 
 /* Handle one termination request for the subscriber */
@@ -290,14 +313,20 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
                                void *buf, size_t len)
 {
        struct tipc_subscriber *subscriber = usr_data;
-       struct tipc_subscription *sub = NULL;
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_subscr *s = (struct tipc_subscr *)buf;
+       int swap;
+
+       /* Determine subscriber's endianness */
+       swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE |
+                             TIPC_SUB_CANCEL));
+
+       /* Detect & process a subscription cancellation request */
+       if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
+               s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
+               return tipc_subscrp_cancel(s, subscriber);
+       }
 
-       tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
-       if (sub)
-               tipc_nametbl_subscribe(sub);
-       else
-               tipc_conn_terminate(tn->topsrv, subscriber->conid);
+       tipc_subscrp_subscribe(net, s, subscriber, swap);
 }
 
 /* Handle one request to establish a new subscriber */
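The subscription code above now keeps the request exactly as the subscriber sent it and converts fields on demand with htohl()-style helpers, inferring the peer's byte order from whether any known filter bit is visible unswapped. A standalone sketch of that detection and conversion (relies on the GCC/Clang __builtin_bswap32 builtin; constants are illustrative):

/*
 * Sketch of on-demand byte-order conversion: the request stays in the
 * subscriber's original byte order, the peer's endianness is guessed from
 * whether any known filter bit is visible without swapping, and fields are
 * converted only when they are actually used.
 */
#include <stdint.h>
#include <stdio.h>

#define SUB_PORTS   0x01
#define SUB_SERVICE 0x02
#define SUB_CANCEL  0x04

static uint32_t htohl(uint32_t in, int swap)
{
	return swap ? __builtin_bswap32(in) : in;
}

struct subscr_req { uint32_t type, lower, upper, filter; };

int main(void)
{
	/* a request written in the opposite byte order from ours */
	struct subscr_req req = {
		.type   = __builtin_bswap32(42),
		.lower  = __builtin_bswap32(100),
		.upper  = __builtin_bswap32(200),
		.filter = __builtin_bswap32(SUB_PORTS),
	};
	int swap = !(req.filter & (SUB_PORTS | SUB_SERVICE | SUB_CANCEL));

	printf("swap=%d type=%u range=[%u,%u]\n", swap,
	       htohl(req.type, swap),
	       htohl(req.lower, swap),
	       htohl(req.upper, swap));	/* swap=1 type=42 range=[100,200] */
	return 0;
}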
index 92ee18cc5fe6ef5567a4e52a72e66748ed4b395a..be60103082c923c0fd768f52c081af38eb42491b 100644 (file)
@@ -50,21 +50,15 @@ struct tipc_subscriber;
  * @subscriber: pointer to its subscriber
  * @seq: name sequence associated with subscription
  * @net: point to network namespace
- * @timeout: duration of subscription (in ms)
- * @filter: event filtering to be done for subscription
  * @timer: timer governing subscription duration (optional)
  * @nameseq_list: adjacent subscriptions in name sequence's subscription list
  * @subscrp_list: adjacent subscriptions in subscriber's subscription list
- * @server_ref: object reference of server port associated with subscription
  * @swap: indicates if subscriber uses opposite endianness in its messages
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
        struct tipc_subscriber *subscriber;
-       struct tipc_name_seq seq;
        struct net *net;
-       unsigned long timeout;
-       u32 filter;
        struct timer_list timer;
        struct list_head nameseq_list;
        struct list_head subscrp_list;
@@ -72,11 +66,14 @@ struct tipc_subscription {
        struct tipc_event evt;
 };
 
-int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
                               u32 found_upper);
 void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
                                 u32 found_lower, u32 found_upper, u32 event,
                                 u32 port_ref, u32 node, int must);
+void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
+                             struct tipc_name_seq *out);
+u32 tipc_subscrp_convert_seq_type(u32 type, int swap);
 int tipc_topsrv_start(struct net *net);
 void tipc_topsrv_stop(struct net *net);
 
index c5bf5ef2bf89476b6d7ab766b8591e6af2e869fd..a6d6654697779060ecdeeb34c75fcfe7e618f433 100644 (file)
@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
        UNIXCB(skb).fp = NULL;
 
        for (i = scm->fp->count-1; i >= 0; i--)
-               unix_notinflight(scm->fp->fp[i]);
+               unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
@@ -1534,7 +1534,6 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
        int i;
        unsigned char max_level = 0;
-       int unix_sock_count = 0;
 
        if (too_many_unix_fds(current))
                return -ETOOMANYREFS;
@@ -1542,11 +1541,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
        for (i = scm->fp->count - 1; i >= 0; i--) {
                struct sock *sk = unix_get_socket(scm->fp->fp[i]);
 
-               if (sk) {
-                       unix_sock_count++;
+               if (sk)
                        max_level = max(max_level,
                                        unix_sk(sk)->recursion_level);
-               }
        }
        if (unlikely(max_level > MAX_RECURSION_LEVEL))
                return -ETOOMANYREFS;
@@ -1561,7 +1558,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
                return -ENOMEM;
 
        for (i = scm->fp->count - 1; i >= 0; i--)
-               unix_inflight(scm->fp->fp[i]);
+               unix_inflight(scm->fp->user, scm->fp->fp[i]);
        return max_level;
 }
 
@@ -2339,6 +2336,7 @@ again:
 
                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
+                               scm_destroy(&scm);
                                goto out;
                        }
 
index 8fcdc2283af50c5caecd409b79cae6028ca2c836..6a0d48525fcf9a71f54bb43495b200b300f5341e 100644 (file)
@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
  * descriptor if it is for an AF_UNIX socket.
  */
 
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
 
@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
                }
                unix_tot_inflight++;
        }
-       fp->f_cred->user->unix_inflight++;
+       user->unix_inflight++;
        spin_unlock(&unix_gc_lock);
 }
 
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
 
@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
                        list_del_init(&u->link);
                unix_tot_inflight--;
        }
-       fp->f_cred->user->unix_inflight--;
+       user->unix_inflight--;
        spin_unlock(&unix_gc_lock);
 }
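unix_inflight() and unix_notinflight() now charge and release the in-flight count against the user_struct recorded with the passed descriptors rather than whoever happens to be current at each call. For context only, a minimal userspace round trip that puts a descriptor "in flight" via SCM_RIGHTS over an AF_UNIX socketpair (error handling trimmed):

/*
 * Minimal SCM_RIGHTS round trip: the descriptor is "in flight" between
 * sendmsg() and recvmsg(), which is the window the accounting above tracks.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int sv[2], fd_to_send, received = -1;
	struct msghdr msg = { 0 };
	struct iovec iov;
	char data = 'x';
	union { struct cmsghdr align; char buf[CMSG_SPACE(sizeof(int))]; } u;
	struct cmsghdr *cmsg;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	fd_to_send = dup(STDOUT_FILENO);

	iov.iov_base = &data;
	iov.iov_len = 1;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;		/* descriptor rides in the cmsg */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

	if (sendmsg(sv[0], &msg, 0) < 0)	/* fd is now "in flight" */
		return 1;
	if (recvmsg(sv[1], &msg, 0) < 0)	/* ...until it is received */
		return 1;
	cmsg = CMSG_FIRSTHDR(&msg);
	if (cmsg && cmsg->cmsg_type == SCM_RIGHTS)
		memcpy(&received, CMSG_DATA(cmsg), sizeof(int));
	printf("received fd %d\n", received);
	return 0;
}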
 
index da72ed32f14385c3e218cda196e5b4c3511c896e..6c606120abfed96fe7fe4cdb23beb204e5ea109a 100644 (file)
@@ -50,8 +50,8 @@ config CFG80211_DEVELOPER_WARNINGS
        default n
        help
          This option enables some additional warnings that help
-         cfg80211 developers and driver developers, but that can
-         trigger due to races with userspace.
+         cfg80211 developers and driver developers, but beware that
+         they can also trigger due to races with userspace.
 
          For example, when a driver reports that it was disconnected
          from the AP, but the user disconnects manually at the same
@@ -61,19 +61,6 @@ config CFG80211_DEVELOPER_WARNINGS
          on it (or mac80211).
 
 
-config CFG80211_REG_DEBUG
-       bool "cfg80211 regulatory debugging"
-       depends on CFG80211
-       default n
-       ---help---
-         You can enable this if you want to debug regulatory changes.
-         For more information on cfg80211 regulatory refer to the wireless
-         wiki:
-
-         http://wireless.kernel.org/en/developers/Regulatory
-
-         If unsure, say N.
-
 config CFG80211_CERTIFICATION_ONUS
        bool "cfg80211 certification onus"
        depends on CFG80211 && EXPERT
@@ -123,7 +110,7 @@ config CFG80211_REG_RELAX_NO_IR
         interface which associated to an AP which userspace assumes or confirms
         to be an authorized master, i.e., with radar detection support and DFS
         capabilities. However, note that in order to not create daisy chain
-        scenarios, this relaxation is not allowed in cases that the BSS client
+        scenarios, this relaxation is not allowed in cases where the BSS client
         is associated to P2P GO and in addition the P2P GO instantiated on
         a channel due to this relaxation should not allow connection from
         non P2P clients.
@@ -148,7 +135,7 @@ config CFG80211_DEBUGFS
        depends on CFG80211
        depends on DEBUG_FS
        ---help---
-         You can enable this if you want to debugfs entries for cfg80211.
+         You can enable this if you want debugfs entries for cfg80211.
 
          If unsure, say N.
 
@@ -159,7 +146,7 @@ config CFG80211_INTERNAL_REGDB
        ---help---
          This option generates an internal data structure representing
          the wireless regulatory rules described in net/wireless/db.txt
-         and includes code to query that database.  This is an alternative
+         and includes code to query that database. This is an alternative
          to using CRDA for defining regulatory rules for the kernel.
 
          Using this option requires some parsing of the db.txt at build time,
@@ -172,7 +159,7 @@ config CFG80211_INTERNAL_REGDB
 
          http://wireless.kernel.org/en/developers/Regulatory
 
-         Most distributions have a CRDA package.  So if unsure, say N.
+         Most distributions have a CRDA package. So if unsure, say N.
 
 config CFG80211_CRDA_SUPPORT
        bool "support CRDA" if CFG80211_INTERNAL_REGDB
index b0915515640efed1ff4795103c0572aba48c38f4..9f1c4aa851efdf55ab81044b10ef58b7bd73ecf6 100644 (file)
@@ -352,6 +352,16 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
        WARN_ON(ops->add_station && !ops->del_station);
        WARN_ON(ops->add_mpath && !ops->del_mpath);
        WARN_ON(ops->join_mesh && !ops->leave_mesh);
+       WARN_ON(ops->start_p2p_device && !ops->stop_p2p_device);
+       WARN_ON(ops->start_ap && !ops->stop_ap);
+       WARN_ON(ops->join_ocb && !ops->leave_ocb);
+       WARN_ON(ops->suspend && !ops->resume);
+       WARN_ON(ops->sched_scan_start && !ops->sched_scan_stop);
+       WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
+       WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
+       WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
+       WARN_ON(ops->set_tx_power && !ops->get_tx_power);
+       WARN_ON(ops->set_antenna && !ops->get_antenna);
 
        alloc_size = sizeof(*rdev) + sizeof_priv;
 
@@ -1147,6 +1157,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                return NOTIFY_DONE;
        }
 
+       wireless_nlevent_flush();
+
        return NOTIFY_OK;
 }
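wiphy_new_nm() gains WARN_ON() checks so that drivers implementing one half of an operation pair (start_ap without stop_ap, set_tx_power without get_tx_power, and so on) are flagged at registration time. A generic sketch of that one-directional paired-callback check (hypothetical ops structure and names):

/*
 * Sketch of a paired-callback sanity check at registration: if the first
 * callback of a pair is implemented but its counterpart is not, warn.
 */
#include <stdio.h>

struct ops {
	int (*start_ap)(void);
	int (*stop_ap)(void);
	int (*set_tx_power)(int mbm);
	int (*get_tx_power)(int *mbm);
};

#define warn_on_unpaired(o, a, b)					\
	do {								\
		if ((o)->a && !(o)->b)					\
			fprintf(stderr, "WARN: %s without %s\n", #a, #b); \
	} while (0)

static int my_start_ap(void) { return 0; }

int main(void)
{
	struct ops drv = { .start_ap = my_start_ap };	/* stop_ap missing */

	warn_on_unpaired(&drv, start_ap, stop_ap);	/* warns */
	warn_on_unpaired(&drv, set_tx_power, get_tx_power);	/* silent */
	return 0;
}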
 
index fb44fa3bf4efa750298163a15534572c43229b2e..ff328250bc442db6a9e8e5136496098d4270bf6f 100644 (file)
@@ -711,7 +711,7 @@ EXPORT_SYMBOL(cfg80211_rx_mgmt);
 
 void cfg80211_dfs_channels_update_work(struct work_struct *work)
 {
-       struct delayed_work *delayed_work;
+       struct delayed_work *delayed_work = to_delayed_work(work);
        struct cfg80211_registered_device *rdev;
        struct cfg80211_chan_def chandef;
        struct ieee80211_supported_band *sband;
@@ -721,7 +721,6 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work)
        unsigned long timeout, next_time = 0;
        int bandid, i;
 
-       delayed_work = container_of(work, struct delayed_work, work);
        rdev = container_of(delayed_work, struct cfg80211_registered_device,
                            dfs_update_channels_wk);
        wiphy = &rdev->wiphy;
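The mlme.c hunk swaps an open-coded container_of() for the to_delayed_work() helper. A standalone illustration of the container_of pattern it relies on (simplified macro, hypothetical structs):

/*
 * container_of in isolation: given a pointer to a member, recover the
 * enclosing structure by subtracting the member's offset.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };
struct delayed_work { struct work work; long timer; };
struct device_state { int id; struct delayed_work update_wk; };

static struct delayed_work *to_delayed_work(struct work *w)
{
	return container_of(w, struct delayed_work, work);
}

static void update_worker(struct work *w)
{
	struct delayed_work *dw = to_delayed_work(w);
	struct device_state *dev = container_of(dw, struct device_state, update_wk);

	printf("worker runs for device %d\n", dev->id);
}

int main(void)
{
	struct device_state dev = { .id = 7 };

	update_worker(&dev.update_wk.work);	/* callbacks only see &work */
	return 0;
}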
index d4786f2802aa3c94f0a9bfcba846c98ec5b2b249..268cb493f6a5496bf596d517b6bc62c348e6e9d9 100644 (file)
@@ -401,6 +401,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
        [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
        [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
+       [NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
 };
 
 /* policy for the key attributes */
@@ -3461,6 +3462,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                        return PTR_ERR(params.acl);
        }
 
+       params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+       if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
+               return -EOPNOTSUPP;
+
        wdev_lock(wdev);
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
@@ -7980,6 +7985,12 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
                connect.flags |= ASSOC_REQ_USE_RRM;
        }
 
+       connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+       if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
+               kzfree(connkeys);
+               return -EOPNOTSUPP;
+       }
+
        wdev_lock(dev->ieee80211_ptr);
        err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
        wdev_unlock(dev->ieee80211_ptr);
index 3b0ce1c484a3dd33f2e2c81edeeadb79a48e2dcc..c5fb317eee68f15a4665a090f7aee16b08ec6765 100644 (file)
 #include "regdb.h"
 #include "nl80211.h"
 
-#ifdef CONFIG_CFG80211_REG_DEBUG
-#define REG_DBG_PRINT(format, args...)                 \
-       printk(KERN_DEBUG pr_fmt(format), ##args)
-#else
-#define REG_DBG_PRINT(args...)
-#endif
-
 /*
  * Grace period we give before making sure all current interfaces reside on
  * channels allowed by the current regulatory domain.
@@ -178,12 +171,10 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy)
        if (wiphy_regd->dfs_region == regd->dfs_region)
                goto out;
 
-       REG_DBG_PRINT("%s: device specific dfs_region "
-                     "(%s) disagrees with cfg80211's "
-                     "central dfs_region (%s)\n",
-                     dev_name(&wiphy->dev),
-                     reg_dfs_region_str(wiphy_regd->dfs_region),
-                     reg_dfs_region_str(regd->dfs_region));
+       pr_debug("%s: device specific dfs_region (%s) disagrees with cfg80211's central dfs_region (%s)\n",
+                dev_name(&wiphy->dev),
+                reg_dfs_region_str(wiphy_regd->dfs_region),
+                reg_dfs_region_str(regd->dfs_region));
 
 out:
        return regd->dfs_region;
@@ -231,20 +222,22 @@ static const struct ieee80211_regdomain world_regdom = {
                /* IEEE 802.11b/g, channels 1..11 */
                REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
                /* IEEE 802.11b/g, channels 12..13. */
-               REG_RULE(2467-10, 2472+10, 40, 6, 20,
-                       NL80211_RRF_NO_IR),
+               REG_RULE(2467-10, 2472+10, 20, 6, 20,
+                       NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW),
                /* IEEE 802.11 channel 14 - Only JP enables
                 * this and for 802.11b only */
                REG_RULE(2484-10, 2484+10, 20, 6, 20,
                        NL80211_RRF_NO_IR |
                        NL80211_RRF_NO_OFDM),
                /* IEEE 802.11a, channel 36..48 */
-               REG_RULE(5180-10, 5240+10, 160, 6, 20,
-                        NL80211_RRF_NO_IR),
+               REG_RULE(5180-10, 5240+10, 80, 6, 20,
+                        NL80211_RRF_NO_IR |
+                        NL80211_RRF_AUTO_BW),
 
                /* IEEE 802.11a, channel 52..64 - DFS required */
-               REG_RULE(5260-10, 5320+10, 160, 6, 20,
+               REG_RULE(5260-10, 5320+10, 80, 6, 20,
                        NL80211_RRF_NO_IR |
+                       NL80211_RRF_AUTO_BW |
                        NL80211_RRF_DFS),
 
                /* IEEE 802.11a, channel 100..144 - DFS required */
@@ -541,7 +534,7 @@ static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);
 
 static void crda_timeout_work(struct work_struct *work)
 {
-       REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+       pr_debug("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
        rtnl_lock();
        reg_crda_timeouts++;
        restore_regulatory_settings(true);
@@ -583,7 +576,7 @@ static int call_crda(const char *alpha2)
 
        if (!is_world_regdom((char *) alpha2))
                pr_debug("Calling CRDA for country: %c%c\n",
-                       alpha2[0], alpha2[1]);
+                        alpha2[0], alpha2[1]);
        else
                pr_debug("Calling CRDA to update world regulatory domain\n");
 
@@ -1130,42 +1123,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
 }
 EXPORT_SYMBOL(reg_initiator_name);
 
-static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
-                                   struct ieee80211_channel *chan,
-                                   const struct ieee80211_reg_rule *reg_rule)
-{
-#ifdef CONFIG_CFG80211_REG_DEBUG
-       const struct ieee80211_power_rule *power_rule;
-       const struct ieee80211_freq_range *freq_range;
-       char max_antenna_gain[32], bw[32];
-
-       power_rule = &reg_rule->power_rule;
-       freq_range = &reg_rule->freq_range;
-
-       if (!power_rule->max_antenna_gain)
-               snprintf(max_antenna_gain, sizeof(max_antenna_gain), "N/A");
-       else
-               snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d mBi",
-                        power_rule->max_antenna_gain);
-
-       if (reg_rule->flags & NL80211_RRF_AUTO_BW)
-               snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
-                        freq_range->max_bandwidth_khz,
-                        reg_get_max_bandwidth(regd, reg_rule));
-       else
-               snprintf(bw, sizeof(bw), "%d KHz",
-                        freq_range->max_bandwidth_khz);
-
-       REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
-                     chan->center_freq);
-
-       REG_DBG_PRINT("(%d KHz - %d KHz @ %s), (%s, %d mBm)\n",
-                     freq_range->start_freq_khz, freq_range->end_freq_khz,
-                     bw, max_antenna_gain,
-                     power_rule->max_eirp);
-#endif
-}
-
 static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd,
                                          const struct ieee80211_reg_rule *reg_rule,
                                          const struct ieee80211_channel *chan)
@@ -1240,20 +1197,19 @@ static void handle_channel(struct wiphy *wiphy,
                if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
                    request_wiphy && request_wiphy == wiphy &&
                    request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
-                       REG_DBG_PRINT("Disabling freq %d MHz for good\n",
-                                     chan->center_freq);
+                       pr_debug("Disabling freq %d MHz for good\n",
+                                chan->center_freq);
                        chan->orig_flags |= IEEE80211_CHAN_DISABLED;
                        chan->flags = chan->orig_flags;
                } else {
-                       REG_DBG_PRINT("Disabling freq %d MHz\n",
-                                     chan->center_freq);
+                       pr_debug("Disabling freq %d MHz\n",
+                                chan->center_freq);
                        chan->flags |= IEEE80211_CHAN_DISABLED;
                }
                return;
        }
 
        regd = reg_get_regdomain(wiphy);
-       chan_reg_rule_print_dbg(regd, chan, reg_rule);
 
        power_rule = &reg_rule->power_rule;
        bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
@@ -1391,18 +1347,15 @@ static bool ignore_reg_update(struct wiphy *wiphy,
                return true;
 
        if (!lr) {
-               REG_DBG_PRINT("Ignoring regulatory request set by %s "
-                             "since last_request is not set\n",
-                             reg_initiator_name(initiator));
+               pr_debug("Ignoring regulatory request set by %s since last_request is not set\n",
+                        reg_initiator_name(initiator));
                return true;
        }
 
        if (initiator == NL80211_REGDOM_SET_BY_CORE &&
            wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
-               REG_DBG_PRINT("Ignoring regulatory request set by %s "
-                             "since the driver uses its own custom "
-                             "regulatory domain\n",
-                             reg_initiator_name(initiator));
+               pr_debug("Ignoring regulatory request set by %s since the driver uses its own custom regulatory domain\n",
+                        reg_initiator_name(initiator));
                return true;
        }
 
@@ -1413,10 +1366,8 @@ static bool ignore_reg_update(struct wiphy *wiphy,
        if (wiphy_strict_alpha2_regd(wiphy) && !wiphy->regd &&
            initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
            !is_world_regdom(lr->alpha2)) {
-               REG_DBG_PRINT("Ignoring regulatory request set by %s "
-                             "since the driver requires its own regulatory "
-                             "domain to be set first\n",
-                             reg_initiator_name(initiator));
+               pr_debug("Ignoring regulatory request set by %s since the driver requires its own regulatory domain to be set first\n",
+                        reg_initiator_name(initiator));
                return true;
        }
 
@@ -1697,7 +1648,7 @@ static void reg_check_chans_work(struct work_struct *work)
 {
        struct cfg80211_registered_device *rdev;
 
-       REG_DBG_PRINT("Verifying active interfaces after reg change\n");
+       pr_debug("Verifying active interfaces after reg change\n");
        rtnl_lock();
 
        list_for_each_entry(rdev, &cfg80211_rdev_list, list)
@@ -1779,8 +1730,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
        }
 
        if (IS_ERR(reg_rule)) {
-               REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
-                             chan->center_freq);
+               pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n",
+                        chan->center_freq);
                if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
                        chan->flags |= IEEE80211_CHAN_DISABLED;
                } else {
@@ -1790,8 +1741,6 @@ static void handle_channel_custom(struct wiphy *wiphy,
                return;
        }
 
-       chan_reg_rule_print_dbg(regd, chan, reg_rule);
-
        power_rule = &reg_rule->power_rule;
        bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
 
@@ -2522,7 +2471,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
        if (is_user_regdom_saved()) {
                /* Unless we're asked to ignore it and reset it */
                if (reset_user) {
-                       REG_DBG_PRINT("Restoring regulatory settings including user preference\n");
+                       pr_debug("Restoring regulatory settings including user preference\n");
                        user_alpha2[0] = '9';
                        user_alpha2[1] = '7';
 
@@ -2532,24 +2481,24 @@ static void restore_alpha2(char *alpha2, bool reset_user)
                         * back as they were for a full restore.
                         */
                        if (!is_world_regdom(ieee80211_regdom)) {
-                               REG_DBG_PRINT("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
-                                             ieee80211_regdom[0], ieee80211_regdom[1]);
+                               pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
+                                        ieee80211_regdom[0], ieee80211_regdom[1]);
                                alpha2[0] = ieee80211_regdom[0];
                                alpha2[1] = ieee80211_regdom[1];
                        }
                } else {
-                       REG_DBG_PRINT("Restoring regulatory settings while preserving user preference for: %c%c\n",
-                                     user_alpha2[0], user_alpha2[1]);
+                       pr_debug("Restoring regulatory settings while preserving user preference for: %c%c\n",
+                                user_alpha2[0], user_alpha2[1]);
                        alpha2[0] = user_alpha2[0];
                        alpha2[1] = user_alpha2[1];
                }
        } else if (!is_world_regdom(ieee80211_regdom)) {
-               REG_DBG_PRINT("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
-                             ieee80211_regdom[0], ieee80211_regdom[1]);
+               pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
+                        ieee80211_regdom[0], ieee80211_regdom[1]);
                alpha2[0] = ieee80211_regdom[0];
                alpha2[1] = ieee80211_regdom[1];
        } else
-               REG_DBG_PRINT("Restoring regulatory settings\n");
+               pr_debug("Restoring regulatory settings\n");
 }
 
 static void restore_custom_reg_settings(struct wiphy *wiphy)
@@ -2661,14 +2610,14 @@ static void restore_regulatory_settings(bool reset_user)
        list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
        spin_unlock(&reg_requests_lock);
 
-       REG_DBG_PRINT("Kicking the queue\n");
+       pr_debug("Kicking the queue\n");
 
        schedule_work(&reg_work);
 }
 
 void regulatory_hint_disconnect(void)
 {
-       REG_DBG_PRINT("All devices are disconnected, going to restore regulatory settings\n");
+       pr_debug("All devices are disconnected, going to restore regulatory settings\n");
        restore_regulatory_settings(false);
 }
 
@@ -2716,10 +2665,10 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
        if (!reg_beacon)
                return -ENOMEM;
 
-       REG_DBG_PRINT("Found new beacon on frequency: %d MHz (Ch %d) on %s\n",
-                     beacon_chan->center_freq,
-                     ieee80211_frequency_to_channel(beacon_chan->center_freq),
-                     wiphy_name(wiphy));
+       pr_debug("Found new beacon on frequency: %d MHz (Ch %d) on %s\n",
+                beacon_chan->center_freq,
+                ieee80211_frequency_to_channel(beacon_chan->center_freq),
+                wiphy_name(wiphy));
 
        memcpy(&reg_beacon->chan, beacon_chan,
               sizeof(struct ieee80211_channel));
@@ -2745,7 +2694,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
        const struct ieee80211_power_rule *power_rule = NULL;
        char bw[32], cac_time[32];
 
-       pr_info("  (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n");
+       pr_debug("  (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n");
 
        for (i = 0; i < rd->n_reg_rules; i++) {
                reg_rule = &rd->reg_rules[i];
@@ -2772,7 +2721,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
                 * in certain regions
                 */
                if (power_rule->max_antenna_gain)
-                       pr_info("  (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n",
+                       pr_debug("  (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n",
                                freq_range->start_freq_khz,
                                freq_range->end_freq_khz,
                                bw,
@@ -2780,7 +2729,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
                                power_rule->max_eirp,
                                cac_time);
                else
-                       pr_info("  (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n",
+                       pr_debug("  (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n",
                                freq_range->start_freq_khz,
                                freq_range->end_freq_khz,
                                bw,
@@ -2798,8 +2747,7 @@ bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region)
        case NL80211_DFS_JP:
                return true;
        default:
-               REG_DBG_PRINT("Ignoring uknown DFS master region: %d\n",
-                             dfs_region);
+               pr_debug("Ignoring unknown DFS master region: %d\n", dfs_region);
                return false;
        }
 }
@@ -2813,35 +2761,35 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
                        struct cfg80211_registered_device *rdev;
                        rdev = cfg80211_rdev_by_wiphy_idx(lr->wiphy_idx);
                        if (rdev) {
-                               pr_info("Current regulatory domain updated by AP to: %c%c\n",
+                               pr_debug("Current regulatory domain updated by AP to: %c%c\n",
                                        rdev->country_ie_alpha2[0],
                                        rdev->country_ie_alpha2[1]);
                        } else
-                               pr_info("Current regulatory domain intersected:\n");
+                               pr_debug("Current regulatory domain intersected:\n");
                } else
-                       pr_info("Current regulatory domain intersected:\n");
+                       pr_debug("Current regulatory domain intersected:\n");
        } else if (is_world_regdom(rd->alpha2)) {
-               pr_info("World regulatory domain updated:\n");
+               pr_debug("World regulatory domain updated:\n");
        } else {
                if (is_unknown_alpha2(rd->alpha2))
-                       pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
+                       pr_debug("Regulatory domain changed to driver built-in settings (unknown country)\n");
                else {
                        if (reg_request_cell_base(lr))
-                               pr_info("Regulatory domain changed to country: %c%c by Cell Station\n",
+                               pr_debug("Regulatory domain changed to country: %c%c by Cell Station\n",
                                        rd->alpha2[0], rd->alpha2[1]);
                        else
-                               pr_info("Regulatory domain changed to country: %c%c\n",
+                               pr_debug("Regulatory domain changed to country: %c%c\n",
                                        rd->alpha2[0], rd->alpha2[1]);
                }
        }
 
-       pr_info(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region));
+       pr_debug(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region));
        print_rd_rules(rd);
 }
 
 static void print_regdomain_info(const struct ieee80211_regdomain *rd)
 {
-       pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
+       pr_debug("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
        print_rd_rules(rd);
 }
 
@@ -2862,7 +2810,8 @@ static int reg_set_rd_user(const struct ieee80211_regdomain *rd,
                return -EALREADY;
 
        if (!is_valid_rd(rd)) {
-               pr_err("Invalid regulatory domain detected:\n");
+               pr_err("Invalid regulatory domain detected: %c%c\n",
+                      rd->alpha2[0], rd->alpha2[1]);
                print_regdomain_info(rd);
                return -EINVAL;
        }
@@ -2898,7 +2847,8 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
                return -EALREADY;
 
        if (!is_valid_rd(rd)) {
-               pr_err("Invalid regulatory domain detected:\n");
+               pr_err("Invalid regulatory domain detected: %c%c\n",
+                      rd->alpha2[0], rd->alpha2[1]);
                print_regdomain_info(rd);
                return -EINVAL;
        }
@@ -2956,7 +2906,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
         */
 
        if (!is_valid_rd(rd)) {
-               pr_err("Invalid regulatory domain detected:\n");
+               pr_err("Invalid regulatory domain detected: %c%c\n",
+                      rd->alpha2[0], rd->alpha2[1]);
                print_regdomain_info(rd);
                return -EINVAL;
        }
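Note: throughout reg.c the private REG_DBG_PRINT() wrapper and the CONFIG_CFG80211_REG_DEBUG-only chan_reg_rule_print_dbg() dump are dropped in favour of plain pr_debug(), the chattier pr_info() regulatory dumps are demoted to debug level, and the pr_err() for an invalid regdomain now also prints the offending alpha2. pr_debug() compiles away unless DEBUG is defined for the file, or is gated at run time by CONFIG_DYNAMIC_DEBUG; a minimal sketch of the mechanism being relied on, with the pr_fmt prefix shown only as an assumption (the real file defines its own):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt   /* assumed prefix, for illustration */
#include <linux/printk.h>

static void report_disabled_freq(int freq_mhz)
{
        /* a no-op unless debugging is enabled for this file */
        pr_debug("Disabling freq %d MHz\n", freq_mhz);
}

With dynamic debug, these messages can typically be enabled at run time with something like: echo 'file reg.c +p' > /sys/kernel/debug/dynamic_debug/control
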
index 8020b5b094d4c8fba0c0f2431f8af7d8ecc40dca..79bd3a171caa83ed8ae811b744fd271475b0257a 100644 (file)
@@ -264,7 +264,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
                               wdev->conn->params.bssid,
                               wdev->conn->params.ssid,
                               wdev->conn->params.ssid_len,
-                              IEEE80211_BSS_TYPE_ESS,
+                              wdev->conn_bss_type,
                               IEEE80211_PRIVACY(wdev->conn->params.privacy));
        if (!bss)
                return NULL;
@@ -687,7 +687,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
                bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
                                       wdev->ssid, wdev->ssid_len,
-                                      IEEE80211_BSS_TYPE_ESS,
+                                      wdev->conn_bss_type,
                                       IEEE80211_PRIVACY_ANY);
                if (bss)
                        cfg80211_hold_bss(bss_from_pub(bss));
@@ -846,7 +846,7 @@ void cfg80211_roamed(struct net_device *dev,
 
        bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
                               wdev->ssid_len,
-                              IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+                              wdev->conn_bss_type, IEEE80211_PRIVACY_ANY);
        if (WARN_ON(!bss))
                return;
 
@@ -1017,6 +1017,9 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
        memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
        wdev->ssid_len = connect->ssid_len;
 
+       wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS :
+                                             IEEE80211_BSS_TYPE_ESS;
+
        if (!rdev->ops->connect)
                err = cfg80211_sme_connect(wdev, connect, prev_bssid);
        else
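Note: wdev->conn_bss_type is recorded at connect time and then used instead of the hard-coded IEEE80211_BSS_TYPE_ESS wherever the SME looks the BSS back up (connect result, roaming, and the helper above), so PBSS entries are matched correctly on 60 GHz. The new assignment is equivalent to this hypothetical helper, shown only for illustration:

/* Hypothetical helper, not in the patch. */
static enum ieee80211_bss_type
conn_bss_type(const struct cfg80211_connect_params *params)
{
        return params->pbss ? IEEE80211_BSS_TYPE_PBSS : IEEE80211_BSS_TYPE_ESS;
}
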
index 92770427b211f559be4735584d74043ec6301802..6e4eb35551776d497a85d91c1f0fab2ecf25d72b 100644 (file)
@@ -393,9 +393,9 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
 
-unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+static unsigned int __ieee80211_get_mesh_hdrlen(u8 flags)
 {
-       int ae = meshhdr->flags & MESH_FLAGS_AE;
+       int ae = flags & MESH_FLAGS_AE;
        /* 802.11-2012, 8.2.4.7.3 */
        switch (ae) {
        default:
@@ -407,21 +407,31 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
                return 18;
        }
 }
+
+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+{
+       return __ieee80211_get_mesh_hdrlen(meshhdr->flags);
+}
 EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
 
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
-                          enum nl80211_iftype iftype)
+static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
+                                   const u8 *addr, enum nl80211_iftype iftype)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       u16 hdrlen, ethertype;
-       u8 *payload;
-       u8 dst[ETH_ALEN];
-       u8 src[ETH_ALEN] __aligned(2);
+       struct {
+               u8 hdr[ETH_ALEN] __aligned(2);
+               __be16 proto;
+       } payload;
+       struct ethhdr tmp;
+       u16 hdrlen;
+       u8 mesh_flags = 0;
 
        if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
                return -1;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       if (skb->len < hdrlen + 8)
+               return -1;
 
        /* convert IEEE 802.11 header + possible LLC headers into Ethernet
         * header
@@ -432,8 +442,11 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
         *   1     0   BSSID SA    DA    n/a
         *   1     1   RA    TA    DA    SA
         */
-       memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
-       memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
+       memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
+       memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN);
+
+       if (iftype == NL80211_IFTYPE_MESH_POINT)
+               skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
 
        switch (hdr->frame_control &
                cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
@@ -450,44 +463,31 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
                             iftype != NL80211_IFTYPE_STATION))
                        return -1;
                if (iftype == NL80211_IFTYPE_MESH_POINT) {
-                       struct ieee80211s_hdr *meshdr =
-                               (struct ieee80211s_hdr *) (skb->data + hdrlen);
-                       /* make sure meshdr->flags is on the linear part */
-                       if (!pskb_may_pull(skb, hdrlen + 1))
-                               return -1;
-                       if (meshdr->flags & MESH_FLAGS_AE_A4)
+                       if (mesh_flags & MESH_FLAGS_AE_A4)
                                return -1;
-                       if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
+                       if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
                                skb_copy_bits(skb, hdrlen +
                                        offsetof(struct ieee80211s_hdr, eaddr1),
-                                       dst, ETH_ALEN);
-                               skb_copy_bits(skb, hdrlen +
-                                       offsetof(struct ieee80211s_hdr, eaddr2),
-                                       src, ETH_ALEN);
+                                       tmp.h_dest, 2 * ETH_ALEN);
                        }
-                       hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+                       hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
                }
                break;
        case cpu_to_le16(IEEE80211_FCTL_FROMDS):
                if ((iftype != NL80211_IFTYPE_STATION &&
                     iftype != NL80211_IFTYPE_P2P_CLIENT &&
                     iftype != NL80211_IFTYPE_MESH_POINT) ||
-                   (is_multicast_ether_addr(dst) &&
-                    ether_addr_equal(src, addr)))
+                   (is_multicast_ether_addr(tmp.h_dest) &&
+                    ether_addr_equal(tmp.h_source, addr)))
                        return -1;
                if (iftype == NL80211_IFTYPE_MESH_POINT) {
-                       struct ieee80211s_hdr *meshdr =
-                               (struct ieee80211s_hdr *) (skb->data + hdrlen);
-                       /* make sure meshdr->flags is on the linear part */
-                       if (!pskb_may_pull(skb, hdrlen + 1))
-                               return -1;
-                       if (meshdr->flags & MESH_FLAGS_AE_A5_A6)
+                       if (mesh_flags & MESH_FLAGS_AE_A5_A6)
                                return -1;
-                       if (meshdr->flags & MESH_FLAGS_AE_A4)
+                       if (mesh_flags & MESH_FLAGS_AE_A4)
                                skb_copy_bits(skb, hdrlen +
                                        offsetof(struct ieee80211s_hdr, eaddr1),
-                                       src, ETH_ALEN);
-                       hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+                                       tmp.h_source, ETH_ALEN);
+                       hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
                }
                break;
        case cpu_to_le16(0):
@@ -498,33 +498,33 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
                break;
        }
 
-       if (!pskb_may_pull(skb, hdrlen + 8))
-               return -1;
-
-       payload = skb->data + hdrlen;
-       ethertype = (payload[6] << 8) | payload[7];
+       skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
+       tmp.h_proto = payload.proto;
 
-       if (likely((ether_addr_equal(payload, rfc1042_header) &&
-                   ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
-                  ether_addr_equal(payload, bridge_tunnel_header))) {
+       if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
+                   tmp.h_proto != htons(ETH_P_AARP) &&
+                   tmp.h_proto != htons(ETH_P_IPX)) ||
+                  ether_addr_equal(payload.hdr, bridge_tunnel_header)))
                /* remove RFC1042 or Bridge-Tunnel encapsulation and
                 * replace EtherType */
-               skb_pull(skb, hdrlen + 6);
-               memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
-               memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
-       } else {
-               struct ethhdr *ehdr;
-               __be16 len;
+               hdrlen += ETH_ALEN + 2;
+       else
+               tmp.h_proto = htons(skb->len);
 
-               skb_pull(skb, hdrlen);
-               len = htons(skb->len);
+       pskb_pull(skb, hdrlen);
+
+       if (!ehdr)
                ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
-               memcpy(ehdr->h_dest, dst, ETH_ALEN);
-               memcpy(ehdr->h_source, src, ETH_ALEN);
-               ehdr->h_proto = len;
-       }
+       memcpy(ehdr, &tmp, sizeof(tmp));
+
        return 0;
 }
+
+int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+                          enum nl80211_iftype iftype)
+{
+       return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
+}
 EXPORT_SYMBOL(ieee80211_data_to_8023);
 
 int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
@@ -644,70 +644,75 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
 }
 EXPORT_SYMBOL(ieee80211_data_from_8023);
 
+static struct sk_buff *
+__ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
+                      int offset, int len)
+{
+       struct sk_buff *frame;
+
+       if (skb->len - offset < len)
+               return NULL;
+
+       /*
+        * Allocate and reserve two bytes more for payload
+        * alignment since sizeof(struct ethhdr) is 14.
+        */
+       frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + len);
+
+       skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
+       skb_copy_bits(skb, offset, skb_put(frame, len), len);
+
+       return frame;
+}
 
 void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
                              const u8 *addr, enum nl80211_iftype iftype,
                              const unsigned int extra_headroom,
                              bool has_80211_header)
 {
+       unsigned int hlen = ALIGN(extra_headroom, 4);
        struct sk_buff *frame = NULL;
        u16 ethertype;
        u8 *payload;
-       const struct ethhdr *eth;
-       int remaining, err;
-       u8 dst[ETH_ALEN], src[ETH_ALEN];
+       int offset = 0, remaining, err;
+       struct ethhdr eth;
+       bool reuse_skb = true;
+       bool last = false;
 
        if (has_80211_header) {
-               err = ieee80211_data_to_8023(skb, addr, iftype);
+               err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
                if (err)
                        goto out;
-
-               /* skip the wrapping header */
-               eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
-               if (!eth)
-                       goto out;
-       } else {
-               eth = (struct ethhdr *) skb->data;
        }
 
-       while (skb != frame) {
+       while (!last) {
+               unsigned int subframe_len;
+               int len;
                u8 padding;
-               __be16 len = eth->h_proto;
-               unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
-
-               remaining = skb->len;
-               memcpy(dst, eth->h_dest, ETH_ALEN);
-               memcpy(src, eth->h_source, ETH_ALEN);
 
+               skb_copy_bits(skb, offset, &eth, sizeof(eth));
+               len = ntohs(eth.h_proto);
+               subframe_len = sizeof(struct ethhdr) + len;
                padding = (4 - subframe_len) & 0x3;
+
                /* the last MSDU has no padding */
+               remaining = skb->len - offset;
                if (subframe_len > remaining)
                        goto purge;
 
-               skb_pull(skb, sizeof(struct ethhdr));
+               offset += sizeof(struct ethhdr);
                /* reuse skb for the last subframe */
-               if (remaining <= subframe_len + padding)
+               last = remaining <= subframe_len + padding;
+               if (!skb_is_nonlinear(skb) && last) {
+                       skb_pull(skb, offset);
                        frame = skb;
-               else {
-                       unsigned int hlen = ALIGN(extra_headroom, 4);
-                       /*
-                        * Allocate and reserve two bytes more for payload
-                        * alignment since sizeof(struct ethhdr) is 14.
-                        */
-                       frame = dev_alloc_skb(hlen + subframe_len + 2);
+                       reuse_skb = true;
+               } else {
+                       frame = __ieee80211_amsdu_copy(skb, hlen, offset, len);
                        if (!frame)
                                goto purge;
 
-                       skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
-                       memcpy(skb_put(frame, ntohs(len)), skb->data,
-                               ntohs(len));
-
-                       eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
-                                                       padding);
-                       if (!eth) {
-                               dev_kfree_skb(frame);
-                               goto purge;
-                       }
+                       offset += len + padding;
                }
 
                skb_reset_network_header(frame);
@@ -716,24 +721,20 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 
                payload = frame->data;
                ethertype = (payload[6] << 8) | payload[7];
-
                if (likely((ether_addr_equal(payload, rfc1042_header) &&
                            ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
                           ether_addr_equal(payload, bridge_tunnel_header))) {
-                       /* remove RFC1042 or Bridge-Tunnel
-                        * encapsulation and replace EtherType */
-                       skb_pull(frame, 6);
-                       memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
-                       memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
-               } else {
-                       memcpy(skb_push(frame, sizeof(__be16)), &len,
-                               sizeof(__be16));
-                       memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
-                       memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
+                       eth.h_proto = htons(ethertype);
+                       skb_pull(frame, ETH_ALEN + 2);
                }
+
+               memcpy(skb_push(frame, sizeof(eth)), &eth, sizeof(eth));
                __skb_queue_tail(list, frame);
        }
 
+       if (!reuse_skb)
+               dev_kfree_skb(skb);
+
        return;
 
  purge:
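Note: the util.c rework lets ieee80211_data_to_8023() and the A-MSDU splitter operate on non-linear skbs: header bytes are read out with skb_copy_bits() into small stack copies instead of pskb_may_pull() followed by direct pointer dereferences, the Ethernet header is assembled in a temporary struct ethhdr and pushed in a single memcpy(), and the original skb is only reused for the last subframe when it is already linear. A hedged sketch of the access pattern, using a hypothetical helper name:

/* Hypothetical helper, not from the patch: fetch the mesh flags byte without
 * requiring the skb data at that offset to be linear.  Returns 0 on success. */
static int peek_mesh_flags(const struct sk_buff *skb, unsigned int hdrlen,
                           u8 *flags)
{
        return skb_copy_bits(skb, hdrlen, flags, 1);
}
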
index c8717c1d082e702f9b071c480e873b408b400daf..b50ee5d622e14d4fb486ce0c533757e958702f54 100644 (file)
@@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
 
 /* IW event code */
 
+void wireless_nlevent_flush(void)
+{
+       struct sk_buff *skb;
+       struct net *net;
+
+       ASSERT_RTNL();
+
+       for_each_net(net) {
+               while ((skb = skb_dequeue(&net->wext_nlevents)))
+                       rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+                                   GFP_KERNEL);
+       }
+}
+EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
+
+static int wext_netdev_notifier_call(struct notifier_block *nb,
+                                    unsigned long state, void *ptr)
+{
+       /*
+        * When a netdev changes state in any way, flush all pending messages
+        * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
+        * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
+        * or similar - all of which could otherwise happen due to delays from
+        * schedule_work().
+        */
+       wireless_nlevent_flush();
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block wext_netdev_notifier = {
+       .notifier_call = wext_netdev_notifier_call,
+};
+
 static int __net_init wext_pernet_init(struct net *net)
 {
        skb_queue_head_init(&net->wext_nlevents);
@@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
 
 static int __init wireless_nlevent_init(void)
 {
-       return register_pernet_subsys(&wext_pernet_ops);
+       int err = register_pernet_subsys(&wext_pernet_ops);
+
+       if (err)
+               return err;
+
+       return register_netdevice_notifier(&wext_netdev_notifier);
 }
 
 subsys_initcall(wireless_nlevent_init);
@@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
 /* Process events generated by the wireless layer or the driver. */
 static void wireless_nlevent_process(struct work_struct *work)
 {
-       struct sk_buff *skb;
-       struct net *net;
-
        rtnl_lock();
-
-       for_each_net(net) {
-               while ((skb = skb_dequeue(&net->wext_nlevents)))
-                       rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
-                                   GFP_KERNEL);
-       }
-
+       wireless_nlevent_flush();
        rtnl_unlock();
 }
 
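Note: wireless_nlevent_flush() is split out of the event work handler and exported so that cfg80211's netdev notifier (the core.c hunk near the top of this merge) can drain queued wext netlink messages as soon as an interface changes state, instead of after a schedule_work() delay that could reorder them against rtnetlink's own notifications. The function asserts the RTNL, so any other caller has to wrap it; a usage sketch mirroring the reworked wireless_nlevent_process():

/* Usage sketch only; callers must hold the RTNL around the flush. */
static void drain_wext_events(void)
{
        rtnl_lock();
        wireless_nlevent_flush();
        rtnl_unlock();
}
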
index 6299ee95cd11b63112ae5b7875872cb72ca91208..ad466ed3309307c79a7d393359f74a6c558eef1b 100644 (file)
@@ -89,6 +89,100 @@ static void test_hashmap_sanity(int i, void *data)
        close(map_fd);
 }
 
+/* sanity tests for percpu map API */
+static void test_percpu_hashmap_sanity(int task, void *data)
+{
+       long long key, next_key;
+       int expected_key_mask = 0;
+       unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       long long value[nr_cpus];
+       int map_fd, i;
+
+       map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
+                               sizeof(value[0]), 2);
+       if (map_fd < 0) {
+               printf("failed to create hashmap '%s'\n", strerror(errno));
+               exit(1);
+       }
+
+       for (i = 0; i < nr_cpus; i++)
+               value[i] = i + 100;
+       key = 1;
+       /* insert key=1 element */
+       assert(!(expected_key_mask & key));
+       assert(bpf_update_elem(map_fd, &key, value, BPF_ANY) == 0);
+       expected_key_mask |= key;
+
+       /* BPF_NOEXIST means: add new element if it doesn't exist */
+       assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == -1 &&
+              /* key=1 already exists */
+              errno == EEXIST);
+
+       /* -1 is an invalid flag */
+       assert(bpf_update_elem(map_fd, &key, value, -1) == -1 &&
+              errno == EINVAL);
+
+       /* check that key=1 can be found. value could be 0 if the lookup
+        * was run from a different cpu.
+        */
+       value[0] = 1;
+       assert(bpf_lookup_elem(map_fd, &key, value) == 0 && value[0] == 100);
+
+       key = 2;
+       /* check that key=2 is not found */
+       assert(bpf_lookup_elem(map_fd, &key, value) == -1 && errno == ENOENT);
+
+       /* BPF_EXIST means: update existing element */
+       assert(bpf_update_elem(map_fd, &key, value, BPF_EXIST) == -1 &&
+              /* key=2 is not there */
+              errno == ENOENT);
+
+       /* insert key=2 element */
+       assert(!(expected_key_mask & key));
+       assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
+       expected_key_mask |= key;
+
+       /* key=1 and key=2 were inserted, check that key=0 cannot be inserted
+        * due to max_entries limit
+        */
+       key = 0;
+       assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == -1 &&
+              errno == E2BIG);
+
+       /* check that key = 0 doesn't exist */
+       assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
+
+       /* iterate over two elements */
+       while (!bpf_get_next_key(map_fd, &key, &next_key)) {
+               assert((expected_key_mask & next_key) == next_key);
+               expected_key_mask &= ~next_key;
+
+               assert(bpf_lookup_elem(map_fd, &next_key, value) == 0);
+               for (i = 0; i < nr_cpus; i++)
+                       assert(value[i] == i + 100);
+
+               key = next_key;
+       }
+       assert(errno == ENOENT);
+
+       /* Update with BPF_EXIST */
+       key = 1;
+       assert(bpf_update_elem(map_fd, &key, value, BPF_EXIST) == 0);
+
+       /* delete both elements */
+       key = 1;
+       assert(bpf_delete_elem(map_fd, &key) == 0);
+       key = 2;
+       assert(bpf_delete_elem(map_fd, &key) == 0);
+       assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
+
+       key = 0;
+       /* check that map is empty */
+       assert(bpf_get_next_key(map_fd, &key, &next_key) == -1 &&
+              errno == ENOENT);
+       close(map_fd);
+}
+
 static void test_arraymap_sanity(int i, void *data)
 {
        int key, next_key, map_fd;
@@ -142,6 +236,94 @@ static void test_arraymap_sanity(int i, void *data)
        close(map_fd);
 }
 
+static void test_percpu_arraymap_many_keys(void)
+{
+       unsigned nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       unsigned nr_keys = 20000;
+       long values[nr_cpus];
+       int key, map_fd, i;
+
+       map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+                               sizeof(values[0]), nr_keys);
+       if (map_fd < 0) {
+               printf("failed to create per-cpu arraymap '%s'\n",
+                      strerror(errno));
+               exit(1);
+       }
+
+       for (i = 0; i < nr_cpus; i++)
+               values[i] = i + 10;
+
+       for (key = 0; key < nr_keys; key++)
+               assert(bpf_update_elem(map_fd, &key, values, BPF_ANY) == 0);
+
+       for (key = 0; key < nr_keys; key++) {
+               for (i = 0; i < nr_cpus; i++)
+                       values[i] = 0;
+               assert(bpf_lookup_elem(map_fd, &key, values) == 0);
+               for (i = 0; i < nr_cpus; i++)
+                       assert(values[i] == i + 10);
+       }
+
+       close(map_fd);
+}
+
+static void test_percpu_arraymap_sanity(int i, void *data)
+{
+       unsigned nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       long values[nr_cpus];
+       int key, next_key, map_fd;
+
+       map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+                               sizeof(values[0]), 2);
+       if (map_fd < 0) {
+               printf("failed to create arraymap '%s'\n", strerror(errno));
+               exit(1);
+       }
+
+       for (i = 0; i < nr_cpus; i++)
+               values[i] = i + 100;
+
+       key = 1;
+       /* insert key=1 element */
+       assert(bpf_update_elem(map_fd, &key, values, BPF_ANY) == 0);
+
+       values[0] = 0;
+       assert(bpf_update_elem(map_fd, &key, values, BPF_NOEXIST) == -1 &&
+              errno == EEXIST);
+
+       /* check that key=1 can be found */
+       assert(bpf_lookup_elem(map_fd, &key, values) == 0 && values[0] == 100);
+
+       key = 0;
+       /* check that key=0 is also found and zero initialized */
+       assert(bpf_lookup_elem(map_fd, &key, values) == 0 &&
+              values[0] == 0 && values[nr_cpus - 1] == 0);
+
+
+       /* check that key=2 cannot be inserted due to max_entries limit */
+       key = 2;
+       assert(bpf_update_elem(map_fd, &key, values, BPF_EXIST) == -1 &&
+              errno == E2BIG);
+
+       /* check that key = 2 doesn't exist */
+       assert(bpf_lookup_elem(map_fd, &key, values) == -1 && errno == ENOENT);
+
+       /* iterate over two elements */
+       assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
+              next_key == 0);
+       assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
+              next_key == 1);
+       assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
+              errno == ENOENT);
+
+       /* delete shouldn't succeed */
+       key = 1;
+       assert(bpf_delete_elem(map_fd, &key) == -1 && errno == EINVAL);
+
+       close(map_fd);
+}
+
 #define MAP_SIZE (32 * 1024)
 static void test_map_large(void)
 {
@@ -209,7 +391,9 @@ static void run_parallel(int tasks, void (*fn)(int i, void *data), void *data)
 static void test_map_stress(void)
 {
        run_parallel(100, test_hashmap_sanity, NULL);
+       run_parallel(100, test_percpu_hashmap_sanity, NULL);
        run_parallel(100, test_arraymap_sanity, NULL);
+       run_parallel(100, test_percpu_arraymap_sanity, NULL);
 }
 
 #define TASKS 1024
@@ -282,7 +466,11 @@ static void test_map_parallel(void)
 int main(void)
 {
        test_hashmap_sanity(0, NULL);
+       test_percpu_hashmap_sanity(0, NULL);
        test_arraymap_sanity(0, NULL);
+       test_percpu_arraymap_sanity(0, NULL);
+       test_percpu_arraymap_many_keys();
+
        test_map_large();
        test_map_parallel();
        test_map_stress();
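Note: the new tests exercise the userspace convention for per-cpu maps: every lookup or update transfers one value per possible CPU, so the buffer handed to the syscall must hold sysconf(_SC_NPROCESSORS_CONF) entries, and a "total" is obtained by summing the per-CPU slots. A minimal sketch using the same samples/bpf wrappers (bpf_lookup_elem() here is the sample library helper declared in samples/bpf/libbpf.h, assumed to be on the include path, not the later libbpf API):

#include <unistd.h>
#include "libbpf.h"   /* samples/bpf wrapper, assumed include path */

/* Sum one key's value across all CPUs of a BPF_MAP_TYPE_PERCPU_{HASH,ARRAY} map. */
static long long sum_percpu_value(int map_fd, long long key)
{
        unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
        long long values[nr_cpus];
        long long sum = 0;
        unsigned int i;

        if (bpf_lookup_elem(map_fd, &key, values))
                return -1;
        for (i = 0; i < nr_cpus; i++)
                sum += values[i];
        return sum;
}
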
index b32367cfbff4aff3020bb9c36c8faf6c981dd0f8..09c1adc27d426ed4adec7408a6fbae9193c39bb0 100644 (file)
@@ -70,7 +70,7 @@ struct hist_key {
 };
 
 struct bpf_map_def SEC("maps") my_hist_map = {
-       .type = BPF_MAP_TYPE_HASH,
+       .type = BPF_MAP_TYPE_PERCPU_HASH,
        .key_size = sizeof(struct hist_key),
        .value_size = sizeof(long),
        .max_entries = 1024,
index cd0241c1447a5fb37d0e9e1633f4a47a457f344f..ab5b19e68acf0c3d53916ce6324f57777cf3acbd 100644 (file)
@@ -37,6 +37,8 @@ struct hist_key {
 static void print_hist_for_pid(int fd, void *task)
 {
        struct hist_key key = {}, next_key;
+       unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       long values[nr_cpus];
        char starstr[MAX_STARS];
        long value;
        long data[MAX_INDEX] = {};
@@ -49,7 +51,10 @@ static void print_hist_for_pid(int fd, void *task)
                        key = next_key;
                        continue;
                }
-               bpf_lookup_elem(fd, &next_key, &value);
+               bpf_lookup_elem(fd, &next_key, values);
+               value = 0;
+               for (i = 0; i < nr_cpus; i++)
+                       value += values[i];
                ind = next_key.index;
                data[ind] = value;
                if (value && ind > max_ind)
index bf337fbb09472cbe32bfbaff2d4313b7cafb58c6..9974c3d7c18b90f5850e87822a158c90e2645b8d 100644 (file)
@@ -20,7 +20,7 @@ struct bpf_map_def SEC("maps") my_map = {
 /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
  * example will no longer be meaningful
  */
-SEC("kprobe/blk_mq_start_request")
+SEC("kprobe/blk_start_request")
 int bpf_prog1(struct pt_regs *ctx)
 {
        long rq = PT_REGS_PARM1(ctx);
@@ -42,13 +42,13 @@ static unsigned int log2l(unsigned long long n)
 #define SLOTS 100
 
 struct bpf_map_def SEC("maps") lat_map = {
-       .type = BPF_MAP_TYPE_ARRAY,
+       .type = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size = sizeof(u32),
        .value_size = sizeof(u64),
        .max_entries = SLOTS,
 };
 
-SEC("kprobe/blk_update_request")
+SEC("kprobe/blk_account_io_completion")
 int bpf_prog2(struct pt_regs *ctx)
 {
        long rq = PT_REGS_PARM1(ctx);
@@ -81,7 +81,7 @@ int bpf_prog2(struct pt_regs *ctx)
 
        value = bpf_map_lookup_elem(&lat_map, &index);
        if (value)
-               __sync_fetch_and_add((long *)value, 1);
+               *value += 1;
 
        return 0;
 }
index 0aaa933ab93818df1634ba170e125e65dc6031d5..48716f7f0d8b9eae647a76ba93b07fe76a79f7bd 100644 (file)
 
 static void clear_stats(int fd)
 {
+       unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       __u64 values[nr_cpus];
        __u32 key;
-       __u64 value = 0;
 
+       memset(values, 0, sizeof(values));
        for (key = 0; key < SLOTS; key++)
-               bpf_update_elem(fd, &key, &value, BPF_ANY);
+               bpf_update_elem(fd, &key, values, BPF_ANY);
 }
 
 const char *color[] = {
@@ -75,15 +77,20 @@ static void print_banner(void)
 
 static void print_hist(int fd)
 {
-       __u32 key;
-       __u64 value;
-       __u64 cnt[SLOTS];
-       __u64 max_cnt = 0;
+       unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
        __u64 total_events = 0;
+       long values[nr_cpus];
+       __u64 max_cnt = 0;
+       __u64 cnt[SLOTS];
+       __u64 value;
+       __u32 key;
+       int i;
 
        for (key = 0; key < SLOTS; key++) {
+               bpf_lookup_elem(fd, &key, values);
                value = 0;
-               bpf_lookup_elem(fd, &key, &value);
+               for (i = 0; i < nr_cpus; i++)
+                       value += values[i];
                cnt[key] = value;
                total_events += value;
                if (value > max_cnt)
index 1c15717e0d5686972c1130fb718d0bc445c08dad..a1be75d0a5fd3fbf4742e555046896ea6fa6fe65 100644 (file)
@@ -23,8 +23,6 @@ include $(src)/Makefile
 PHONY += __dtbs_install_prep
 __dtbs_install_prep:
 ifeq ("$(dtbinst-root)", "$(obj)")
-       $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi
-       $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi
        $(Q)mkdir -p $(INSTALL_DTBS_PATH)
 endif
 
index d79cba4ce3ebfb24a660da011076deb81a6ff4ad..ebced77deb9c4dc380ab5f58751950adc0ac3d7f 100644 (file)
@@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
 defconfig: $(obj)/conf
 ifeq ($(KBUILD_DEFCONFIG),)
        $< $(silent) --defconfig $(Kconfig)
-else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
+else
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
        @$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
        $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
 else
        @$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
        $(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
 endif
+endif
 
 %_defconfig: $(obj)/conf
        $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
index 0b7dc2fd7bac0e2985a0b16c023b98cca23325e7..dd243d2abd875b535d006535232821eefad3460b 100644 (file)
@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
                if (in)
                        goto load;
                sym_add_change_count(1);
-               if (!sym_defconfig_list) {
-                       sym_calc_value(modules_sym);
+               if (!sym_defconfig_list)
                        return 1;
-               }
 
                for_all_defaults(sym_defconfig_list, prop) {
                        if (expr_calc_value(prop->visible.expr) == no ||
@@ -403,7 +401,6 @@ setsym:
        }
        free(line);
        fclose(in);
-       sym_calc_value(modules_sym);
        return 0;
 }
 
@@ -414,8 +411,12 @@ int conf_read(const char *name)
 
        sym_set_change_count(0);
 
-       if (conf_read_simple(name, S_DEF_USER))
+       if (conf_read_simple(name, S_DEF_USER)) {
+               sym_calc_value(modules_sym);
                return 1;
+       }
+
+       sym_calc_value(modules_sym);
 
        for_all_symbols(i, sym) {
                sym_calc_value(sym);
@@ -846,6 +847,7 @@ static int conf_split_config(void)
 
        name = conf_get_autoconfig_name();
        conf_read_simple(name, S_DEF_AUTO);
+       sym_calc_value(modules_sym);
 
        if (chdir("include/config"))
                return 1;
index ba6c34ea5429535cd303b1aa1d6ebd6ae30dd912..8f22654c71b46bcd487ef791ecfc9b50b2dbf6f2 100755 (executable)
@@ -93,9 +93,10 @@ kallsyms()
        local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
                      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
 
-       ${NM} -n ${1} | \
-               scripts/kallsyms ${kallsymopt} | \
-               ${CC} ${aflags} -c -o ${2} -x assembler-with-cpp -
+       local afile="`basename ${2} .o`.S"
+
+       ${NM} -n ${1} | scripts/kallsyms ${kallsymopt} > ${afile}
+       ${CC} ${aflags} -c -o ${2} ${afile}
 }
 
 # Create map file with all symbols from ${1}
index e080746e1a6b528a218b416b5344368cd6db7065..48958d3cec9e38473ff3296f7f6bf4d5db41612a 100644 (file)
@@ -594,7 +594,8 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
                if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
                    strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
                    strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
-                   strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
+                   strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0 ||
+                   strcmp(symname, ".TOC.") == 0)
                        return 1;
        /* Do not ignore this symbol */
        return 0;
diff --git a/scripts/prune-kernel b/scripts/prune-kernel
new file mode 100755 (executable)
index 0000000..ab5034e
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# because I use CONFIG_LOCALVERSION_AUTO, not the same version again and
+# again, /boot and /lib/modules/ eventually fill up.
+# Dumb script to purge that stuff:
+
+for f in "$@"
+do
+        if rpm -qf "/lib/modules/$f" >/dev/null; then
+                echo "keeping $f (installed from rpm)"
+        elif [ $(uname -r) = "$f" ]; then
+                echo "keeping $f (running kernel) "
+        else
+                echo "removing $f"
+                rm -f "/boot/initramfs-$f.img" "/boot/System.map-$f"
+                rm -f "/boot/vmlinuz-$f"   "/boot/config-$f"
+                rm -rf "/lib/modules/$f"
+                new-kernel-pkg --remove $f
+        fi
+done
index 07a87311055c5b50916c87f2117f4c70b0321cfd..09ef276c4bdcaf8e0c419df61d1cd7b07bf5253c 100644 (file)
@@ -430,7 +430,8 @@ static int __key_instantiate_and_link(struct key *key,
 
                        /* and link it into the destination keyring */
                        if (keyring) {
-                               set_bit(KEY_FLAG_KEEP, &key->flags);
+                               if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
+                                       set_bit(KEY_FLAG_KEEP, &key->flags);
 
                                __key_link(key, _edit);
                        }
index 2bbb41822d8ec8882f8dacbbb4c5f8a1feac59ca..8495b93681906bd39f4065723461cddac2e7d347 100644 (file)
@@ -83,6 +83,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
        { TCPDIAG_GETSOCK,      NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
        { DCCPDIAG_GETSOCK,     NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
        { SOCK_DIAG_BY_FAMILY,  NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+       { SOCK_DESTROY,         NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =
index e3e949126a5639c6a4a623750ec35922704c5110..a2a1e24becc6b8f2f6288a8853b02e6dd884becd 100644 (file)
@@ -97,11 +97,11 @@ config SND_PCM_TIMER
        bool "PCM timer interface" if EXPERT
        default y
        help
-         If you disable this option, pcm timer will be inavailable, so
-         those stubs used pcm timer (e.g. dmix, dsnoop & co) may work
+         If you disable this option, pcm timer will be unavailable, so
+         those stubs that use pcm timer (e.g. dmix, dsnoop & co) may work
           incorrectly.
 
-         For some embedded device, we may disable it to reduce memory
+         For some embedded devices, we may disable it to reduce memory
          footprint, about 20KB on x86_64 platform.
 
 config SND_SEQUENCER_OSS
index 18b8dc45bb8f77ea0c2ce16e79305c7872f057bc..7fac3cae8abd0a232a3d7a1c2563999495df6ec2 100644 (file)
 #include <sound/compress_offload.h>
 #include <sound/compress_driver.h>
 
+/* struct snd_compr_codec_caps overflows the ioctl bit size for some
+ * architectures, so we need to disable the relevant ioctls.
+ */
+#if _IOC_SIZEBITS < 14
+#define COMPR_CODEC_CAPS_OVERFLOW
+#endif
+
 /* TODO:
  * - add substream support for multiple devices in case of
  *     SND_DYNAMIC_MINORS is not used
@@ -440,6 +447,7 @@ out:
        return retval;
 }
 
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
 static int
 snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
 {
@@ -463,6 +471,7 @@ out:
        kfree(caps);
        return retval;
 }
+#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
 
 /* revisit this with snd_pcm_preallocate_xxx */
 static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
@@ -801,9 +810,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
        case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
                retval = snd_compr_get_caps(stream, arg);
                break;
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
        case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
                retval = snd_compr_get_codec_caps(stream, arg);
                break;
+#endif
        case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
                retval = snd_compr_set_params(stream, arg);
                break;
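Note: as the new comment in this file says, struct snd_compr_codec_caps overflows the ioctl size field on architectures where _IOC_SIZEBITS is smaller than the usual 14, so the GET_CODEC_CAPS handler is compiled out there rather than advertising an ioctl number that cannot encode its argument. The limit being guarded against is simply the width of that field:

/* Illustration only: the largest argument size an _IOWR() command can
 * describe on a given architecture is bounded by _IOC_SIZEBITS. */
#include <linux/ioctl.h>

#define MAX_IOC_ARG_SIZE ((1U << _IOC_SIZEBITS) - 1)
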
index 0e73d03b30e3f0d712b26f65453346e419caa96d..ebc9fdfe64df618088a8d55efff8b5312af3385d 100644 (file)
@@ -835,7 +835,8 @@ static int choose_rate(struct snd_pcm_substream *substream,
        return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
 }
 
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+                                    bool trylock)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_pcm_hw_params *params, *sparams;
@@ -849,7 +850,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
        struct snd_mask sformat_mask;
        struct snd_mask mask;
 
-       if (mutex_lock_interruptible(&runtime->oss.params_lock))
+       if (trylock) {
+               if (!(mutex_trylock(&runtime->oss.params_lock)))
+                       return -EAGAIN;
+       } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
                return -EINTR;
        sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
        params = kmalloc(sizeof(*params), GFP_KERNEL);
@@ -1092,7 +1096,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
                if (asubstream == NULL)
                        asubstream = substream;
                if (substream->runtime->oss.params) {
-                       err = snd_pcm_oss_change_params(substream);
+                       err = snd_pcm_oss_change_params(substream, false);
                        if (err < 0)
                                return err;
                }
@@ -1132,7 +1136,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
                return 0;
        runtime = substream->runtime;
        if (runtime->oss.params) {
-               err = snd_pcm_oss_change_params(substream);
+               err = snd_pcm_oss_change_params(substream, false);
                if (err < 0)
                        return err;
        }
@@ -2163,7 +2167,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
        runtime = substream->runtime;
 
        if (runtime->oss.params &&
-           (err = snd_pcm_oss_change_params(substream)) < 0)
+           (err = snd_pcm_oss_change_params(substream, false)) < 0)
                return err;
 
        info.fragsize = runtime->oss.period_bytes;
@@ -2804,7 +2808,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
                return -EIO;
        
        if (runtime->oss.params) {
-               if ((err = snd_pcm_oss_change_params(substream)) < 0)
+               /* use mutex_trylock() for params_lock to avoid a deadlock
+                * between mmap_sem and params_lock taken by
+                * copy_from/to_user() in snd_pcm_oss_write/read()
+                */
+               err = snd_pcm_oss_change_params(substream, true);
+               if (err < 0)
                        return err;
        }
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
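
The trylock path added above exists because of lock ordering: mmap() is entered with mmap_sem held and then wants params_lock, while the read/write paths hold params_lock and can take mmap_sem through the page fault inside copy_from/to_user(). A minimal userspace analogy of that pattern, sketched with pthreads and hypothetical names (not the ALSA code itself):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;

/* Callers that already hold another lock pass trylock=true and fail fast
 * with -EAGAIN instead of completing an A->B vs B->A deadlock cycle. */
static int change_params(bool trylock)
{
        if (trylock) {
                if (pthread_mutex_trylock(&params_lock) != 0)
                        return -EAGAIN;
        } else if (pthread_mutex_lock(&params_lock) != 0) {
                return -EINTR;
        }
        /* ...recompute and apply the stream parameters here... */
        pthread_mutex_unlock(&params_lock);
        return 0;
}

int main(void)
{
        printf("blocking: %d, trylock: %d\n",
               change_params(false), change_params(true));
        return 0;
}
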
index a7759846fbaadff0c9493760ce7b14eff7ca8ad9..795437b1008200cd534f9a46ad0272f3dbc20ca4 100644 (file)
@@ -942,31 +942,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
        unsigned long flags;
        long result = 0, count1;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
+       unsigned long appl_ptr;
 
+       spin_lock_irqsave(&runtime->lock, flags);
        while (count > 0 && runtime->avail) {
                count1 = runtime->buffer_size - runtime->appl_ptr;
                if (count1 > count)
                        count1 = count;
-               spin_lock_irqsave(&runtime->lock, flags);
                if (count1 > (int)runtime->avail)
                        count1 = runtime->avail;
+
+               /* update runtime->appl_ptr before unlocking for userbuf */
+               appl_ptr = runtime->appl_ptr;
+               runtime->appl_ptr += count1;
+               runtime->appl_ptr %= runtime->buffer_size;
+               runtime->avail -= count1;
+
                if (kernelbuf)
-                       memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
+                       memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
                if (userbuf) {
                        spin_unlock_irqrestore(&runtime->lock, flags);
                        if (copy_to_user(userbuf + result,
-                                        runtime->buffer + runtime->appl_ptr, count1)) {
+                                        runtime->buffer + appl_ptr, count1)) {
                                return result > 0 ? result : -EFAULT;
                        }
                        spin_lock_irqsave(&runtime->lock, flags);
                }
-               runtime->appl_ptr += count1;
-               runtime->appl_ptr %= runtime->buffer_size;
-               runtime->avail -= count1;
-               spin_unlock_irqrestore(&runtime->lock, flags);
                result += count1;
                count -= count1;
        }
+       spin_unlock_irqrestore(&runtime->lock, flags);
        return result;
 }
 
@@ -1055,23 +1060,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
 EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
 
 /**
- * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
  * @substream: the rawmidi substream
  * @buffer: the buffer pointer
  * @count: data size to transfer
  *
- * Copies data from the internal output buffer to the given buffer.
- *
- * Call this in the interrupt handler when the midi output is ready,
- * and call snd_rawmidi_transmit_ack() after the transmission is
- * finished.
- *
- * Return: The size of copied data, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
  */
-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
                              unsigned char *buffer, int count)
 {
-       unsigned long flags;
        int result, count1;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
 
@@ -1081,7 +1079,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        result = 0;
-       spin_lock_irqsave(&runtime->lock, flags);
        if (runtime->avail >= runtime->buffer_size) {
                /* warning: lowlevel layer MUST trigger down the hardware */
                goto __skip;
@@ -1106,25 +1103,47 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
                }
        }
       __skip:
+       return result;
+}
+EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
+
+/**
+ * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * @substream: the rawmidi substream
+ * @buffer: the buffer pointer
+ * @count: data size to transfer
+ *
+ * Copies data from the internal output buffer to the given buffer.
+ *
+ * Call this in the interrupt handler when the midi output is ready,
+ * and call snd_rawmidi_transmit_ack() after the transmission is
+ * finished.
+ *
+ * Return: The size of copied data, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+                             unsigned char *buffer, int count)
+{
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&runtime->lock, flags);
+       result = __snd_rawmidi_transmit_peek(substream, buffer, count);
        spin_unlock_irqrestore(&runtime->lock, flags);
        return result;
 }
 EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
 
 /**
- * snd_rawmidi_transmit_ack - acknowledge the transmission
+ * __snd_rawmidi_transmit_ack - acknowledge the transmission
  * @substream: the rawmidi substream
  * @count: the transferred count
  *
- * Advances the hardware pointer for the internal output buffer with
- * the given size and updates the condition.
- * Call after the transmission is finished.
- *
- * Return: The advanced size if successful, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
  */
-int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
 {
-       unsigned long flags;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
 
        if (runtime->buffer == NULL) {
@@ -1132,7 +1151,6 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
                          "snd_rawmidi_transmit_ack: output is not active!!!\n");
                return -EINVAL;
        }
-       spin_lock_irqsave(&runtime->lock, flags);
        snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
        runtime->hw_ptr += count;
        runtime->hw_ptr %= runtime->buffer_size;
@@ -1142,9 +1160,32 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
                if (runtime->drain || snd_rawmidi_ready(substream))
                        wake_up(&runtime->sleep);
        }
-       spin_unlock_irqrestore(&runtime->lock, flags);
        return count;
 }
+EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
+
+/**
+ * snd_rawmidi_transmit_ack - acknowledge the transmission
+ * @substream: the rawmidi substream
+ * @count: the transferred count
+ *
+ * Advances the hardware pointer for the internal output buffer with
+ * the given size and updates the condition.
+ * Call after the transmission is finished.
+ *
+ * Return: The advanced size if successful, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+{
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&runtime->lock, flags);
+       result = __snd_rawmidi_transmit_ack(substream, count);
+       spin_unlock_irqrestore(&runtime->lock, flags);
+       return result;
+}
 EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
 
 /**
@@ -1160,12 +1201,22 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
 int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
                         unsigned char *buffer, int count)
 {
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&runtime->lock, flags);
        if (!substream->opened)
-               return -EBADFD;
-       count = snd_rawmidi_transmit_peek(substream, buffer, count);
-       if (count < 0)
-               return count;
-       return snd_rawmidi_transmit_ack(substream, count);
+               result = -EBADFD;
+       else {
+               count = __snd_rawmidi_transmit_peek(substream, buffer, count);
+               if (count <= 0)
+                       result = count;
+               else
+                       result = __snd_rawmidi_transmit_ack(substream, count);
+       }
+       spin_unlock_irqrestore(&runtime->lock, flags);
+       return result;
 }
 EXPORT_SYMBOL(snd_rawmidi_transmit);
 
@@ -1177,8 +1228,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
        unsigned long flags;
        long count1, result;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
+       unsigned long appl_ptr;
 
-       if (snd_BUG_ON(!kernelbuf && !userbuf))
+       if (!kernelbuf && !userbuf)
                return -EINVAL;
        if (snd_BUG_ON(!runtime->buffer))
                return -EINVAL;
@@ -1197,12 +1249,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
                        count1 = count;
                if (count1 > (long)runtime->avail)
                        count1 = runtime->avail;
+
+               /* update runtime->appl_ptr before unlocking for userbuf */
+               appl_ptr = runtime->appl_ptr;
+               runtime->appl_ptr += count1;
+               runtime->appl_ptr %= runtime->buffer_size;
+               runtime->avail -= count1;
+
                if (kernelbuf)
-                       memcpy(runtime->buffer + runtime->appl_ptr,
+                       memcpy(runtime->buffer + appl_ptr,
                               kernelbuf + result, count1);
                else if (userbuf) {
                        spin_unlock_irqrestore(&runtime->lock, flags);
-                       if (copy_from_user(runtime->buffer + runtime->appl_ptr,
+                       if (copy_from_user(runtime->buffer + appl_ptr,
                                           userbuf + result, count1)) {
                                spin_lock_irqsave(&runtime->lock, flags);
                                result = result > 0 ? result : -EFAULT;
@@ -1210,9 +1269,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
                        }
                        spin_lock_irqsave(&runtime->lock, flags);
                }
-               runtime->appl_ptr += count1;
-               runtime->appl_ptr %= runtime->buffer_size;
-               runtime->avail -= count1;
                result += count1;
                count -= count1;
        }
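
The read and write hunks above share one idea: advance appl_ptr and avail while the spinlock is still held, so the region is reserved before the lock is dropped for copy_to_user()/copy_from_user(), and a concurrent caller can never be handed the same bytes. A compact userspace ring-buffer sketch of the read side, using a pthread mutex and hypothetical field names rather than the rawmidi structures:

#include <pthread.h>
#include <string.h>

#define BUF_SIZE 4096

struct ring {
        pthread_mutex_t lock;
        unsigned char buffer[BUF_SIZE];
        size_t appl_ptr;        /* next byte to hand out */
        size_t avail;           /* bytes currently queued */
};

/* Copy up to count bytes out of the ring into dst; returns bytes copied. */
static size_t ring_read(struct ring *r, unsigned char *dst, size_t count)
{
        size_t chunk, appl_ptr;

        pthread_mutex_lock(&r->lock);
        chunk = BUF_SIZE - r->appl_ptr;
        if (chunk > count)
                chunk = count;
        if (chunk > r->avail)
                chunk = r->avail;

        /* Reserve the region first: once appl_ptr/avail are updated, no other
         * reader can claim these bytes even while the lock is dropped below. */
        appl_ptr = r->appl_ptr;
        r->appl_ptr = (r->appl_ptr + chunk) % BUF_SIZE;
        r->avail -= chunk;
        pthread_mutex_unlock(&r->lock);

        /* The potentially slow copy (copy_to_user() in the kernel case) runs
         * unlocked against the private snapshot of appl_ptr. */
        memcpy(dst, r->buffer + appl_ptr, chunk);
        return chunk;
}

int main(void)
{
        static struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER, .avail = 5 };
        unsigned char out[8];

        memcpy(r.buffer, "hello", 5);
        return ring_read(&r, out, sizeof(out)) == 5 ? 0 : 1;
}
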
index b1221b29728e115ad95e50d4638de3e3949bb9da..6779e82b46dd7060bd982137ca75cfcdbbfa989e 100644 (file)
@@ -202,7 +202,7 @@ snd_seq_oss_open(struct file *file, int level)
 
        dp->index = i;
        if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
-               pr_err("ALSA: seq_oss: too many applications\n");
+               pr_debug("ALSA: seq_oss: too many applications\n");
                rc = -ENOMEM;
                goto _error;
        }
index 0f3b38184fe58da2809f9fa4fa9c79a83967c59e..b16dbef041747e1762c3285875a5573ac119b031 100644 (file)
@@ -308,7 +308,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
        struct seq_oss_synth *rec;
        struct seq_oss_synthinfo *info;
 
-       if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
+       if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
                return;
        for (i = 0; i < dp->max_synthdev; i++) {
                info = &dp->synths[i];
index 13cfa815732db759935f2daaf4c997c73016eb8c..58e79e02f2174e29f409cf129030af87d240dfce 100644 (file)
@@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
        else
                down_read(&grp->list_mutex);
        list_for_each_entry(subs, &grp->list_head, src_list) {
+               /* both ports ready? */
+               if (atomic_read(&subs->ref_count) != 2)
+                       continue;
                event->dest = subs->info.dest;
                if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
                        /* convert time according to flag with subscription */
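
The ref_count != 2 test above skips subscriptions that are only half established or already being torn down: each successfully connected side (source and destination) holds one reference, so only entries with both references receive events. A small sketch of that readiness convention with C11 atomics and a hypothetical struct (not the sequencer types):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct subscription {
        atomic_int ref_count;   /* one reference per connected side */
};

/* Deliver only when both the source and the destination side hold a ref. */
static bool subscription_ready(struct subscription *s)
{
        return atomic_load(&s->ref_count) == 2;
}

int main(void)
{
        struct subscription s;

        atomic_init(&s.ref_count, 0);
        atomic_fetch_add(&s.ref_count, 1);      /* source side subscribed */
        printf("one side connected, ready: %d\n", subscription_ready(&s));
        atomic_fetch_add(&s.ref_count, 1);      /* destination side subscribed */
        printf("both sides connected, ready: %d\n", subscription_ready(&s));
        return 0;
}
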
index 55170a20ae7237246f5560e14c5b5066bb52ba35..921fb2bd8fadb37adf167d6bf52dd97270f005a4 100644 (file)
@@ -173,10 +173,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
 }
 
 /* */
-enum group_type {
-       SRC_LIST, DEST_LIST
-};
-
 static int subscribe_port(struct snd_seq_client *client,
                          struct snd_seq_client_port *port,
                          struct snd_seq_port_subs_info *grp,
@@ -203,6 +199,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
        return NULL;
 }
 
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                       struct snd_seq_client_port *port,
+                                       struct snd_seq_subscribers *subs,
+                                       bool is_src, bool ack);
+
+static inline struct snd_seq_subscribers *
+get_subscriber(struct list_head *p, bool is_src)
+{
+       if (is_src)
+               return list_entry(p, struct snd_seq_subscribers, src_list);
+       else
+               return list_entry(p, struct snd_seq_subscribers, dest_list);
+}
+
 /*
  * remove all subscribers on the list
  * this is called from port_delete, for each src and dest list.
@@ -210,7 +220,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
 static void clear_subscriber_list(struct snd_seq_client *client,
                                  struct snd_seq_client_port *port,
                                  struct snd_seq_port_subs_info *grp,
-                                 int grptype)
+                                 int is_src)
 {
        struct list_head *p, *n;
 
@@ -219,15 +229,13 @@ static void clear_subscriber_list(struct snd_seq_client *client,
                struct snd_seq_client *c;
                struct snd_seq_client_port *aport;
 
-               if (grptype == SRC_LIST) {
-                       subs = list_entry(p, struct snd_seq_subscribers, src_list);
+               subs = get_subscriber(p, is_src);
+               if (is_src)
                        aport = get_client_port(&subs->info.dest, &c);
-               } else {
-                       subs = list_entry(p, struct snd_seq_subscribers, dest_list);
+               else
                        aport = get_client_port(&subs->info.sender, &c);
-               }
-               list_del(p);
-               unsubscribe_port(client, port, grp, &subs->info, 0);
+               delete_and_unsubscribe_port(client, port, subs, is_src, false);
+
                if (!aport) {
                        /* looks like the connected port is being deleted.
                         * we decrease the counter, and when both ports are deleted
@@ -235,21 +243,14 @@ static void clear_subscriber_list(struct snd_seq_client *client,
                         */
                        if (atomic_dec_and_test(&subs->ref_count))
                                kfree(subs);
-               } else {
-                       /* ok we got the connected port */
-                       struct snd_seq_port_subs_info *agrp;
-                       agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
-                       down_write(&agrp->list_mutex);
-                       if (grptype == SRC_LIST)
-                               list_del(&subs->dest_list);
-                       else
-                               list_del(&subs->src_list);
-                       up_write(&agrp->list_mutex);
-                       unsubscribe_port(c, aport, agrp, &subs->info, 1);
-                       kfree(subs);
-                       snd_seq_port_unlock(aport);
-                       snd_seq_client_unlock(c);
+                       continue;
                }
+
+               /* ok we got the connected port */
+               delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
+               kfree(subs);
+               snd_seq_port_unlock(aport);
+               snd_seq_client_unlock(c);
        }
 }
 
@@ -262,8 +263,8 @@ static int port_delete(struct snd_seq_client *client,
        snd_use_lock_sync(&port->use_lock); 
 
        /* clear subscribers info */
-       clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
-       clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
+       clear_subscriber_list(client, port, &port->c_src, true);
+       clear_subscriber_list(client, port, &port->c_dest, false);
 
        if (port->private_free)
                port->private_free(port->private_data);
@@ -479,85 +480,120 @@ static int match_subs_info(struct snd_seq_port_subscribe *r,
        return 0;
 }
 
-
-/* connect two ports */
-int snd_seq_port_connect(struct snd_seq_client *connector,
-                        struct snd_seq_client *src_client,
-                        struct snd_seq_client_port *src_port,
-                        struct snd_seq_client *dest_client,
-                        struct snd_seq_client_port *dest_port,
-                        struct snd_seq_port_subscribe *info)
+static int check_and_subscribe_port(struct snd_seq_client *client,
+                                   struct snd_seq_client_port *port,
+                                   struct snd_seq_subscribers *subs,
+                                   bool is_src, bool exclusive, bool ack)
 {
-       struct snd_seq_port_subs_info *src = &src_port->c_src;
-       struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
-       struct snd_seq_subscribers *subs, *s;
-       int err, src_called = 0;
-       unsigned long flags;
-       int exclusive;
+       struct snd_seq_port_subs_info *grp;
+       struct list_head *p;
+       struct snd_seq_subscribers *s;
+       int err;
 
-       subs = kzalloc(sizeof(*subs), GFP_KERNEL);
-       if (! subs)
-               return -ENOMEM;
-
-       subs->info = *info;
-       atomic_set(&subs->ref_count, 2);
-
-       down_write(&src->list_mutex);
-       down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
-       exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
+       grp = is_src ? &port->c_src : &port->c_dest;
        err = -EBUSY;
+       down_write(&grp->list_mutex);
        if (exclusive) {
-               if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
+               if (!list_empty(&grp->list_head))
                        goto __error;
        } else {
-               if (src->exclusive || dest->exclusive)
+               if (grp->exclusive)
                        goto __error;
                /* check whether already exists */
-               list_for_each_entry(s, &src->list_head, src_list) {
-                       if (match_subs_info(info, &s->info))
-                               goto __error;
-               }
-               list_for_each_entry(s, &dest->list_head, dest_list) {
-                       if (match_subs_info(info, &s->info))
+               list_for_each(p, &grp->list_head) {
+                       s = get_subscriber(p, is_src);
+                       if (match_subs_info(&subs->info, &s->info))
                                goto __error;
                }
        }
 
-       if ((err = subscribe_port(src_client, src_port, src, info,
-                                 connector->number != src_client->number)) < 0)
-               goto __error;
-       src_called = 1;
-
-       if ((err = subscribe_port(dest_client, dest_port, dest, info,
-                                 connector->number != dest_client->number)) < 0)
+       err = subscribe_port(client, port, grp, &subs->info, ack);
+       if (err < 0) {
+               grp->exclusive = 0;
                goto __error;
+       }
 
        /* add to list */
-       write_lock_irqsave(&src->list_lock, flags);
-       // write_lock(&dest->list_lock); // no other lock yet
-       list_add_tail(&subs->src_list, &src->list_head);
-       list_add_tail(&subs->dest_list, &dest->list_head);
-       // write_unlock(&dest->list_lock); // no other lock yet
-       write_unlock_irqrestore(&src->list_lock, flags);
+       write_lock_irq(&grp->list_lock);
+       if (is_src)
+               list_add_tail(&subs->src_list, &grp->list_head);
+       else
+               list_add_tail(&subs->dest_list, &grp->list_head);
+       grp->exclusive = exclusive;
+       atomic_inc(&subs->ref_count);
+       write_unlock_irq(&grp->list_lock);
+       err = 0;
+
+ __error:
+       up_write(&grp->list_mutex);
+       return err;
+}
 
-       src->exclusive = dest->exclusive = exclusive;
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                       struct snd_seq_client_port *port,
+                                       struct snd_seq_subscribers *subs,
+                                       bool is_src, bool ack)
+{
+       struct snd_seq_port_subs_info *grp;
+
+       grp = is_src ? &port->c_src : &port->c_dest;
+       down_write(&grp->list_mutex);
+       write_lock_irq(&grp->list_lock);
+       if (is_src)
+               list_del(&subs->src_list);
+       else
+               list_del(&subs->dest_list);
+       grp->exclusive = 0;
+       write_unlock_irq(&grp->list_lock);
+       up_write(&grp->list_mutex);
+
+       unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+/* connect two ports */
+int snd_seq_port_connect(struct snd_seq_client *connector,
+                        struct snd_seq_client *src_client,
+                        struct snd_seq_client_port *src_port,
+                        struct snd_seq_client *dest_client,
+                        struct snd_seq_client_port *dest_port,
+                        struct snd_seq_port_subscribe *info)
+{
+       struct snd_seq_subscribers *subs;
+       bool exclusive;
+       int err;
+
+       subs = kzalloc(sizeof(*subs), GFP_KERNEL);
+       if (!subs)
+               return -ENOMEM;
+
+       subs->info = *info;
+       atomic_set(&subs->ref_count, 0);
+       INIT_LIST_HEAD(&subs->src_list);
+       INIT_LIST_HEAD(&subs->dest_list);
+
+       exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
+
+       err = check_and_subscribe_port(src_client, src_port, subs, true,
+                                      exclusive,
+                                      connector->number != src_client->number);
+       if (err < 0)
+               goto error;
+       err = check_and_subscribe_port(dest_client, dest_port, subs, false,
+                                      exclusive,
+                                      connector->number != dest_client->number);
+       if (err < 0)
+               goto error_dest;
 
-       up_write(&dest->list_mutex);
-       up_write(&src->list_mutex);
        return 0;
 
__error:
-       if (src_called)
-               unsubscribe_port(src_client, src_port, src, info,
-                                connector->number != src_client->number);
error_dest:
+       delete_and_unsubscribe_port(src_client, src_port, subs, true,
+                                   connector->number != src_client->number);
+ error:
        kfree(subs);
-       up_write(&dest->list_mutex);
-       up_write(&src->list_mutex);
        return err;
 }
 
-
 /* remove the connection */
 int snd_seq_port_disconnect(struct snd_seq_client *connector,
                            struct snd_seq_client *src_client,
@@ -567,37 +603,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
                            struct snd_seq_port_subscribe *info)
 {
        struct snd_seq_port_subs_info *src = &src_port->c_src;
-       struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
        struct snd_seq_subscribers *subs;
        int err = -ENOENT;
-       unsigned long flags;
 
        down_write(&src->list_mutex);
-       down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
        /* look for the connection */
        list_for_each_entry(subs, &src->list_head, src_list) {
                if (match_subs_info(info, &subs->info)) {
-                       write_lock_irqsave(&src->list_lock, flags);
-                       // write_lock(&dest->list_lock);  // no lock yet
-                       list_del(&subs->src_list);
-                       list_del(&subs->dest_list);
-                       // write_unlock(&dest->list_lock);
-                       write_unlock_irqrestore(&src->list_lock, flags);
-                       src->exclusive = dest->exclusive = 0;
-                       unsubscribe_port(src_client, src_port, src, info,
-                                        connector->number != src_client->number);
-                       unsubscribe_port(dest_client, dest_port, dest, info,
-                                        connector->number != dest_client->number);
-                       kfree(subs);
+                       atomic_dec(&subs->ref_count); /* mark as not ready */
                        err = 0;
                        break;
                }
        }
-
-       up_write(&dest->list_mutex);
        up_write(&src->list_mutex);
-       return err;
+       if (err < 0)
+               return err;
+
+       delete_and_unsubscribe_port(src_client, src_port, subs, true,
+                                   connector->number != src_client->number);
+       delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
+                                   connector->number != dest_client->number);
+       kfree(subs);
+       return 0;
 }
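
The get_subscriber() helper introduced above folds the old SRC_LIST/DEST_LIST branches into one place by picking which embedded list node to convert back to its container. A userspace re-implementation of the underlying list_entry()/container_of() idiom, with a hypothetical subscribers struct:

#include <stddef.h>
#include <stdio.h>

/* userspace equivalent of the kernel's container_of()/list_entry() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct subscribers {
        int id;
        struct list_head src_list;      /* linked into the source port */
        struct list_head dest_list;     /* linked into the destination port */
};

/* One helper serves both walks: pick the embedded node by the is_src flag. */
static struct subscribers *get_subscriber(struct list_head *p, int is_src)
{
        if (is_src)
                return container_of(p, struct subscribers, src_list);
        return container_of(p, struct subscribers, dest_list);
}

int main(void)
{
        struct subscribers s = { .id = 42 };

        printf("%d %d\n", get_subscriber(&s.src_list, 1)->id,
               get_subscriber(&s.dest_list, 0)->id);
        return 0;
}
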
 
 
index 82b220c769c131ecd05fea96fd0692c12d56642f..293104926098f7074001b6f6a4248d2a09ddd360 100644 (file)
@@ -90,6 +90,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr)
 
 void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&tmr->lock, flags);
        /* setup defaults */
        tmr->ppq = 96;          /* 96 PPQ */
        tmr->tempo = 500000;    /* 120 BPM */
@@ -105,21 +108,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
        tmr->preferred_resolution = seq_default_timer_resolution;
 
        tmr->skew = tmr->skew_base = SKEW_BASE;
+       spin_unlock_irqrestore(&tmr->lock, flags);
 }
 
-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
+static void seq_timer_reset(struct snd_seq_timer *tmr)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&tmr->lock, flags);
-
        /* reset time & songposition */
        tmr->cur_time.tv_sec = 0;
        tmr->cur_time.tv_nsec = 0;
 
        tmr->tick.cur_tick = 0;
        tmr->tick.fraction = 0;
+}
+
+void snd_seq_timer_reset(struct snd_seq_timer *tmr)
+{
+       unsigned long flags;
 
+       spin_lock_irqsave(&tmr->lock, flags);
+       seq_timer_reset(tmr);
        spin_unlock_irqrestore(&tmr->lock, flags);
 }
 
@@ -138,8 +145,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
        tmr = q->timer;
        if (tmr == NULL)
                return;
-       if (!tmr->running)
+       spin_lock_irqsave(&tmr->lock, flags);
+       if (!tmr->running) {
+               spin_unlock_irqrestore(&tmr->lock, flags);
                return;
+       }
 
        resolution *= ticks;
        if (tmr->skew != tmr->skew_base) {
@@ -148,8 +158,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
                        (((resolution & 0xffff) * tmr->skew) >> 16);
        }
 
-       spin_lock_irqsave(&tmr->lock, flags);
-
        /* update timer */
        snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
 
@@ -296,26 +304,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
        t->callback = snd_seq_timer_interrupt;
        t->callback_data = q;
        t->flags |= SNDRV_TIMER_IFLG_AUTO;
+       spin_lock_irq(&tmr->lock);
        tmr->timeri = t;
+       spin_unlock_irq(&tmr->lock);
        return 0;
 }
 
 int snd_seq_timer_close(struct snd_seq_queue *q)
 {
        struct snd_seq_timer *tmr;
+       struct snd_timer_instance *t;
        
        tmr = q->timer;
        if (snd_BUG_ON(!tmr))
                return -EINVAL;
-       if (tmr->timeri) {
-               snd_timer_stop(tmr->timeri);
-               snd_timer_close(tmr->timeri);
-               tmr->timeri = NULL;
-       }
+       spin_lock_irq(&tmr->lock);
+       t = tmr->timeri;
+       tmr->timeri = NULL;
+       spin_unlock_irq(&tmr->lock);
+       if (t)
+               snd_timer_close(t);
        return 0;
 }
 
-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+static int seq_timer_stop(struct snd_seq_timer *tmr)
 {
        if (! tmr->timeri)
                return -EINVAL;
@@ -326,6 +338,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr)
        return 0;
 }
 
+int snd_seq_timer_stop(struct snd_seq_timer *tmr)
+{
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&tmr->lock, flags);
+       err = seq_timer_stop(tmr);
+       spin_unlock_irqrestore(&tmr->lock, flags);
+       return err;
+}
+
 static int initialize_timer(struct snd_seq_timer *tmr)
 {
        struct snd_timer *t;
@@ -358,13 +381,13 @@ static int initialize_timer(struct snd_seq_timer *tmr)
        return 0;
 }
 
-int snd_seq_timer_start(struct snd_seq_timer * tmr)
+static int seq_timer_start(struct snd_seq_timer *tmr)
 {
        if (! tmr->timeri)
                return -EINVAL;
        if (tmr->running)
-               snd_seq_timer_stop(tmr);
-       snd_seq_timer_reset(tmr);
+               seq_timer_stop(tmr);
+       seq_timer_reset(tmr);
        if (initialize_timer(tmr) < 0)
                return -EINVAL;
        snd_timer_start(tmr->timeri, tmr->ticks);
@@ -373,14 +396,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr)
        return 0;
 }
 
-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+int snd_seq_timer_start(struct snd_seq_timer *tmr)
+{
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&tmr->lock, flags);
+       err = seq_timer_start(tmr);
+       spin_unlock_irqrestore(&tmr->lock, flags);
+       return err;
+}
+
+static int seq_timer_continue(struct snd_seq_timer *tmr)
 {
        if (! tmr->timeri)
                return -EINVAL;
        if (tmr->running)
                return -EBUSY;
        if (! tmr->initialized) {
-               snd_seq_timer_reset(tmr);
+               seq_timer_reset(tmr);
                if (initialize_timer(tmr) < 0)
                        return -EINVAL;
        }
@@ -390,11 +424,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr)
        return 0;
 }
 
+int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+{
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&tmr->lock, flags);
+       err = seq_timer_continue(tmr);
+       spin_unlock_irqrestore(&tmr->lock, flags);
+       return err;
+}
+
 /* return current 'real' time. use timeofday() to get better granularity. */
 snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
 {
        snd_seq_real_time_t cur_time;
+       unsigned long flags;
 
+       spin_lock_irqsave(&tmr->lock, flags);
        cur_time = tmr->cur_time;
        if (tmr->running) { 
                struct timeval tm;
@@ -410,7 +457,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
                }
                snd_seq_sanity_real_time(&cur_time);
        }
-                
+       spin_unlock_irqrestore(&tmr->lock, flags);
        return cur_time;        
 }
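
The timer changes above repeatedly apply one refactoring: the public entry points (snd_seq_timer_stop/start/continue) now only take tmr->lock and call an unlocked seq_timer_*() helper, so internal paths that already hold the lock can reuse the helper without a recursive acquire. A minimal sketch of that split, with pthreads and hypothetical names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct seq_timer {
        pthread_mutex_t lock;
        int running;
};

/* Unlocked helper: only for callers that already hold tmr->lock. */
static int timer_stop_locked(struct seq_timer *tmr)
{
        if (!tmr->running)
                return -EINVAL;
        tmr->running = 0;
        return 0;
}

/* Public entry point: takes the lock, then reuses the helper. */
static int timer_stop(struct seq_timer *tmr)
{
        int err;

        pthread_mutex_lock(&tmr->lock);
        err = timer_stop_locked(tmr);
        pthread_mutex_unlock(&tmr->lock);
        return err;
}

int main(void)
{
        struct seq_timer tmr = { .lock = PTHREAD_MUTEX_INITIALIZER, .running = 1 };

        printf("first stop: %d, second stop: %d\n",
               timer_stop(&tmr), timer_stop(&tmr));
        return 0;
}
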
 
index 3da2d48610b3a91e4b93b0967b1fac5afebe41f7..c82ed3e70506db65adcbd48f6bdd55bd9628633d 100644 (file)
@@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
        struct snd_virmidi *vmidi = substream->runtime->private_data;
        int count, res;
        unsigned char buf[32], *pbuf;
+       unsigned long flags;
 
        if (up) {
                vmidi->trigger = 1;
                if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
                    !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
-                       snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
-                       return;         /* ignored */
+                       while (snd_rawmidi_transmit(substream, buf,
+                                                   sizeof(buf)) > 0) {
+                               /* ignored */
+                       }
+                       return;
                }
                if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
                        if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
                                return;
                        vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
                }
+               spin_lock_irqsave(&substream->runtime->lock, flags);
                while (1) {
-                       count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+                       count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
                        if (count <= 0)
                                break;
                        pbuf = buf;
@@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
                                        snd_midi_event_reset_encode(vmidi->parser);
                                        continue;
                                }
-                               snd_rawmidi_transmit_ack(substream, res);
+                               __snd_rawmidi_transmit_ack(substream, res);
                                pbuf += res;
                                count -= res;
                                if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
                                        if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
-                                               return;
+                                               goto out;
                                        vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
                                }
                        }
                }
+       out:
+               spin_unlock_irqrestore(&substream->runtime->lock, flags);
        } else {
                vmidi->trigger = 0;
        }
@@ -254,9 +261,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
  */
 static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
 {
+       struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
        struct snd_virmidi *vmidi = substream->runtime->private_data;
-       snd_midi_event_free(vmidi->parser);
+
+       write_lock_irq(&rdev->filelist_lock);
        list_del(&vmidi->list);
+       write_unlock_irq(&rdev->filelist_lock);
+       snd_midi_event_free(vmidi->parser);
        substream->runtime->private_data = NULL;
        kfree(vmidi);
        return 0;
index af1f68f7e315334202f191e3f049f044de9495f7..dca817fc78941b5f9109e8117b3fc5adb621562e 100644 (file)
@@ -422,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
        spin_lock_irqsave(&timer->lock, flags);
        list_for_each_entry(ts, &ti->slave_active_head, active_list)
                if (ts->ccallback)
-                       ts->ccallback(ti, event + 100, &tstamp, resolution);
+                       ts->ccallback(ts, event + 100, &tstamp, resolution);
        spin_unlock_irqrestore(&timer->lock, flags);
 }
 
@@ -451,6 +451,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
        unsigned long flags;
 
        spin_lock_irqsave(&slave_active_lock, flags);
+       if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+               spin_unlock_irqrestore(&slave_active_lock, flags);
+               return -EBUSY;
+       }
        timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
        if (timeri->master && timeri->timer) {
                spin_lock(&timeri->timer->lock);
@@ -475,7 +479,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
                return -EINVAL;
        if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
                result = snd_timer_start_slave(timeri);
-               snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+               if (result >= 0)
+                       snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
                return result;
        }
        timer = timeri->timer;
@@ -484,11 +489,18 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
        if (timer->card && timer->card->shutdown)
                return -ENODEV;
        spin_lock_irqsave(&timer->lock, flags);
+       if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+                            SNDRV_TIMER_IFLG_START)) {
+               result = -EBUSY;
+               goto unlock;
+       }
        timeri->ticks = timeri->cticks = ticks;
        timeri->pticks = 0;
        result = snd_timer_start1(timer, timeri, ticks);
+ unlock:
        spin_unlock_irqrestore(&timer->lock, flags);
-       snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+       if (result >= 0)
+               snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
        return result;
 }
 
@@ -502,9 +514,17 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
 
        if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
                spin_lock_irqsave(&slave_active_lock, flags);
+               if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+                       spin_unlock_irqrestore(&slave_active_lock, flags);
+                       return -EBUSY;
+               }
+               if (timeri->timer)
+                       spin_lock(&timeri->timer->lock);
                timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
                list_del_init(&timeri->ack_list);
                list_del_init(&timeri->active_list);
+               if (timeri->timer)
+                       spin_unlock(&timeri->timer->lock);
                spin_unlock_irqrestore(&slave_active_lock, flags);
                goto __end;
        }
@@ -512,6 +532,11 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
        if (!timer)
                return -EINVAL;
        spin_lock_irqsave(&timer->lock, flags);
+       if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+                              SNDRV_TIMER_IFLG_START))) {
+               spin_unlock_irqrestore(&timer->lock, flags);
+               return -EBUSY;
+       }
        list_del_init(&timeri->ack_list);
        list_del_init(&timeri->active_list);
        if (timer->card && timer->card->shutdown) {
@@ -581,10 +606,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
        if (timer->card && timer->card->shutdown)
                return -ENODEV;
        spin_lock_irqsave(&timer->lock, flags);
+       if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+               result = -EBUSY;
+               goto unlock;
+       }
        if (!timeri->cticks)
                timeri->cticks = 1;
        timeri->pticks = 0;
        result = snd_timer_start1(timer, timeri, timer->sticks);
+ unlock:
        spin_unlock_irqrestore(&timer->lock, flags);
        snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
        return result;
@@ -718,8 +748,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
                        ti->cticks = ti->ticks;
                } else {
                        ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
-                       if (--timer->running)
-                               list_del_init(&ti->active_list);
+                       --timer->running;
+                       list_del_init(&ti->active_list);
                }
                if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
                    (ti->flags & SNDRV_TIMER_IFLG_FAST))
@@ -1032,11 +1062,21 @@ static int snd_timer_s_stop(struct snd_timer * timer)
        return 0;
 }
 
+static int snd_timer_s_close(struct snd_timer *timer)
+{
+       struct snd_timer_system_private *priv;
+
+       priv = (struct snd_timer_system_private *)timer->private_data;
+       del_timer_sync(&priv->tlist);
+       return 0;
+}
+
 static struct snd_timer_hardware snd_timer_system =
 {
        .flags =        SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
        .resolution =   1000000000L / HZ,
        .ticks =        10000000L,
+       .close =        snd_timer_s_close,
        .start =        snd_timer_s_start,
        .stop =         snd_timer_s_stop
 };
@@ -1893,6 +1933,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 {
        struct snd_timer_user *tu;
        long result = 0, unit;
+       int qhead;
        int err = 0;
 
        tu = file->private_data;
@@ -1904,7 +1945,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
                        if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
                                err = -EAGAIN;
-                               break;
+                               goto _error;
                        }
 
                        set_current_state(TASK_INTERRUPTIBLE);
@@ -1919,42 +1960,37 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
                        if (tu->disconnected) {
                                err = -ENODEV;
-                               break;
+                               goto _error;
                        }
                        if (signal_pending(current)) {
                                err = -ERESTARTSYS;
-                               break;
+                               goto _error;
                        }
                }
 
+               qhead = tu->qhead++;
+               tu->qhead %= tu->queue_size;
                spin_unlock_irq(&tu->qlock);
-               if (err < 0)
-                       goto _error;
 
                if (tu->tread) {
-                       if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
-                                        sizeof(struct snd_timer_tread))) {
+                       if (copy_to_user(buffer, &tu->tqueue[qhead],
+                                        sizeof(struct snd_timer_tread)))
                                err = -EFAULT;
-                               goto _error;
-                       }
                } else {
-                       if (copy_to_user(buffer, &tu->queue[tu->qhead++],
-                                        sizeof(struct snd_timer_read))) {
+                       if (copy_to_user(buffer, &tu->queue[qhead],
+                                        sizeof(struct snd_timer_read)))
                                err = -EFAULT;
-                               goto _error;
-                       }
                }
 
-               tu->qhead %= tu->queue_size;
-
-               result += unit;
-               buffer += unit;
-
                spin_lock_irq(&tu->qlock);
                tu->qused--;
+               if (err < 0)
+                       goto _error;
+               result += unit;
+               buffer += unit;
        }
-       spin_unlock_irq(&tu->qlock);
  _error:
+       spin_unlock_irq(&tu->qlock);
        return result > 0 ? result : err;
 }
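
Several timer.c hunks above add the same guard: starting an instance that is already RUNNING (or stopping one that is not) now fails with -EBUSY under the lock, instead of re-linking the same node onto the active/ack lists. A tiny sketch of that guarded state transition, with a pthread mutex and hypothetical flags:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define TIMER_RUNNING 0x01

struct timer_instance {
        pthread_mutex_t lock;
        unsigned int flags;
};

/* Reject a second start while the first is still active: checking and
 * setting the flag under the same lock makes the transition atomic. */
static int timer_start(struct timer_instance *ti)
{
        int err = 0;

        pthread_mutex_lock(&ti->lock);
        if (ti->flags & TIMER_RUNNING)
                err = -EBUSY;
        else
                ti->flags |= TIMER_RUNNING;
        pthread_mutex_unlock(&ti->lock);
        return err;
}

int main(void)
{
        struct timer_instance ti = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("first start: %d, second start: %d\n",
               timer_start(&ti), timer_start(&ti));
        return 0;
}
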
 
index 75b74850c00535ee320f3827e094d9d4da1e30e3..c0f8f613f1f1b5e954ffa82b760c82353dd17c80 100644 (file)
@@ -109,6 +109,9 @@ struct dummy_timer_ops {
        snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
 };
 
+#define get_dummy_ops(substream) \
+       (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
+
 struct dummy_model {
        const char *name;
        int (*playback_constraints)(struct snd_pcm_runtime *runtime);
@@ -137,7 +140,6 @@ struct snd_dummy {
        int iobox;
        struct snd_kcontrol *cd_volume_ctl;
        struct snd_kcontrol *cd_switch_ctl;
-       const struct dummy_timer_ops *timer_ops;
 };
 
 /*
@@ -231,6 +233,8 @@ static struct dummy_model *dummy_models[] = {
  */
 
 struct dummy_systimer_pcm {
+       /* ops must be the first item */
+       const struct dummy_timer_ops *timer_ops;
        spinlock_t lock;
        struct timer_list timer;
        unsigned long base_time;
@@ -366,6 +370,8 @@ static const struct dummy_timer_ops dummy_systimer_ops = {
  */
 
 struct dummy_hrtimer_pcm {
+       /* ops must be the first item */
+       const struct dummy_timer_ops *timer_ops;
        ktime_t base_time;
        ktime_t period_time;
        atomic_t running;
@@ -492,31 +498,25 @@ static const struct dummy_timer_ops dummy_hrtimer_ops = {
 
 static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
-       struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
-               return dummy->timer_ops->start(substream);
+               return get_dummy_ops(substream)->start(substream);
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
-               return dummy->timer_ops->stop(substream);
+               return get_dummy_ops(substream)->stop(substream);
        }
        return -EINVAL;
 }
 
 static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
 {
-       struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
-       return dummy->timer_ops->prepare(substream);
+       return get_dummy_ops(substream)->prepare(substream);
 }
 
 static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
 {
-       struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
-       return dummy->timer_ops->pointer(substream);
+       return get_dummy_ops(substream)->pointer(substream);
 }
 
 static struct snd_pcm_hardware dummy_pcm_hardware = {
@@ -562,17 +562,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
        struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
        struct dummy_model *model = dummy->model;
        struct snd_pcm_runtime *runtime = substream->runtime;
+       const struct dummy_timer_ops *ops;
        int err;
 
-       dummy->timer_ops = &dummy_systimer_ops;
+       ops = &dummy_systimer_ops;
 #ifdef CONFIG_HIGH_RES_TIMERS
        if (hrtimer)
-               dummy->timer_ops = &dummy_hrtimer_ops;
+               ops = &dummy_hrtimer_ops;
 #endif
 
-       err = dummy->timer_ops->create(substream);
+       err = ops->create(substream);
        if (err < 0)
                return err;
+       get_dummy_ops(substream) = ops;
 
        runtime->hw = dummy->pcm_hw;
        if (substream->pcm->device & 1) {
@@ -594,7 +596,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
                        err = model->capture_constraints(substream->runtime);
        }
        if (err < 0) {
-               dummy->timer_ops->free(substream);
+               get_dummy_ops(substream)->free(substream);
                return err;
        }
        return 0;
@@ -602,8 +604,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
 
 static int dummy_pcm_close(struct snd_pcm_substream *substream)
 {
-       struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-       dummy->timer_ops->free(substream);
+       get_dummy_ops(substream)->free(substream);
        return 0;
 }
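
get_dummy_ops() above relies on the ops pointer being the first member of each per-stream private struct: runtime->private_data can then be cast to a pointer-to-ops-pointer regardless of which variant (systimer or hrtimer) sits behind it. A standalone sketch of that idiom with hypothetical types:

#include <stdio.h>

struct timer_ops { const char *name; };

struct systimer_priv {
        const struct timer_ops *ops;    /* must stay the first member */
        int period_ms;
};

/* Because ops sits at offset 0, a pointer to the private struct is also a
 * pointer to its ops field, whatever the concrete struct type is. */
#define get_ops(priv) (*(const struct timer_ops **)(priv))

int main(void)
{
        static const struct timer_ops sys_ops = { .name = "system timer" };
        struct systimer_priv p = { .ops = &sys_ops, .period_ms = 10 };
        void *private_data = &p;        /* what runtime->private_data holds */

        printf("dispatching through %s ops\n", get_ops(private_data)->name);
        return 0;
}
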
 
index 926e5dcbb66a1a3ec523482ceb5e199f8c455b64..5022c9b97ddfc007bc6119ab6a0cb41fafb2c16d 100644 (file)
@@ -47,14 +47,16 @@ static const unsigned int bridgeco_freq_table[] = {
        [6] = 0x07,
 };
 
-static unsigned int
-get_formation_index(unsigned int rate)
+static int
+get_formation_index(unsigned int rate, unsigned int *index)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) {
-               if (snd_bebob_rate_table[i] == rate)
-                       return i;
+               if (snd_bebob_rate_table[i] == rate) {
+                       *index = i;
+                       return 0;
+               }
        }
        return -EINVAL;
 }
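
The rework above turns get_formation_index() from returning an index-or-negative-errno in an unsigned value into returning a status code and filling an out-parameter, so a missing rate can no longer be misread as a huge "valid" index. A standalone userspace rendering of the same shape, with a made-up rate table:

#include <errno.h>
#include <stdio.h>

static const unsigned int rate_table[] = { 32000, 44100, 48000, 88200, 96000 };

/* Return 0 and fill *index on success, or a negative error code. */
static int get_rate_index(unsigned int rate, unsigned int *index)
{
        unsigned int i;

        for (i = 0; i < sizeof(rate_table) / sizeof(rate_table[0]); i++) {
                if (rate_table[i] == rate) {
                        *index = i;
                        return 0;
                }
        }
        return -EINVAL;
}

int main(void)
{
        unsigned int idx;

        if (get_rate_index(48000, &idx) == 0)
                printf("48000 Hz -> index %u\n", idx);
        printf("192000 Hz -> %d\n", get_rate_index(192000, &idx));
        return 0;
}
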
@@ -425,7 +427,9 @@ make_both_connections(struct snd_bebob *bebob, unsigned int rate)
                goto end;
 
        /* confirm params for both streams */
-       index = get_formation_index(rate);
+       err = get_formation_index(rate, &index);
+       if (err < 0)
+               goto end;
        pcm_channels = bebob->tx_stream_formations[index].pcm;
        midi_channels = bebob->tx_stream_formations[index].midi;
        err = amdtp_am824_set_parameters(&bebob->tx_stream, rate,
index b02a5e8cad448c40368b9396dc1c0890c91a3a42..0ac92aba5bc1c9a4c01b1764a6f16a098e3bbae1 100644 (file)
@@ -63,7 +63,7 @@ struct amdtp_dot {
 #define BYTE_PER_SAMPLE (4)
 #define MAGIC_DOT_BYTE (2)
 #define MAGIC_BYTE_OFF(x) (((x) * BYTE_PER_SAMPLE) + MAGIC_DOT_BYTE)
-static const u8 dot_scrt(const u8 idx, const unsigned int off)
+static u8 dot_scrt(const u8 idx, const unsigned int off)
 {
        /*
         * the length of the added pattern only depends on the lower nibble
index 904ce0329fa1ac3d7e3364d66664d6ba60d6b6d8..040a96d1ba8ec1fa1dbfa4521a46e7efb865f435 100644 (file)
@@ -230,6 +230,7 @@ int snd_tscm_transaction_register(struct snd_tscm *tscm)
        return err;
 error:
        fw_core_remove_address_handler(&tscm->async_handler);
+       tscm->async_handler.callback_data = NULL;
        return err;
 }
 
@@ -276,6 +277,9 @@ void snd_tscm_transaction_unregister(struct snd_tscm *tscm)
        __be32 reg;
        unsigned int i;
 
+       if (tscm->async_handler.callback_data == NULL)
+               return;
+
        /* Turn off FireWire LED. */
        reg = cpu_to_be32(0x0000008e);
        snd_fw_transaction(tscm->unit, TCODE_WRITE_QUADLET_REQUEST,
@@ -297,6 +301,8 @@ void snd_tscm_transaction_unregister(struct snd_tscm *tscm)
                           &reg, sizeof(reg), 0);
 
        fw_core_remove_address_handler(&tscm->async_handler);
+       tscm->async_handler.callback_data = NULL;
+
        for (i = 0; i < TSCM_MIDI_OUT_PORT_MAX; i++)
                snd_fw_async_midi_port_destroy(&tscm->out_ports[i]);
 }
index ee0bc183950888ba6ade12bb8441fa81be41ec11..e281c338e562d59e861303789d82c319b9a2ae9c 100644 (file)
@@ -21,7 +21,6 @@ static struct snd_tscm_spec model_specs[] = {
                .pcm_playback_analog_channels = 8,
                .midi_capture_ports = 4,
                .midi_playback_ports = 4,
-               .is_controller = true,
        },
        {
                .name = "FW-1082",
@@ -31,9 +30,16 @@ static struct snd_tscm_spec model_specs[] = {
                .pcm_playback_analog_channels = 2,
                .midi_capture_ports = 2,
                .midi_playback_ports = 2,
-               .is_controller = true,
        },
-       /* FW-1804 may be supported. */
+       {
+               .name = "FW-1804",
+               .has_adat = true,
+               .has_spdif = true,
+               .pcm_capture_analog_channels = 8,
+               .pcm_playback_analog_channels = 2,
+               .midi_capture_ports = 2,
+               .midi_playback_ports = 4,
+       },
 };
 
 static int identify_model(struct snd_tscm *tscm)
index 2d028d2bd3bdcdcd8be15a184e555786726c94d8..30ab77e924f7f68f9b939a230109152722ad9057 100644 (file)
@@ -39,7 +39,6 @@ struct snd_tscm_spec {
        unsigned int pcm_playback_analog_channels;
        unsigned int midi_capture_ports;
        unsigned int midi_playback_ports;
-       bool is_controller;
 };
 
 #define TSCM_MIDI_IN_PORT_MAX  4
@@ -72,9 +71,6 @@ struct snd_tscm {
        struct snd_fw_async_midi_port out_ports[TSCM_MIDI_OUT_PORT_MAX];
        u8 running_status[TSCM_MIDI_OUT_PORT_MAX];
        bool on_sysex[TSCM_MIDI_OUT_PORT_MAX];
-
-       /* For control messages. */
-       struct snd_firewire_tascam_status *status;
 };
 
 #define TSCM_ADDR_BASE                 0xffff00000000ull
index 0216475fc759e23442a9f2ab7775c6966458b2cd..37adcc6cbe6bd852a290cc95ea58cc05863e88ba 100644 (file)
@@ -3,6 +3,7 @@
 config SND_WSS_LIB
         tristate
         select SND_PCM
+       select SND_TIMER
 
 config SND_SB_COMMON
         tristate
@@ -42,6 +43,7 @@ config SND_AD1816A
        select SND_OPL3_LIB
        select SND_MPU401_UART
        select SND_PCM
+       select SND_TIMER
        help
          Say Y here to include support for Analog Devices SoundPort
          AD1816A or compatible sound chips.
@@ -209,6 +211,7 @@ config SND_GUSCLASSIC
        tristate "Gravis UltraSound Classic"
        select SND_RAWMIDI
        select SND_PCM
+       select SND_TIMER
        help
          Say Y here to include support for Gravis UltraSound Classic
          soundcards.
@@ -221,6 +224,7 @@ config SND_GUSEXTREME
        select SND_OPL3_LIB
        select SND_MPU401_UART
        select SND_PCM
+       select SND_TIMER
        help
          Say Y here to include support for Gravis UltraSound Extreme
          soundcards.
index 656ce39bddbc531c27d5a15b6d07e899e17f1cd6..8f6594a7d37f7b940ded27f6a8a0395ea8fa1442 100644 (file)
@@ -155,6 +155,7 @@ config SND_AZT3328
        select SND_PCM
        select SND_RAWMIDI
        select SND_AC97_CODEC
+       select SND_TIMER
        depends on ZONE_DMA
        help
          Say Y here to include support for Aztech AZF3328 (PCI168)
@@ -463,6 +464,7 @@ config SND_EMU10K1
        select SND_HWDEP
        select SND_RAWMIDI
        select SND_AC97_CODEC
+       select SND_TIMER
        depends on ZONE_DMA
        help
          Say Y to include support for Sound Blaster PCI 512, Live!,
@@ -889,6 +891,7 @@ config SND_YMFPCI
        select SND_OPL3_LIB
        select SND_MPU401_UART
        select SND_AC97_CODEC
+       select SND_TIMER
        help
          Say Y here to include support for Yamaha PCI audio chips -
          YMF724, YMF724F, YMF740, YMF740C, YMF744, YMF754.
index 28e2f8b42f5e8e4e7f3aaaaa88f45d6834b02591..891453451543102994b7912584f00483b2c472f0 100644 (file)
@@ -1141,6 +1141,14 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
                emu->emu1010.firmware_thread =
                        kthread_create(emu1010_firmware_thread, emu,
                                       "emu1010_firmware");
+               if (IS_ERR(emu->emu1010.firmware_thread)) {
+                       err = PTR_ERR(emu->emu1010.firmware_thread);
+                       emu->emu1010.firmware_thread = NULL;
+                       dev_info(emu->card->dev,
+                                       "emu1010: Creating thread failed\n");
+                       return err;
+               }
+
                wake_up_process(emu->emu1010.firmware_thread);
        }
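
The added IS_ERR() check matters because kthread_create() does not return NULL on failure; it returns an error pointer, and calling wake_up_process() on that poisoned value would crash. A userspace re-implementation of the error-pointer helpers, just to show the encoding (the real macros live in include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Errors are encoded as the last MAX_ERRNO values of the address space. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *thread = ERR_PTR(-ENOMEM);        /* a failed create returns this */

        if (IS_ERR(thread)) {
                printf("creating thread failed: %ld\n", PTR_ERR(thread));
                thread = NULL;                  /* don't keep the poisoned pointer */
        }
        return 0;
}
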
 
index 30c8efe0f80a3a456bbbe09b6ce05abf810a8552..7ca5b89f088a6922e6acd09c12befae864994320 100644 (file)
@@ -4028,9 +4028,9 @@ static void pin_power_callback(struct hda_codec *codec,
                               struct hda_jack_callback *jack,
                               bool on)
 {
-       if (jack && jack->tbl->nid)
+       if (jack && jack->nid)
                sync_power_state_change(codec,
-                                       set_pin_power_jack(codec, jack->tbl->nid, on));
+                                       set_pin_power_jack(codec, jack->nid, on));
 }
 
 /* callback only doing power up -- called at first */
index 256e6cda218f939e6e6d09201c10d39827825ce9..4045dca3d699edd13f06aa9ab7c095948c932a55 100644 (file)
@@ -90,6 +90,8 @@ enum {
 #define NVIDIA_HDA_ENABLE_COHBIT      0x01
 
 /* Defines for Intel SCH HDA snoop control */
+#define INTEL_HDA_CGCTL         0x48
+#define INTEL_HDA_CGCTL_MISCBDCGE        (0x1 << 6)
 #define INTEL_SCH_HDA_DEVC      0x78
 #define INTEL_SCH_HDA_DEVC_NOSNOOP       (0x1<<11)
 
@@ -534,10 +536,21 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
 {
        struct hdac_bus *bus = azx_bus(chip);
        struct pci_dev *pci = chip->pci;
+       u32 val;
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                snd_hdac_set_codec_wakeup(bus, true);
+       if (IS_BROXTON(pci)) {
+               pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+               val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
+               pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+       }
        azx_init_chip(chip, full_reset);
+       if (IS_BROXTON(pci)) {
+               pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+               val = val | INTEL_HDA_CGCTL_MISCBDCGE;
+               pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+       }
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                snd_hdac_set_codec_wakeup(bus, false);
 
index c945e257d368890bcd3f72037568e566c0ba9221..a33234e04d4f7a1fe311ce7db50f1a56e9e1d7d4 100644 (file)
@@ -259,7 +259,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
                if (!callback)
                        return ERR_PTR(-ENOMEM);
                callback->func = func;
-               callback->tbl = jack;
+               callback->nid = jack->nid;
                callback->next = jack->callback;
                jack->callback = callback;
        }
index 858708a044f57ef563d79a100c5a92e5f3adaa3d..e9814c0168ea5d77da2922454ed4256d7ad2a30a 100644 (file)
@@ -21,7 +21,7 @@ struct hda_jack_callback;
 typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
 
 struct hda_jack_callback {
-       struct hda_jack_tbl *tbl;
+       hda_nid_t nid;
        hda_jack_callback_fn func;
        unsigned int private_data;      /* arbitrary data */
        struct hda_jack_callback *next;
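
(Not part of the patch, a minimal sketch of the new callback pattern: with the tbl back-pointer removed from struct hda_jack_callback, a callback that still needs the full jack-table entry looks it up from cb->nid, exactly as the patch_ca0132.c hunk further down does. The function name is only illustrative.)

static void example_jack_cb(struct hda_codec *codec,
                            struct hda_jack_callback *cb)
{
        struct hda_jack_tbl *tbl = snd_hda_jack_tbl_get(codec, cb->nid);

        if (tbl)
                tbl->block_report = 1;
}
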
index 4ef2259f88cae3b1cf328880447dbf528ad42a95..9ceb2bc36e68026bd02dfca8bdca4f9ffab2e849 100644 (file)
@@ -4427,13 +4427,16 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
 static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
 {
        struct ca0132_spec *spec = codec->spec;
+       struct hda_jack_tbl *tbl;
 
        /* Delay enabling the HP amp, to let the mic-detection
         * state machine run.
         */
        cancel_delayed_work_sync(&spec->unsol_hp_work);
        schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
-       cb->tbl->block_report = 1;
+       tbl = snd_hda_jack_tbl_get(codec, cb->nid);
+       if (tbl)
+               tbl->block_report = 1;
 }
 
 static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
index a12ae8ac091451261a2ba613543677fabedb8cd0..c1c855a6c0af8199d03b04419d16d3494507ddeb 100644 (file)
@@ -614,6 +614,7 @@ enum {
        CS4208_MAC_AUTO,
        CS4208_MBA6,
        CS4208_MBP11,
+       CS4208_MACMINI,
        CS4208_GPIO0,
 };
 
@@ -621,6 +622,7 @@ static const struct hda_model_fixup cs4208_models[] = {
        { .id = CS4208_GPIO0, .name = "gpio0" },
        { .id = CS4208_MBA6, .name = "mba6" },
        { .id = CS4208_MBP11, .name = "mbp11" },
+       { .id = CS4208_MACMINI, .name = "macmini" },
        {}
 };
 
@@ -632,6 +634,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
 /* codec SSID matching */
 static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+       SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
        SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
        SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
        SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
@@ -666,6 +669,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
        snd_hda_apply_fixup(codec, action);
 }
 
+/* MacMini 7,1 has the inverted jack detection */
+static void cs4208_fixup_macmini(struct hda_codec *codec,
+                                const struct hda_fixup *fix, int action)
+{
+       static const struct hda_pintbl pincfgs[] = {
+               { 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
+               { 0x21, 0x004be140 }, /* SPDIF: disable detect */
+               { }
+       };
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               /* HP pin (0x10) has an inverted detection */
+               codec->inv_jack_detect = 1;
+               /* disable the bogus Mic and SPDIF jack detections */
+               snd_hda_apply_pincfgs(codec, pincfgs);
+       }
+}
+
 static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
                               struct snd_ctl_elem_value *ucontrol)
 {
@@ -709,6 +730,12 @@ static const struct hda_fixup cs4208_fixups[] = {
                .chained = true,
                .chain_id = CS4208_GPIO0,
        },
+       [CS4208_MACMINI] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs4208_fixup_macmini,
+               .chained = true,
+               .chain_id = CS4208_GPIO0,
+       },
        [CS4208_GPIO0] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs4208_fixup_gpio0,
index 426a29a1c19bbda5841bd9d4574bf66e83cb8b76..8ee78dbd4c6054065186501b59ecd957c25285a8 100644 (file)
@@ -448,7 +448,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
        eld = &per_pin->sink_eld;
 
        mutex_lock(&per_pin->lock);
-       if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
+       if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
+           eld->eld_size > ELD_MAX_SIZE) {
                mutex_unlock(&per_pin->lock);
                snd_BUG();
                return -EINVAL;
@@ -1193,7 +1194,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
 static void jack_callback(struct hda_codec *codec,
                          struct hda_jack_callback *jack)
 {
-       check_presence_and_report(codec, jack->tbl->nid);
+       check_presence_and_report(codec, jack->nid);
 }
 
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -3653,6 +3654,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP",     patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",   patch_via_hdmi),
 HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP",   patch_via_hdmi),
index 33753244f48fe98de3a57709de2793abb74f2ae0..efd4980cffb8a0273228ac7570315f0f1aa1dbf9 100644 (file)
@@ -282,7 +282,7 @@ static void alc_update_knob_master(struct hda_codec *codec,
        uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
        if (!uctl)
                return;
-       val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
+       val = snd_hda_codec_read(codec, jack->nid, 0,
                                 AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
        val &= HDA_AMP_VOLMASK;
        uctl->value.integer.value[0] = val;
@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0292:
                alc_update_coef_idx(codec, 0x4, 1<<15, 0);
                break;
+       case 0x10ec0225:
        case 0x10ec0233:
        case 0x10ec0255:
        case 0x10ec0256:
@@ -900,6 +901,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
        { 0x10ec0899, 0x1028, 0, "ALC3861" },
        { 0x10ec0298, 0x1028, 0, "ALC3266" },
        { 0x10ec0256, 0x1028, 0, "ALC3246" },
+       { 0x10ec0225, 0x1028, 0, "ALC3253" },
        { 0x10ec0670, 0x1025, 0, "ALC669X" },
        { 0x10ec0676, 0x1025, 0, "ALC679X" },
        { 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -1785,7 +1787,6 @@ enum {
        ALC882_FIXUP_NO_PRIMARY_HP,
        ALC887_FIXUP_ASUS_BASS,
        ALC887_FIXUP_BASS_CHMAP,
-       ALC882_FIXUP_DISABLE_AAMIX,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1947,8 +1948,6 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
 
 static void alc_fixup_bass_chmap(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action);
-static void alc_fixup_disable_aamix(struct hda_codec *codec,
-                                   const struct hda_fixup *fix, int action);
 
 static const struct hda_fixup alc882_fixups[] = {
        [ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2186,10 +2185,6 @@ static const struct hda_fixup alc882_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_bass_chmap,
        },
-       [ALC882_FIXUP_DISABLE_AAMIX] = {
-               .type = HDA_FIXUP_FUNC,
-               .v.func = alc_fixup_disable_aamix,
-       },
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2228,6 +2223,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
        SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
        SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
 
        /* All Apple entries are in codec SSIDs */
        SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -2257,7 +2253,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2651,6 +2646,7 @@ enum {
        ALC269_TYPE_ALC298,
        ALC269_TYPE_ALC255,
        ALC269_TYPE_ALC256,
+       ALC269_TYPE_ALC225,
 };
 
 /*
@@ -2680,6 +2676,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC298:
        case ALC269_TYPE_ALC255:
        case ALC269_TYPE_ALC256:
+       case ALC269_TYPE_ALC225:
                ssids = alc269_ssids;
                break;
        default:
@@ -3658,6 +3655,16 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
                WRITE_COEF(0xb7, 0x802b),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x4a, 1<<8, 0),
+               UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
+               UPDATE_COEF(0x63, 3<<14, 3<<14),
+               UPDATE_COEF(0x4a, 3<<4, 2<<4),
+               UPDATE_COEF(0x4a, 3<<10, 3<<10),
+               UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+               UPDATE_COEF(0x4a, 3<<10, 0),
+               {}
+       };
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -3682,6 +3689,9 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0668);
                break;
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               break;
        }
        codec_dbg(codec, "Headset jack set to unplugged mode.\n");
 }
@@ -3727,6 +3737,13 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
                UPDATE_COEF(0xc3, 0, 1<<12),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14),
+               UPDATE_COEF(0x4a, 3<<4, 2<<4),
+               UPDATE_COEF(0x63, 3<<14, 0),
+               {}
+       };
+
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -3772,6 +3789,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
                alc_process_coef_fw(codec, coef0688);
                snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
                break;
+       case 0x10ec0225:
+               alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
+               snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+               alc_process_coef_fw(codec, coef0225);
+               snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+               break;
        }
        codec_dbg(codec, "Headset jack set to mic-in mode.\n");
 }
@@ -3884,6 +3907,13 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
                WRITE_COEF(0xc3, 0x0000),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
+               UPDATE_COEF(0x49, 1<<8, 1<<8),
+               UPDATE_COEF(0x4a, 7<<6, 7<<6),
+               UPDATE_COEF(0x4a, 3<<4, 3<<4),
+               {}
+       };
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -3912,6 +3942,9 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0688);
                break;
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               break;
        }
        codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n");
 }
@@ -3955,6 +3988,13 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
                WRITE_COEF(0xc3, 0x0000),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10),
+               UPDATE_COEF(0x49, 1<<8, 1<<8),
+               UPDATE_COEF(0x4a, 7<<6, 7<<6),
+               UPDATE_COEF(0x4a, 3<<4, 3<<4),
+               {}
+       };
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -3983,6 +4023,9 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0688);
                break;
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               break;
        }
        codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
 }
@@ -4014,6 +4057,11 @@ static void alc_determine_headset_type(struct hda_codec *codec)
                WRITE_COEF(0xc3, 0x0c00),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+               UPDATE_COEF(0x49, 1<<8, 1<<8),
+               {}
+       };
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -4058,6 +4106,12 @@ static void alc_determine_headset_type(struct hda_codec *codec)
                val = alc_read_coef_idx(codec, 0xbe);
                is_ctia = (val & 0x1c02) == 0x1c02;
                break;
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               msleep(800);
+               val = alc_read_coef_idx(codec, 0x46);
+               is_ctia = (val & 0x00f0) == 0x00f0;
+               break;
        }
 
        codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n",
@@ -5560,6 +5614,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
        {}
 };
+#define ALC225_STANDARD_PINS \
+       {0x12, 0xb7a60130}, \
+       {0x21, 0x04211020}
 
 #define ALC256_STANDARD_PINS \
        {0x12, 0x90a60140}, \
@@ -5581,6 +5638,12 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {0x21, 0x03211020}
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x14, 0x901701a0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x14, 0x901701b0}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
@@ -5906,6 +5969,9 @@ static int patch_alc269(struct hda_codec *codec)
                spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
                break;
+       case 0x10ec0225:
+               spec->codec_variant = ALC269_TYPE_ALC225;
+               break;
        }
 
        if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6796,6 +6862,7 @@ static int patch_alc680(struct hda_codec *codec)
  */
 static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
index 2c7c5eb8b1e9514779b78bc278412803862062e9..37b70f8e878f715968c7496e01eeb493f8a2b7a4 100644 (file)
@@ -493,9 +493,9 @@ static void jack_update_power(struct hda_codec *codec,
        if (!spec->num_pwrs)
                return;
 
-       if (jack && jack->tbl->nid) {
-               stac_toggle_power_map(codec, jack->tbl->nid,
-                                     snd_hda_jack_detect(codec, jack->tbl->nid),
+       if (jack && jack->nid) {
+               stac_toggle_power_map(codec, jack->nid,
+                                     snd_hda_jack_detect(codec, jack->nid),
                                      true);
                return;
        }
index 0095a80a997fc157e97b70d3c12051d65d0ee4ab..a5843fc5ff204ee6efb32becd35e671927903f59 100644 (file)
@@ -34,7 +34,6 @@
 #include "pmac.h"
 #include <sound/pcm_params.h>
 #include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
 
 
 /* fixed frequency table for awacs, screamer, burgundy, DACA (44100 max) */
index d75deba5617db05dc02407b6c669d61ee1695a5e..dfcd38647606445d7dab130178c7ca4f765ff33c 100644 (file)
@@ -22,6 +22,7 @@ config SND_SUN_AMD7930
 config SND_SUN_CS4231
        tristate "Sun CS4231"
        select SND_PCM
+       select SND_TIMER
        help
          Say Y here to include support for CS4231 sound device on Sun.
 
index 23ea6d800c4c5283adc27d88fd26e74feaf066e0..4f6ce1cac8e20ef8b504073a6dfcc6236c0cf90c 100644 (file)
@@ -1121,6 +1121,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        switch (chip->usb_id) {
        case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
        case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+       case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
@@ -1205,8 +1206,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev)
         * "Playback Design" products need a 50ms delay after setting the
         * USB interface.
         */
-       if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
+       switch (le16_to_cpu(dev->descriptor.idVendor)) {
+       case 0x23ba: /* Playback Design */
+       case 0x0644: /* TEAC Corp. */
                mdelay(50);
+               break;
+       }
 }
 
 void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
@@ -1221,6 +1226,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                mdelay(20);
 
+       /*
+        * "TEAC Corp." products need a 20ms delay after each
+        * class compliant request
+        */
+       if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
+           (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+               mdelay(20);
+
        /* Marantz/Denon devices with USB DAC functionality need a delay
         * after each class compliant request
         */
@@ -1269,7 +1282,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
        case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
        case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
-       case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
+       case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
                if (fp->altsetting == 2)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
@@ -1278,6 +1291,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
        case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
        case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
+       case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
                if (fp->altsetting == 3)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
index 0a22407e1d7d8abb092ff3d24b6829a27c234bbe..5d34815c7ccb8e02ac6d4db0d82d4712af73f343 100644 (file)
@@ -77,6 +77,9 @@ include config/utilities.mak
 # Define NO_AUXTRACE if you do not want AUX area tracing support
 #
 # Define NO_LIBBPF if you do not want BPF support
+#
+# Define FEATURES_DUMP to provide features detection dump file
+# and bypass the feature detection
 
 # As per kernel Makefile, avoid funny character set dependencies
 unexport LC_ALL
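
(Illustrative usage, not part of the patch: one build can save its feature-detection results with "make feature-dump FEATURE_DUMP_COPY=/tmp/perf-features" (target added further down), and a later build can reuse them with "make FEATURES_DUMP=/tmp/perf-features"; the config/Makefile change below then includes that file instead of running the detection, and the same path is forwarded to the libbpf sub-make through FEATURE_DUMP_EXPORT. The /tmp path is an example only.)
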
@@ -166,6 +169,15 @@ ifeq ($(config),1)
 include config/Makefile
 endif
 
+# The FEATURE_DUMP_EXPORT holds location of the actual
+# FEATURE_DUMP file to be used to bypass feature detection
+# (for bpf or any other subproject)
+ifeq ($(FEATURES_DUMP),)
+FEATURE_DUMP_EXPORT := $(realpath $(OUTPUT)FEATURE-DUMP)
+else
+FEATURE_DUMP_EXPORT := $(FEATURES_DUMP)
+endif
+
 export prefix bindir sharedir sysconfdir DESTDIR
 
 # sparse is architecture-neutral, which means that we need to tell it
@@ -436,7 +448,7 @@ $(LIBAPI)-clean:
        $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
 
 $(LIBBPF): fixdep FORCE
-       $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(realpath $(OUTPUT)FEATURE-DUMP)
+       $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
 
 $(LIBBPF)-clean:
        $(call QUIET_CLEAN, libbpf)
@@ -610,6 +622,17 @@ clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean
        $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
        $(python-clean)
 
+#
+# To provide FEATURE-DUMP into $(FEATURE_DUMP_COPY)
+# file if defined, with no further action.
+feature-dump:
+ifdef FEATURE_DUMP_COPY
+       @cp $(OUTPUT)FEATURE-DUMP $(FEATURE_DUMP_COPY)
+       @echo "FEATURE-DUMP file copied into $(FEATURE_DUMP_COPY)"
+else
+       @echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP"
+endif
+
 #
 # Trick: if ../../.git does not exist - we are building out of tree for example,
 # then force version regeneration:
index 3e89ba825f6bf3b3f8b15b9110564c63283998b4..7f064eb371589aa887e112cddb54145d6268141b 100644 (file)
@@ -17,7 +17,7 @@ static pid_t spawn(void)
        if (pid)
                return pid;
 
-       while(1);
+       while(1)
                sleep(5);
        return 0;
 }
index e5959c136a1907ee9905f044ae2e7786cb47d075..511141b102e8464fe3e102007f792220306ad3b9 100644 (file)
@@ -181,7 +181,11 @@ LDFLAGS += -Wl,-z,noexecstack
 
 EXTLIBS = -lpthread -lrt -lm -ldl
 
+ifeq ($(FEATURES_DUMP),)
 include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
 
 ifeq ($(feature-stackprotector-all), 1)
   CFLAGS += -fstack-protector-all
index df38decc48c32df72e9dfa86ef3f7e7b035322e7..f918015512af96d1173891dac2e8f460ff65ca85 100644 (file)
@@ -5,7 +5,7 @@ ifeq ($(MAKECMDGOALS),)
 # no target specified, trigger the whole suite
 all:
        @echo "Testing Makefile";      $(MAKE) -sf tests/make MK=Makefile
-       @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf
+       @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf SET_PARALLEL=1 SET_O=1
 else
 # run only specific test over 'Makefile'
 %:
@@ -13,6 +13,26 @@ else
 endif
 else
 PERF := .
+PERF_O := $(PERF)
+O_OPT :=
+
+ifneq ($(O),)
+  FULL_O := $(shell readlink -f $(O) || echo $(O))
+  PERF_O := $(FULL_O)
+  ifeq ($(SET_O),1)
+    O_OPT := 'O=$(FULL_O)'
+  endif
+  K_O_OPT := 'O=$(FULL_O)'
+endif
+
+PARALLEL_OPT=
+ifeq ($(SET_PARALLEL),1)
+  cores := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+  ifeq ($(cores),0)
+    cores := 1
+  endif
+  PARALLEL_OPT="-j$(cores)"
+endif
 
 # As per kernel Makefile, avoid funny character set dependencies
 unexport LC_ALL
@@ -156,11 +176,11 @@ test_make_doc    := $(test_ok)
 test_make_help_O := $(test_ok)
 test_make_doc_O  := $(test_ok)
 
-test_make_python_perf_so := test -f $(PERF)/python/perf.so
+test_make_python_perf_so := test -f $(PERF_O)/python/perf.so
 
-test_make_perf_o           := test -f $(PERF)/perf.o
-test_make_util_map_o       := test -f $(PERF)/util/map.o
-test_make_util_pmu_bison_o := test -f $(PERF)/util/pmu-bison.o
+test_make_perf_o           := test -f $(PERF_O)/perf.o
+test_make_util_map_o       := test -f $(PERF_O)/util/map.o
+test_make_util_pmu_bison_o := test -f $(PERF_O)/util/pmu-bison.o
 
 define test_dest_files
   for file in $(1); do                         \
@@ -227,7 +247,7 @@ test_make_perf_o_O            := test -f $$TMP_O/perf.o
 test_make_util_map_o_O        := test -f $$TMP_O/util/map.o
 test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o
 
-test_default = test -x $(PERF)/perf
+test_default = test -x $(PERF_O)/perf
 test = $(if $(test_$1),$(test_$1),$(test_default))
 
 test_default_O = test -x $$TMP_O/perf
@@ -247,12 +267,12 @@ endif
 
 MAKEFLAGS := --no-print-directory
 
-clean := @(cd $(PERF); make -s -f $(MK) clean >/dev/null)
+clean := @(cd $(PERF); make -s -f $(MK) $(O_OPT) clean >/dev/null)
 
 $(run):
        $(call clean)
        @TMP_DEST=$$(mktemp -d); \
-       cmd="cd $(PERF) && make -f $(MK) DESTDIR=$$TMP_DEST $($@)"; \
+       cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST $($@)"; \
        echo "- $@: $$cmd" && echo $$cmd > $@ && \
        ( eval $$cmd ) >> $@ 2>&1; \
        echo "  test: $(call test,$@)" >> $@ 2>&1; \
@@ -263,7 +283,7 @@ $(run_O):
        $(call clean)
        @TMP_O=$$(mktemp -d); \
        TMP_DEST=$$(mktemp -d); \
-       cmd="cd $(PERF) && make -f $(MK) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
+       cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
        echo "- $@: $$cmd" && echo $$cmd > $@ && \
        ( eval $$cmd ) >> $@ 2>&1 && \
        echo "  test: $(call test_O,$@)" >> $@ 2>&1; \
@@ -276,17 +296,22 @@ tarpkg:
        ( eval $$cmd ) >> $@ 2>&1 && \
        rm -f $@
 
+KERNEL_O := ../..
+ifneq ($(O),)
+  KERNEL_O := $(O)
+endif
+
 make_kernelsrc:
-       @echo "- make -C <kernelsrc> tools/perf"
+       @echo "- make -C <kernelsrc> $(PARALLEL_OPT) $(K_O_OPT) tools/perf"
        $(call clean); \
-       (make -C ../.. tools/perf) > $@ 2>&1 && \
-       test -x perf && rm -f $@ || (cat $@ ; false)
+       (make -C ../.. $(PARALLEL_OPT) $(K_O_OPT) tools/perf) > $@ 2>&1 && \
+       test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
 
 make_kernelsrc_tools:
-       @echo "- make -C <kernelsrc>/tools perf"
+       @echo "- make -C <kernelsrc>/tools $(PARALLEL_OPT) $(K_O_OPT) perf"
        $(call clean); \
-       (make -C ../../tools perf) > $@ 2>&1 && \
-       test -x perf && rm -f $@ || (cat $@ ; false)
+       (make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \
+       test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
 
 all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
        @echo OK
index d4d7cc27252f1184bf2d678b6460f138099efd28..718bd46d47fa7bc88674a192dd1a8e8ab1fb7ca9 100644 (file)
@@ -755,11 +755,11 @@ static int annotate_browser__run(struct annotate_browser *browser,
                                nd = browser->curr_hot;
                        break;
                case K_UNTAB:
-                       if (nd != NULL)
+                       if (nd != NULL) {
                                nd = rb_next(nd);
                                if (nd == NULL)
                                        nd = rb_first(&browser->entries);
-                       else
+                       } else
                                nd = browser->curr_hot;
                        break;
                case K_F1:
index c226303e3da045743fe676c7c2ffc0ff13eac674..68a7612019dc3c3be315604693b68170183044d6 100644 (file)
@@ -131,6 +131,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
+                       hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
+                                          symlen);
                }
 
                if (h->mem_info->iaddr.sym) {
index d5636ba94b20f722d3f521614e8fae08b620fad4..40b7a0d0905b8d7f01972c6e38e225f108b15133 100644 (file)
@@ -1149,7 +1149,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
 
                machine = machines__find(machines, pid);
                if (!machine)
-                       machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
+                       machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }
 
index 2f901d15e06370d26d579869d351811f4d31f940..2b58edccd56f8bf63c0364d17b5ada656f4f582c 100644 (file)
@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
        int i, ret;
 
        aggr->val = aggr->ena = aggr->run = 0;
-       init_stats(ps->res_stats);
 
        if (counter->per_pkg)
                zero_per_pkg(counter);
index 3b2de6eb33763b0ab1a23195529fda8d6367eb1d..ab02209a7cf3b162431bfc006287f5fbd8c68f43 100644 (file)
@@ -1466,7 +1466,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
         * Read the build id if possible. This is required for
         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
         */
-       if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)
+       if (filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
                dso__set_build_id(dso, build_id);
 
        /*
index 7ec7df9e7fc731ab7e118425b5bd01acaab349d5..0c1a7e65bb815a692f9b20cd97b337c181699308 100644 (file)
@@ -113,7 +113,7 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res,
 }
 EXPORT_SYMBOL(__wrap_devm_memremap_pages);
 
-pfn_t __wrap_phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
+pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 {
        struct nfit_test_resource *nfit_res = get_nfit_res(addr);
 
index e86d937cc22c04cb36aecfe4fb4af0c5deed3825..60fe3c569bd90f9cbfdc9664720c598e9430e8a3 100644 (file)
@@ -45,7 +45,17 @@ static inline int ksft_exit_fail(void)
 }
 #endif
 
-#define NSEC_PER_SEC 1000000000L
+#define NSEC_PER_SEC 1000000000LL
+#define USEC_PER_SEC 1000000LL
+
+#define ADJ_SETOFFSET 0x0100
+
+#include <sys/syscall.h>
+static int clock_adjtime(clockid_t id, struct timex *tx)
+{
+       return syscall(__NR_clock_adjtime, id, tx);
+}
+
 
 /* clear NTP time_status & time_state */
 int clear_time_state(void)
@@ -193,10 +203,137 @@ out:
 }
 
 
+int set_offset(long long offset, int use_nano)
+{
+       struct timex tmx = {};
+       int ret;
+
+       tmx.modes = ADJ_SETOFFSET;
+       if (use_nano) {
+               tmx.modes |= ADJ_NANO;
+
+               tmx.time.tv_sec = offset / NSEC_PER_SEC;
+               tmx.time.tv_usec = offset % NSEC_PER_SEC;
+
+               if (offset < 0 && tmx.time.tv_usec) {
+                       tmx.time.tv_sec -= 1;
+                       tmx.time.tv_usec += NSEC_PER_SEC;
+               }
+       } else {
+               tmx.time.tv_sec = offset / USEC_PER_SEC;
+               tmx.time.tv_usec = offset % USEC_PER_SEC;
+
+               if (offset < 0 && tmx.time.tv_usec) {
+                       tmx.time.tv_sec -= 1;
+                       tmx.time.tv_usec += USEC_PER_SEC;
+               }
+       }
+
+       ret = clock_adjtime(CLOCK_REALTIME, &tmx);
+       if (ret < 0) {
+               printf("(sec: %ld  usec: %ld) ", tmx.time.tv_sec, tmx.time.tv_usec);
+               printf("[FAIL]\n");
+               return -1;
+       }
+       return 0;
+}
+
+int set_bad_offset(long sec, long usec, int use_nano)
+{
+       struct timex tmx = {};
+       int ret;
+
+       tmx.modes = ADJ_SETOFFSET;
+       if (use_nano)
+               tmx.modes |= ADJ_NANO;
+
+       tmx.time.tv_sec = sec;
+       tmx.time.tv_usec = usec;
+       ret = clock_adjtime(CLOCK_REALTIME, &tmx);
+       if (ret >= 0) {
+               printf("Invalid (sec: %ld  usec: %ld) did not fail! ", tmx.time.tv_sec, tmx.time.tv_usec);
+               printf("[FAIL]\n");
+               return -1;
+       }
+       return 0;
+}
+
+int validate_set_offset(void)
+{
+       printf("Testing ADJ_SETOFFSET... ");
+
+       /* Test valid values */
+       if (set_offset(NSEC_PER_SEC - 1, 1))
+               return -1;
+
+       if (set_offset(-NSEC_PER_SEC + 1, 1))
+               return -1;
+
+       if (set_offset(-NSEC_PER_SEC - 1, 1))
+               return -1;
+
+       if (set_offset(5 * NSEC_PER_SEC, 1))
+               return -1;
+
+       if (set_offset(-5 * NSEC_PER_SEC, 1))
+               return -1;
+
+       if (set_offset(5 * NSEC_PER_SEC + NSEC_PER_SEC / 2, 1))
+               return -1;
+
+       if (set_offset(-5 * NSEC_PER_SEC - NSEC_PER_SEC / 2, 1))
+               return -1;
+
+       if (set_offset(USEC_PER_SEC - 1, 0))
+               return -1;
+
+       if (set_offset(-USEC_PER_SEC + 1, 0))
+               return -1;
+
+       if (set_offset(-USEC_PER_SEC - 1, 0))
+               return -1;
+
+       if (set_offset(5 * USEC_PER_SEC, 0))
+               return -1;
+
+       if (set_offset(-5 * USEC_PER_SEC, 0))
+               return -1;
+
+       if (set_offset(5 * USEC_PER_SEC + USEC_PER_SEC / 2, 0))
+               return -1;
+
+       if (set_offset(-5 * USEC_PER_SEC - USEC_PER_SEC / 2, 0))
+               return -1;
+
+       /* Test invalid values */
+       if (set_bad_offset(0, -1, 1))
+               return -1;
+       if (set_bad_offset(0, -1, 0))
+               return -1;
+       if (set_bad_offset(0, 2 * NSEC_PER_SEC, 1))
+               return -1;
+       if (set_bad_offset(0, 2 * USEC_PER_SEC, 0))
+               return -1;
+       if (set_bad_offset(0, NSEC_PER_SEC, 1))
+               return -1;
+       if (set_bad_offset(0, USEC_PER_SEC, 0))
+               return -1;
+       if (set_bad_offset(0, -NSEC_PER_SEC, 1))
+               return -1;
+       if (set_bad_offset(0, -USEC_PER_SEC, 0))
+               return -1;
+
+       printf("[OK]\n");
+       return 0;
+}
+
 int main(int argc, char **argv)
 {
        if (validate_freq())
                return ksft_exit_fail();
 
+       if (validate_set_offset())
+               return ksft_exit_fail();
+
        return ksft_exit_pass();
 }
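
(Worked example for the normalization in set_offset() above: a requested offset of -NSEC_PER_SEC/2 in ADJ_NANO mode first yields tv_sec = 0, tv_usec = -500000000 from the divide/modulo, and the negative-offset branch then rewrites it to tv_sec = -1, tv_usec = 500000000, keeping the fractional field in [0, NSEC_PER_SEC); the set_bad_offset() calls check that the kernel rejects a negative or out-of-range fractional part.)
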
index 26b7926bda8849a8b931fff6750a3ed8217dae89..ba34f9e96efd6bd760870150dd9cb6ba0eacbd69 100644 (file)
@@ -1,15 +1,19 @@
 #if defined(__i386__) || defined(__x86_64__)
 #define barrier() asm volatile("" ::: "memory")
-#define mb() __sync_synchronize()
-
-#define smp_mb()       mb()
-# define dma_rmb()     barrier()
-# define dma_wmb()     barrier()
-# define smp_rmb()     barrier()
-# define smp_wmb()     barrier()
+#define virt_mb() __sync_synchronize()
+#define virt_rmb() barrier()
+#define virt_wmb() barrier()
+/* Atomic store should be enough, but gcc generates worse code in that case. */
+#define virt_store_mb(var, value)  do { \
+       typeof(var) virt_store_mb_value = (value); \
+       __atomic_exchange(&(var), &virt_store_mb_value, &virt_store_mb_value, \
+                         __ATOMIC_SEQ_CST); \
+       barrier(); \
+} while (0);
 /* Weak barriers should be used. If not - it's a bug */
-# define rmb() abort()
-# define wmb() abort()
+# define mb() abort()
+# define rmb() abort()
+# define wmb() abort()
 #else
 #error Please fill in barrier macros
 #endif
diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h
new file mode 100644 (file)
index 0000000..845960e
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef LINUX_COMPILER_H
+#define LINUX_COMPILER_H
+
+#define WRITE_ONCE(var, val) \
+       (*((volatile typeof(val) *)(&(var))) = (val))
+
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
+
+#endif
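
(A hypothetical usage sketch, not from the patch; the wrappers force single volatile accesses to data shared with another thread, and the names below are made up for illustration.)

#include <linux/compiler.h>

static int stop;

/* writer thread */
static void request_stop(void)
{
        WRITE_ONCE(stop, 1);
}

/* reader thread */
static int should_stop(void)
{
        return READ_ONCE(stop);
}
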
index 4db7d5691ba71b33d2bda8c85d634fe3781ae6d6..0338499482159883bb3e30452d5afc0f77af577e 100644 (file)
@@ -8,6 +8,7 @@
 #include <assert.h>
 #include <stdarg.h>
 
+#include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/printk.h>
 #include <linux/bug.h>
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
new file mode 100644 (file)
index 0000000..feaa64a
--- /dev/null
@@ -0,0 +1,22 @@
+all:
+
+all: ring virtio_ring_0_9 virtio_ring_poll
+
+CFLAGS += -Wall
+CFLAGS += -pthread -O2 -ggdb
+LDFLAGS += -pthread -O2 -ggdb
+
+main.o: main.c main.h
+ring.o: ring.c main.h
+virtio_ring_0_9.o: virtio_ring_0_9.c main.h
+virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h
+ring: ring.o main.o
+virtio_ring_0_9: virtio_ring_0_9.o main.o
+virtio_ring_poll: virtio_ring_poll.o main.o
+clean:
+       -rm main.o
+       -rm ring.o ring
+       -rm virtio_ring_0_9.o virtio_ring_0_9
+       -rm virtio_ring_poll.o virtio_ring_poll
+
+.PHONY: all clean
diff --git a/tools/virtio/ringtest/README b/tools/virtio/ringtest/README
new file mode 100644 (file)
index 0000000..34e94c4
--- /dev/null
@@ -0,0 +1,2 @@
+Partial implementation of various ring layouts, useful to tune virtio design.
+Uses shared memory heavily.
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
new file mode 100644 (file)
index 0000000..3a5ff43
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Command line processing and common functions for ring benchmarking.
+ */
+#define _GNU_SOURCE
+#include <getopt.h>
+#include <pthread.h>
+#include <assert.h>
+#include <sched.h>
+#include "main.h"
+#include <sys/eventfd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <limits.h>
+
+int runcycles = 10000000;
+int max_outstanding = INT_MAX;
+int batch = 1;
+
+bool do_sleep = false;
+bool do_relax = false;
+bool do_exit = true;
+
+unsigned ring_size = 256;
+
+static int kickfd = -1;
+static int callfd = -1;
+
+void notify(int fd)
+{
+       unsigned long long v = 1;
+       int r;
+
+       vmexit();
+       r = write(fd, &v, sizeof v);
+       assert(r == sizeof v);
+       vmentry();
+}
+
+void wait_for_notify(int fd)
+{
+       unsigned long long v = 1;
+       int r;
+
+       vmexit();
+       r = read(fd, &v, sizeof v);
+       assert(r == sizeof v);
+       vmentry();
+}
+
+void kick(void)
+{
+       notify(kickfd);
+}
+
+void wait_for_kick(void)
+{
+       wait_for_notify(kickfd);
+}
+
+void call(void)
+{
+       notify(callfd);
+}
+
+void wait_for_call(void)
+{
+       wait_for_notify(callfd);
+}
+
+void set_affinity(const char *arg)
+{
+       cpu_set_t cpuset;
+       int ret;
+       pthread_t self;
+       long int cpu;
+       char *endptr;
+
+       if (!arg)
+               return;
+
+       cpu = strtol(arg, &endptr, 0);
+       assert(!*endptr);
+
+       assert(cpu >= 0 || cpu < CPU_SETSIZE);
+
+       self = pthread_self();
+       CPU_ZERO(&cpuset);
+       CPU_SET(cpu, &cpuset);
+
+       ret = pthread_setaffinity_np(self, sizeof(cpu_set_t), &cpuset);
+       assert(!ret);
+}
+
+static void run_guest(void)
+{
+       int completed_before;
+       int completed = 0;
+       int started = 0;
+       int bufs = runcycles;
+       int spurious = 0;
+       int r;
+       unsigned len;
+       void *buf;
+       int tokick = batch;
+
+       for (;;) {
+               if (do_sleep)
+                       disable_call();
+               completed_before = completed;
+               do {
+                       if (started < bufs &&
+                           started - completed < max_outstanding) {
+                               r = add_inbuf(0, NULL, "Hello, world!");
+                               if (__builtin_expect(r == 0, true)) {
+                                       ++started;
+                                       if (!--tokick) {
+                                               tokick = batch;
+                                               if (do_sleep)
+                                                       kick_available();
+                                       }
+
+                               }
+                       } else
+                               r = -1;
+
+                       /* Flush out completed bufs if any */
+                       if (get_buf(&len, &buf)) {
+                               ++completed;
+                               if (__builtin_expect(completed == bufs, false))
+                                       return;
+                               r = 0;
+                       }
+               } while (r == 0);
+               if (completed == completed_before)
+                       ++spurious;
+               assert(completed <= bufs);
+               assert(started <= bufs);
+               if (do_sleep) {
+                       if (enable_call())
+                               wait_for_call();
+               } else {
+                       poll_used();
+               }
+       }
+}
+
+static void run_host(void)
+{
+       int completed_before;
+       int completed = 0;
+       int spurious = 0;
+       int bufs = runcycles;
+       unsigned len;
+       void *buf;
+
+       for (;;) {
+               if (do_sleep) {
+                       if (enable_kick())
+                               wait_for_kick();
+               } else {
+                       poll_avail();
+               }
+               if (do_sleep)
+                       disable_kick();
+               completed_before = completed;
+               while (__builtin_expect(use_buf(&len, &buf), true)) {
+                       if (do_sleep)
+                               call_used();
+                       ++completed;
+                       if (__builtin_expect(completed == bufs, false))
+                               return;
+               }
+               if (completed == completed_before)
+                       ++spurious;
+               assert(completed <= bufs);
+               if (completed == bufs)
+                       break;
+       }
+}
+
+void *start_guest(void *arg)
+{
+       set_affinity(arg);
+       run_guest();
+       pthread_exit(NULL);
+}
+
+void *start_host(void *arg)
+{
+       set_affinity(arg);
+       run_host();
+       pthread_exit(NULL);
+}
+
+static const char optstring[] = "";
+static const struct option longopts[] = {
+       {
+               .name = "help",
+               .has_arg = no_argument,
+               .val = 'h',
+       },
+       {
+               .name = "host-affinity",
+               .has_arg = required_argument,
+               .val = 'H',
+       },
+       {
+               .name = "guest-affinity",
+               .has_arg = required_argument,
+               .val = 'G',
+       },
+       {
+               .name = "ring-size",
+               .has_arg = required_argument,
+               .val = 'R',
+       },
+       {
+               .name = "run-cycles",
+               .has_arg = required_argument,
+               .val = 'C',
+       },
+       {
+               .name = "outstanding",
+               .has_arg = required_argument,
+               .val = 'o',
+       },
+       {
+               .name = "batch",
+               .has_arg = required_argument,
+               .val = 'b',
+       },
+       {
+               .name = "sleep",
+               .has_arg = no_argument,
+               .val = 's',
+       },
+       {
+               .name = "relax",
+               .has_arg = no_argument,
+               .val = 'x',
+       },
+       {
+               .name = "exit",
+               .has_arg = no_argument,
+               .val = 'e',
+       },
+       {
+       }
+};
+
+static void help(void)
+{
+       fprintf(stderr, "Usage: <test> [--help]"
+               " [--host-affinity H]"
+               " [--guest-affinity G]"
+               " [--ring-size R (default: %d)]"
+               " [--run-cycles C (default: %d)]"
+               " [--batch b]"
+               " [--outstanding o]"
+               " [--sleep]"
+               " [--relax]"
+               " [--exit]"
+               "\n",
+               ring_size,
+               runcycles);
+}
+
+int main(int argc, char **argv)
+{
+       int ret;
+       pthread_t host, guest;
+       void *tret;
+       char *host_arg = NULL;
+       char *guest_arg = NULL;
+       char *endptr;
+       long int c;
+
+       kickfd = eventfd(0, 0);
+       assert(kickfd >= 0);
+       callfd = eventfd(0, 0);
+       assert(callfd >= 0);
+
+       for (;;) {
+               int o = getopt_long(argc, argv, optstring, longopts, NULL);
+               switch (o) {
+               case -1:
+                       goto done;
+               case '?':
+                       help();
+                       exit(2);
+               case 'H':
+                       host_arg = optarg;
+                       break;
+               case 'G':
+                       guest_arg = optarg;
+                       break;
+               case 'R':
+                       ring_size = strtol(optarg, &endptr, 0);
+                       assert(ring_size && !(ring_size & (ring_size - 1)));
+                       assert(!*endptr);
+                       break;
+               case 'C':
+                       c = strtol(optarg, &endptr, 0);
+                       assert(!*endptr);
+                       assert(c > 0 && c < INT_MAX);
+                       runcycles = c;
+                       break;
+               case 'o':
+                       c = strtol(optarg, &endptr, 0);
+                       assert(!*endptr);
+                       assert(c > 0 && c < INT_MAX);
+                       max_outstanding = c;
+                       break;
+               case 'b':
+                       c = strtol(optarg, &endptr, 0);
+                       assert(!*endptr);
+                       assert(c > 0 && c < INT_MAX);
+                       batch = c;
+                       break;
+               case 's':
+                       do_sleep = true;
+                       break;
+               case 'x':
+                       do_relax = true;
+                       break;
+               case 'e':
+                       do_exit = true;
+                       break;
+               default:
+                       help();
+                       exit(4);
+                       break;
+               }
+       }
+
+       /* does nothing here, used to make sure all smp APIs compile */
+       smp_acquire();
+       smp_release();
+       smp_mb();
+done:
+
+       if (batch > max_outstanding)
+               batch = max_outstanding;
+
+       if (optind < argc) {
+               help();
+               exit(4);
+       }
+       alloc_ring();
+
+       ret = pthread_create(&host, NULL, start_host, host_arg);
+       assert(!ret);
+       ret = pthread_create(&guest, NULL, start_guest, guest_arg);
+       assert(!ret);
+
+       ret = pthread_join(guest, &tret);
+       assert(!ret);
+       ret = pthread_join(host, &tret);
+       assert(!ret);
+       return 0;
+}
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
new file mode 100644 (file)
index 0000000..16917ac
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Common macros and functions for ring benchmarking.
+ */
+#ifndef MAIN_H
+#define MAIN_H
+
+#include <stdbool.h>
+
+extern bool do_exit;
+
+#if defined(__x86_64__) || defined(__i386__)
+#include "x86intrin.h"
+
+static inline void wait_cycles(unsigned long long cycles)
+{
+       unsigned long long t;
+
+       t = __rdtsc();
+       while (__rdtsc() - t < cycles) {}
+}
+
+#define VMEXIT_CYCLES 500
+#define VMENTRY_CYCLES 500
+
+#else
+static inline void wait_cycles(unsigned long long cycles)
+{
+       _Exit(5);
+}
+#define VMEXIT_CYCLES 0
+#define VMENTRY_CYCLES 0
+#endif
+
+static inline void vmexit(void)
+{
+       if (!do_exit)
+               return;
+       
+       wait_cycles(VMEXIT_CYCLES);
+}
+static inline void vmentry(void)
+{
+       if (!do_exit)
+               return;
+       
+       wait_cycles(VMENTRY_CYCLES);
+}
+
+/* implemented by ring */
+void alloc_ring(void);
+/* guest side */
+int add_inbuf(unsigned, void *, void *);
+void *get_buf(unsigned *, void **);
+void disable_call();
+bool enable_call();
+void kick_available();
+void poll_used();
+/* host side */
+void disable_kick();
+bool enable_kick();
+bool use_buf(unsigned *, void **);
+void call_used();
+void poll_avail();
+
+/* implemented by main */
+extern bool do_sleep;
+void kick(void);
+void wait_for_kick(void);
+void call(void);
+void wait_for_call(void);
+
+extern unsigned ring_size;
+
+/* Compiler barrier - similar to what Linux uses */
+#define barrier() asm volatile("" ::: "memory")
+
+/* Is there a portable way to do this? */
+#if defined(__x86_64__) || defined(__i386__)
+#define cpu_relax() asm ("rep; nop" ::: "memory")
+#else
+#define cpu_relax() assert(0)
+#endif
+
+extern bool do_relax;
+
+static inline void busy_wait(void)
+{
+       if (do_relax)
+               cpu_relax();
+       else
+               /* prevent compiler from removing busy loops */
+               barrier();
+} 
+
+/*
+ * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
+ * with other __ATOMIC_SEQ_CST calls.
+ */
+#define smp_mb() __sync_synchronize()
+
+/*
+ * This abuses the atomic builtins for thread fences, and
+ * adds a compiler barrier.
+ */
+#define smp_release() do { \
+    barrier(); \
+    __atomic_thread_fence(__ATOMIC_RELEASE); \
+} while (0)
+
+#define smp_acquire() do { \
+    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
+    barrier(); \
+} while (0)
+
+#endif
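
(A minimal pairing sketch, not part of the patch and assumed to be built together with main.h above: the producer publishes its payload before the flag with smp_release(), the consumer waits for the flag and reads the payload only after smp_acquire(); this mirrors the Barrier A/B pairing that ring.c below relies on.)

static unsigned payload;
static unsigned flag;

static void produce(unsigned v)
{
        payload = v;
        smp_release();          /* order the payload write before the flag write */
        flag = 1;
}

static unsigned consume(void)
{
        while (!flag)
                busy_wait();    /* barrier() inside forces the flag to be re-read */
        smp_acquire();          /* order the flag read before the payload read */
        return payload;
}
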
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
new file mode 100644 (file)
index 0000000..c25c8d2
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Simple descriptor-based ring. virtio 0.9 compatible event index is used for
+ * signalling, unconditionally.
+ */
+#define _GNU_SOURCE
+#include "main.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/* Next - Where next entry will be written.
+ * Prev - "Next" value when event triggered previously.
+ * Event - Peer requested event after writing this entry.
+ */
+static inline bool need_event(unsigned short event,
+                             unsigned short next,
+                             unsigned short prev)
+{
+       return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
+}
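/* Worked example (illustration only, not part of the patch): with prev = 3
 * and next = 7, entries 3..6 have been written since the last notification.
 * If the peer asked for an event after entry 5 (event = 5), then
 * (unsigned short)(7 - 5 - 1) = 1 < (unsigned short)(7 - 3) = 4, so
 * need_event() returns true; with event = 8 the left-hand side wraps to
 * 65534 and no notification is due yet.
 */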
+
+/* Design:
+ * Guest adds descriptors with unique index values and DESC_HW in flags.
+ * Host overwrites used descriptors with correct len, index, and DESC_HW clear.
+ * Flags are always set last.
+ */
+#define DESC_HW 0x1
+
+struct desc {
+       unsigned short flags;
+       unsigned short index;
+       unsigned len;
+       unsigned long long addr;
+};
+
+/* how much padding is needed to avoid false cache sharing */
+#define HOST_GUEST_PADDING 0x80
+
+/* Mostly read */
+struct event {
+       unsigned short kick_index;
+       unsigned char reserved0[HOST_GUEST_PADDING - 2];
+       unsigned short call_index;
+       unsigned char reserved1[HOST_GUEST_PADDING - 2];
+};
+
+struct data {
+       void *buf; /* descriptor is writeable, we can't get buf from there */
+       void *data;
+} *data;
+
+struct desc *ring;
+struct event *event;
+
+struct guest {
+       unsigned avail_idx;
+       unsigned last_used_idx;
+       unsigned num_free;
+       unsigned kicked_avail_idx;
+       unsigned char reserved[HOST_GUEST_PADDING - 12];
+} guest;
+
+struct host {
+       /* we do not need to track last avail index
+        * unless we have more than one in flight.
+        */
+       unsigned used_idx;
+       unsigned called_used_idx;
+       unsigned char reserved[HOST_GUEST_PADDING - 4];
+} host;
+
+/* implemented by ring */
+void alloc_ring(void)
+{
+       int ret;
+       int i;
+
+       ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
+       if (ret) {
+               perror("Unable to allocate ring buffer.\n");
+               exit(3);
+       }
+       event = malloc(sizeof *event);
+       if (!event) {
+               perror("Unable to allocate event buffer.\n");
+               exit(3);
+       }
+       memset(event, 0, sizeof *event);
+       guest.avail_idx = 0;
+       guest.kicked_avail_idx = -1;
+       guest.last_used_idx = 0;
+       host.used_idx = 0;
+       host.called_used_idx = -1;
+       for (i = 0; i < ring_size; ++i) {
+               struct desc desc = {
+                       .index = i,
+               };
+               ring[i] = desc;
+       }
+       guest.num_free = ring_size;
+       data = malloc(ring_size * sizeof *data);
+       if (!data) {
+               perror("Unable to allocate data buffer.\n");
+               exit(3);
+       }
+       memset(data, 0, ring_size * sizeof *data);
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+       unsigned head, index;
+
+       if (!guest.num_free)
+               return -1;
+
+       guest.num_free--;
+       head = (ring_size - 1) & (guest.avail_idx++);
+
+       /* Start with a write. On MESI architectures this helps
+        * avoid a shared state with consumer that is polling this descriptor.
+        */
+       ring[head].addr = (unsigned long)(void*)buf;
+       ring[head].len = len;
+       /* read below might bypass write above. That is OK because it's just an
+        * optimization. If this happens, we will get the cache line in a
+        * shared state which is unfortunate, but probably not worth it to
+        * add an explicit full barrier to avoid this.
+        */
+       barrier();
+       index = ring[head].index;
+       data[index].buf = buf;
+       data[index].data = datap;
+       /* Barrier A (for pairing) */
+       smp_release();
+       ring[head].flags = DESC_HW;
+
+       return 0;
+}
+
+void *get_buf(unsigned *lenp, void **bufp)
+{
+       unsigned head = (ring_size - 1) & guest.last_used_idx;
+       unsigned index;
+       void *datap;
+
+       if (ring[head].flags & DESC_HW)
+               return NULL;
+       /* Barrier B (for pairing) */
+       smp_acquire();
+       *lenp = ring[head].len;
+       index = ring[head].index & (ring_size - 1);
+       datap = data[index].data;
+       *bufp = data[index].buf;
+       data[index].buf = NULL;
+       data[index].data = NULL;
+       guest.num_free++;
+       guest.last_used_idx++;
+       return datap;
+}
+
+void poll_used(void)
+{
+       unsigned head = (ring_size - 1) & guest.last_used_idx;
+
+       while (ring[head].flags & DESC_HW)
+               busy_wait();
+}
+
+void disable_call()
+{
+       /* Doing nothing to disable calls might cause
+        * extra interrupts, but reduces the number of cache misses.
+        */
+}
+
+bool enable_call()
+{
+       unsigned head = (ring_size - 1) & guest.last_used_idx;
+
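+       /* Publish the index at which we want to be called, then re-check the
+        * ring: returning true means nothing is pending yet, false means a
+        * completion slipped in meanwhile and should be handled right away.
+        */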
+       event->call_index = guest.last_used_idx;
+       /* Flush call index write */
+       /* Barrier D (for pairing) */
+       smp_mb();
+       return ring[head].flags & DESC_HW;
+}
+
+void kick_available(void)
+{
+       /* Flush in previous flags write */
+       /* Barrier C (for pairing) */
+       smp_mb();
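+       /* Event-index suppression: kick only if avail_idx has advanced past
+        * the kick_index requested by the host since our last kick.
+        */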
+       if (!need_event(event->kick_index,
+                       guest.avail_idx,
+                       guest.kicked_avail_idx))
+               return;
+
+       guest.kicked_avail_idx = guest.avail_idx;
+       kick();
+}
+
+/* host side */
+void disable_kick()
+{
+       /* Doing nothing to disable kicks might cause
+        * extra interrupts, but reduces the number of cache misses.
+        */
+}
+
+bool enable_kick()
+{
+       unsigned head = (ring_size - 1) & host.used_idx;
+
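+       /* Publish the index at which we want to be kicked, then re-check the
+        * ring: returning true means no new buffers are pending yet, false
+        * means one slipped in meanwhile and should be processed right away.
+        */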
+       event->kick_index = host.used_idx;
+       /* Barrier C (for pairing) */
+       smp_mb();
+       return !(ring[head].flags & DESC_HW);
+}
+
+void poll_avail(void)
+{
+       unsigned head = (ring_size - 1) & host.used_idx;
+
+       while (!(ring[head].flags & DESC_HW))
+               busy_wait();
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+       unsigned head = (ring_size - 1) & host.used_idx;
+
+       if (!(ring[head].flags & DESC_HW))
+               return false;
+
+       /* make sure length read below is not speculated */
+       /* Barrier A (for pairing) */
+       smp_acquire();
+
+       /* simple in-order completion: we don't need
+        * to touch index at all. This also means we
+        * can just modify the descriptor in-place.
+        */
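+       /* "Process" the buffer by simply reporting one byte less than was
+        * posted.
+        */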
+       ring[head].len--;
+       /* Make sure len is valid before flags.
+        * Note: the alternative is to write len and flags in one access -
+        * possible on 64-bit architectures - but wmb is essentially free on
+        * Intel anyway, so there is no easy way to measure whether it would be
+        * a gain.
+        */
+       /* Barrier B (for pairing) */
+       smp_release();
+       ring[head].flags = 0;
+       host.used_idx++;
+       return true;
+}
+
+void call_used(void)
+{
+       /* Flush in previous flags write */
+       /* Barrier D (for pairing) */
+       smp_mb();
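+       /* Mirror of kick_available(): call only if used_idx has advanced past
+        * the call_index requested by the guest since our last call.
+        */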
+       if (!need_event(event->call_index,
+                       host.used_idx,
+                       host.called_used_idx))
+               return;
+
+       host.called_used_idx = host.used_idx;
+       call();
+}
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
new file mode 100755 (executable)
index 0000000..52b0f71
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#Use the last CPU for the host. Why not the first?
+#Many devices tend to use cpu0 by default, so
+#it tends to be busier.
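+#Numeric entries under /dev/cpu correspond to CPUs; grep -v '[a-z]' filters
+#out non-CPU entries (e.g. microcode).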
+HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1)
+
+#Run the command on all CPUs
+for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n);
+do
+       #Don't run guest and host on the same CPU.
+       #It actually works OK when signalling (--sleep) is used.
+       if
+               (echo "$@" | grep -e "--sleep" > /dev/null) || \
+                       test $HOST_AFFINITY '!=' $cpu
+       then
+               echo "GUEST AFFINITY $cpu"
+               "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
+       fi
+done
+echo "NO GUEST AFFINITY"
+"$@" --host-affinity $HOST_AFFINITY
+echo "NO AFFINITY"
+"$@"
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
new file mode 100644 (file)
index 0000000..47c9a1a
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Partial implementation of virtio 0.9. The event index is used for
+ * signalling, unconditionally. The design roughly follows the Linux kernel
+ * implementation in order to be able to judge its performance.
+ */
+#define _GNU_SOURCE
+#include "main.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <linux/virtio_ring.h>
+
+struct data {
+       void *data;
+} *data;
+
+struct vring ring;
+
+/* Defining RING_POLL (see virtio_ring_poll.c) activates experimental ring
+ * polling code (which skips index reads on the consumer in favor of looking
+ * at the high bits of ring id ^ 0x8000).
+ */
+/* #ifdef RING_POLL */
+
+/* how much padding is needed to avoid false cache sharing */
+#define HOST_GUEST_PADDING 0x80
+
+struct guest {
+       unsigned short avail_idx;
+       unsigned short last_used_idx;
+       unsigned short num_free;
+       unsigned short kicked_avail_idx;
+       unsigned short free_head;
+       unsigned char reserved[HOST_GUEST_PADDING - 10];
+} guest;
+
+struct host {
+       /* we do not need to track last avail index
+        * unless we have more than one in flight.
+        */
+       unsigned short used_idx;
+       unsigned short called_used_idx;
+       unsigned char reserved[HOST_GUEST_PADDING - 4];
+} host;
+
+/* implemented by ring */
+void alloc_ring(void)
+{
+       int ret;
+       int i;
+       void *p;
+
+       ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
+       if (ret) {
+               perror("Unable to allocate ring buffer.\n");
+               exit(3);
+       }
+       memset(p, 0, vring_size(ring_size, 0x1000));
+       vring_init(&ring, ring_size, p, 0x1000);
+
+       guest.avail_idx = 0;
+       guest.kicked_avail_idx = -1;
+       guest.last_used_idx = 0;
+       /* Put everything on the free list. */
+       guest.free_head = 0;
+       for (i = 0; i < ring_size - 1; i++)
+               ring.desc[i].next = i + 1;
+       host.used_idx = 0;
+       host.called_used_idx = -1;
+       guest.num_free = ring_size;
+       data = malloc(ring_size * sizeof *data);
+       if (!data) {
+               perror("Unable to allocate data buffer.\n");
+               exit(3);
+       }
+       memset(data, 0, ring_size * sizeof *data);
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+       unsigned head, avail;
+       struct vring_desc *desc;
+
+       if (!guest.num_free)
+               return -1;
+
+       head = guest.free_head;
+       guest.num_free--;
+
+       desc = ring.desc;
+       desc[head].flags = VRING_DESC_F_NEXT;
+       desc[head].addr = (unsigned long)(void *)buf;
+       desc[head].len = len;
+       /* Set NEXT above and clear it below to simulate the way
+        * we would have to flip the flag if we had multiple
+        * descriptors.
+        */
+       desc[head].flags &= ~VRING_DESC_F_NEXT;
+       guest.free_head = desc[head].next;
+
+       data[head].data = datap;
+
+#ifdef RING_POLL
+       /* Barrier A (for pairing) */
+       smp_release();
+       avail = guest.avail_idx++;
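+       /* Store the descriptor head in the low bits and the avail counter
+        * (xor 0x8000) in the high bits, so the consumer can spot new entries
+        * without reading avail->idx.
+        */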
+       ring.avail->ring[avail & (ring_size - 1)] =
+               (head | (avail & ~(ring_size - 1))) ^ 0x8000;
+#else
+       avail = (ring_size - 1) & (guest.avail_idx++);
+       ring.avail->ring[avail] = head;
+       /* Barrier A (for pairing) */
+       smp_release();
+#endif
+       ring.avail->idx = guest.avail_idx;
+       return 0;
+}
+
+void *get_buf(unsigned *lenp, void **bufp)
+{
+       unsigned head;
+       unsigned index;
+       void *datap;
+
+#ifdef RING_POLL
+       head = (ring_size - 1) & guest.last_used_idx;
+       index = ring.used->ring[head].id;
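+       /* The entry is only valid once its high bits match
+        * last_used_idx ^ 0x8000; otherwise it is left over from an earlier
+        * lap around the ring.
+        */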
+       if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
+               return NULL;
+       /* Barrier B (for pairing) */
+       smp_acquire();
+       index &= ring_size - 1;
+#else
+       if (ring.used->idx == guest.last_used_idx)
+               return NULL;
+       /* Barrier B (for pairing) */
+       smp_acquire();
+       head = (ring_size - 1) & guest.last_used_idx;
+       index = ring.used->ring[head].id;
+#endif
+       *lenp = ring.used->ring[head].len;
+       datap = data[index].data;
+       *bufp = (void*)(unsigned long)ring.desc[index].addr;
+       data[index].data = NULL;
+       ring.desc[index].next = guest.free_head;
+       guest.free_head = index;
+       guest.num_free++;
+       guest.last_used_idx++;
+       return datap;
+}
+
+void poll_used(void)
+{
+#ifdef RING_POLL
+       unsigned head = (ring_size - 1) & guest.last_used_idx;
+
+       for (;;) {
+               unsigned index = ring.used->ring[head].id;
+
+               if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
+                       busy_wait();
+               else
+                       break;
+       }
+#else
+       unsigned head = guest.last_used_idx;
+
+       while (ring.used->idx == head)
+               busy_wait();
+#endif
+}
+
+void disable_call()
+{
+       /* Doing nothing to disable calls might cause
+        * extra interrupts, but reduces the number of cache misses.
+        */
+}
+
+bool enable_call()
+{
+       unsigned short last_used_idx;
+
+       vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
+       /* Flush call index write */
+       /* Barrier D (for pairing) */
+       smp_mb();
+#ifdef RING_POLL
+       {
+               unsigned short head = last_used_idx & (ring_size - 1);
+               unsigned index = ring.used->ring[head].id;
+
+               return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
+       }
+#else
+       return ring.used->idx == last_used_idx;
+#endif
+}
+
+void kick_available(void)
+{
+       /* Flush in previous flags write */
+       /* Barrier C (for pairing) */
+       smp_mb();
+       if (!vring_need_event(vring_avail_event(&ring),
+                             guest.avail_idx,
+                             guest.kicked_avail_idx))
+               return;
+
+       guest.kicked_avail_idx = guest.avail_idx;
+       kick();
+}
+
+/* host side */
+void disable_kick()
+{
+       /* Doing nothing to disable kicks might cause
+        * extra interrupts, but reduces the number of cache misses.
+        */
+}
+
+bool enable_kick()
+{
+       unsigned head = host.used_idx;
+
+       vring_avail_event(&ring) = head;
+       /* Barrier C (for pairing) */
+       smp_mb();
+#ifdef RING_POLL
+       {
+               unsigned index = ring.avail->ring[head & (ring_size - 1)];
+
+               return (index ^ head ^ 0x8000) & ~(ring_size - 1);
+       }
+#else
+       return head == ring.avail->idx;
+#endif
+}
+
+void poll_avail(void)
+{
+       unsigned head = host.used_idx;
+#ifdef RING_POLL
+       for (;;) {
+               unsigned index = ring.avail->ring[head & (ring_size - 1)];
+               if ((index ^ head ^ 0x8000) & ~(ring_size - 1))
+                       busy_wait();
+               else
+                       break;
+       }
+#else
+       while (ring.avail->idx == head)
+               busy_wait();
+#endif
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+       unsigned used_idx = host.used_idx;
+       struct vring_desc *desc;
+       unsigned head;
+
+#ifdef RING_POLL
+       head = ring.avail->ring[used_idx & (ring_size - 1)];
+       if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1))
+               return false;
+       /* Barrier A (for pairing) */
+       smp_acquire();
+
+       used_idx &= ring_size - 1;
+       desc = &ring.desc[head & (ring_size - 1)];
+#else
+       if (used_idx == ring.avail->idx)
+               return false;
+
+       /* Barrier A (for pairing) */
+       smp_acquire();
+
+       used_idx &= ring_size - 1;
+       head = ring.avail->ring[used_idx];
+       desc = &ring.desc[head];
+#endif
+
+       *lenp = desc->len;
+       *bufp = (void *)(unsigned long)desc->addr;
+
+       /* now update used ring */
+       ring.used->ring[used_idx].id = head;
+       ring.used->ring[used_idx].len = desc->len - 1;
+       /* Barrier B (for pairing) */
+       smp_release();
+       host.used_idx++;
+       ring.used->idx = host.used_idx;
+
+       return true;
+}
+
+void call_used(void)
+{
+       /* Flush in previous flags write */
+       /* Barrier D (for pairing) */
+       smp_mb();
+       if (!vring_need_event(vring_used_event(&ring),
+                             host.used_idx,
+                             host.called_used_idx))
+               return;
+
+       host.called_used_idx = host.used_idx;
+       call();
+}
diff --git a/tools/virtio/ringtest/virtio_ring_poll.c b/tools/virtio/ringtest/virtio_ring_poll.c
new file mode 100644 (file)
index 0000000..84fc2c5
--- /dev/null
@@ -0,0 +1,2 @@
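+/* Build the polled variant of the 0.9 ring by recompiling virtio_ring_0_9.c
+ * with RING_POLL defined.
+ */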
+#define RING_POLL 1
+#include "virtio_ring_0_9.c"